From 880c0b5729f6507728025cf6b337ce31527ccc54 Mon Sep 17 00:00:00 2001
From: Tulio A M Mendes
Date: Tue, 10 Feb 2026 03:57:43 -0300
Subject: [PATCH] feat: improve spinlock with per-arch cpu_relax() and memory
 barriers

Spinlock improvements for SMP readiness:

- Add cpu_relax() with per-arch spin-wait hints:
  x86: PAUSE, ARM/AArch64: YIELD, RISC-V: FENCE, MIPS: PAUSE
- Add __sync_synchronize() barrier before lock release in spin_unlock
- Add spin_is_locked() debug helper
- Add spin_trylock() for non-blocking lock attempts
- TTAS (test-and-test-and-set) pattern with cpu_relax() in inner loop
- __sync_lock_test_and_set maps to XCHG (x86), LDREX/STREX (ARM),
  AMOSWAP.W.AQ (RISC-V), LL/SC (MIPS)

Passes: make, cppcheck, QEMU smoke test.
---
 include/spinlock.h | 42 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 3 deletions(-)

diff --git a/include/spinlock.h b/include/spinlock.h
index c10196b..573fb0f 100644
--- a/include/spinlock.h
+++ b/include/spinlock.h
@@ -5,6 +5,27 @@
 
 #include <stdint.h>
 
+/*
+ * Per-architecture spin-wait hint.
+ * Reduces power consumption and avoids memory-order pipeline stalls
+ * while spinning. Essential for SMP correctness and performance.
+ */
+static inline void cpu_relax(void) {
+#if defined(__i386__) || defined(__x86_64__)
+    __asm__ volatile("pause" ::: "memory");
+#elif defined(__arm__)
+    __asm__ volatile("yield" ::: "memory");
+#elif defined(__aarch64__)
+    __asm__ volatile("yield" ::: "memory");
+#elif defined(__riscv)
+    __asm__ volatile("fence" ::: "memory");
+#elif defined(__mips__)
+    __asm__ volatile("pause" ::: "memory");
+#else
+    __sync_synchronize();
+#endif
+}
+
 typedef struct {
     volatile uint32_t locked;
 } spinlock_t;
@@ -13,17 +34,32 @@ static inline void spinlock_init(spinlock_t* l) {
     l->locked = 0;
 }
 
+static inline int spin_is_locked(spinlock_t* l) {
+    return l->locked != 0;
+}
+
+/*
+ * Test-and-test-and-set (TTAS) spinlock.
+ * __sync_lock_test_and_set compiles to:
+ *   x86:    XCHG (implicit LOCK prefix)
+ *   ARM:    LDREX/STREX
+ *   RISC-V: AMOSWAP.W.AQ
+ *   MIPS:   LL/SC
+ */
 static inline void spin_lock(spinlock_t* l) {
     while (__sync_lock_test_and_set(&l->locked, 1)) {
         while (l->locked) {
-#if defined(__i386__) || defined(__x86_64__)
-            __asm__ volatile ("pause");
-#endif
+            cpu_relax();
         }
     }
 }
 
+static inline int spin_trylock(spinlock_t* l) {
+    return __sync_lock_test_and_set(&l->locked, 1) == 0;
+}
+
 static inline void spin_unlock(spinlock_t* l) {
+    __sync_synchronize();
     __sync_lock_release(&l->locked);
 }
 
-- 
2.43.0