spin_lock.h
/*
 * Copyright (c) 2024 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _HARDWARE_SYNC_SPIN_LOCK_H
#define _HARDWARE_SYNC_SPIN_LOCK_H

#include "pico.h"
#include "hardware/sync.h"

// PICO_CONFIG: PICO_USE_SW_SPIN_LOCKS, Use software implementation for spin locks, type=bool, default=1 on RP2350 due to errata, group=hardware_sync
#ifndef PICO_USE_SW_SPIN_LOCKS
#if PICO_RP2350
#define PICO_USE_SW_SPIN_LOCKS 1
#endif
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_IRQ, Spinlock ID for IRQ protection, min=0, max=31, default=9, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_IRQ
#define PICO_SPINLOCK_ID_IRQ 9
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_TIMER, Spinlock ID for Timer protection, min=0, max=31, default=10, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_TIMER
#define PICO_SPINLOCK_ID_TIMER 10
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_HARDWARE_CLAIM, Spinlock ID for Hardware claim protection, min=0, max=31, default=11, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_HARDWARE_CLAIM
#define PICO_SPINLOCK_ID_HARDWARE_CLAIM 11
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_RAND, Spinlock ID for Random Number Generator, min=0, max=31, default=12, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_RAND
#define PICO_SPINLOCK_ID_RAND 12
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_ATOMIC, Spinlock ID for atomics, min=0, max=31, default=13, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_ATOMIC
#define PICO_SPINLOCK_ID_ATOMIC 13
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS1, First Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=14, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS1
#define PICO_SPINLOCK_ID_OS1 14
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS2, Second Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=15, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS2
#define PICO_SPINLOCK_ID_OS2 15
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_FIRST, Lowest Spinlock ID in the 'striped' range, min=0, max=31, default=16, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_FIRST
#define PICO_SPINLOCK_ID_STRIPED_FIRST 16
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_LAST, Highest Spinlock ID in the 'striped' range, min=0, max=31, default=23, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_LAST
#define PICO_SPINLOCK_ID_STRIPED_LAST 23
#endif
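
// Illustrative note (sketch, not part of this header): the 'striped' range is
// intended to be handed out round-robin via next_striped_spin_lock_num(),
// declared in hardware/sync.h, e.g.:
//
//     spin_lock_t *lock = spin_lock_init(next_striped_spin_lock_num());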
// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_FIRST, Lowest Spinlock ID in the 'claim free' range, min=0, max=31, default=24, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_FIRST
#define PICO_SPINLOCK_ID_CLAIM_FREE_FIRST 24
#endif

#ifdef PICO_SPINLOCK_ID_CLAIM_FREE_END
#warning PICO_SPINLOCK_ID_CLAIM_FREE_END has been renamed to PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_LAST, Highest Spinlock ID in the 'claim free' range, min=0, max=31, default=31, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#define PICO_SPINLOCK_ID_CLAIM_FREE_LAST 31
#endif
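// Illustrative example (sketch): locks in the 'claim free' range are intended
// to be allocated at runtime via spin_lock_claim_unused(), declared in
// hardware/sync.h, rather than hard-coded:
//
//     int lock_num = spin_lock_claim_unused(true); // true: panic if none free
//     spin_lock_t *lock = spin_lock_init((uint) lock_num);
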
#if !PICO_USE_SW_SPIN_LOCKS
// Hardware lock flag in SIO:
typedef io_rw_32 spin_lock_t;
#else
#ifndef SW_SPIN_LOCK_TYPE
// Byte flag in memory:
#define SW_SPIN_LOCK_TYPE volatile uint8_t
#endif
typedef SW_SPIN_LOCK_TYPE spin_lock_t;
#endif

#if PICO_USE_SW_SPIN_LOCKS
#ifndef SW_SPIN_LOCK_INSTANCE
#define SW_SPIN_LOCK_INSTANCE(lock_num) ({ \
    extern spin_lock_t _sw_spin_locks[NUM_SPIN_LOCKS]; \
    &_sw_spin_locks[lock_num]; \
})
#endif

#ifndef SW_SPIN_LOCK_NUM
#define SW_SPIN_LOCK_NUM(lock) ({ \
    extern spin_lock_t _sw_spin_locks[NUM_SPIN_LOCKS]; \
    (lock) - _sw_spin_locks; \
})
#endif

#ifndef SW_SPIN_LOCK_IS_LOCKED
#define SW_SPIN_LOCK_IS_LOCKED(lock) ((bool) *(lock))
#endif

#ifndef SW_SPIN_LOCK_LOCK
#if __ARM_ARCH_8M_MAIN__
#define SW_SPIN_LOCK_LOCK(lock) ({ \
    uint32_t _tmp0, _tmp1; \
    pico_default_asm_volatile ( \
        "1:\n" \
        "ldaexb %1, [%2]\n" \
        "movs %0, #1\n" /* fill dependency slot */ \
        "cmp %1, #0\n" \
        /* Immediately retry if lock is seen to be taken */ \
        "bne 1b\n" \
        /* Attempt to claim */ \
        "strexb %1, %0, [%2]\n" \
        "cmp %1, #0\n" \
        /* Claim failed due to intervening write, so retry */ \
        "bne 1b\n" \
        : "=&r" (_tmp0), "=&r" (_tmp1) : "r" (lock) \
    ); \
    __mem_fence_acquire(); \
})
#elif __riscv && (defined(__riscv_a) || defined(__riscv_zaamo))
#define SW_SPIN_LOCK_LOCK(lock) ({ \
    uint32_t _tmp0, _tmp1; \
    pico_default_asm_volatile ( \
        /* Get word address, and bit mask for LSB of the */ \
        /* correct byte within that word -- note shamt is modulo xlen: */ \
        "slli %1, %0, 3\n" \
        "bset %1, zero, %1\n" \
        "andi %0, %0, -4\n" \
        /* Repeatedly set the bit until we see that it was clear at the */ \
        /* point we set it. A set from 0 -> 1 is a successful lock take. */ \
        "1:" \
        "amoor.w.aq %2, %1, (%0)\n" \
        "and %2, %2, %1\n" \
        "bnez %2, 1b\n" \
        : "+r" (lock), "=r" (_tmp0), "=r" (_tmp1) \
    ); \
    __mem_fence_acquire(); \
})
#else
#error no SW_SPIN_LOCK_LOCK available for PICO_USE_SW_SPIN_LOCKS on this platform
#endif
#endif

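// Worked example (illustrative) of the address arithmetic in the RISC-V macro
// above: for a lock byte at 0x20000123, "slli" computes the bit offset
// 0x20000123 << 3; "bset" takes that shift amount modulo XLEN (32), so it sets
// bit (3 * 8) = 24, i.e. the LSB of byte 3 within its word; and "andi ..., -4"
// aligns the address down to the containing word, 0x20000120, which
// amoor.w.aq then operates on.
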
#ifndef SW_SPIN_TRY_LOCK
#if __ARM_ARCH_8M_MAIN__
#define SW_SPIN_TRY_LOCK(lock) ({ \
    uint32_t _tmp0, _tmp1; \
    pico_default_asm_volatile ( \
        "ldaexb %1, [%2]\n" \
        "movs %0, #1\n" /* fill dependency slot */ \
        "cmp %1, #0\n" \
        /* Immediately give up if lock is seen to be taken */ \
        "bne 1f\n" \
        /* Otherwise attempt to claim, once. */ \
        "strexb %1, %0, [%2]\n" \
        "1:\n" \
        : "=&r" (_tmp0), "=&r" (_tmp1) : "r" (lock) \
    ); \
    __mem_fence_acquire(); \
    !_tmp1; \
})
#elif __riscv && (defined(__riscv_a) || defined(__riscv_zaamo))
#define SW_SPIN_TRY_LOCK(lock) ({ \
    uint32_t _tmp0; \
    pico_default_asm_volatile ( \
        /* Get word address, and bit mask for LSB of the */ \
        /* correct byte within that word -- note shamt is modulo xlen: */ \
        "slli %1, %0, 3\n" \
        "bset %1, zero, %1\n" \
        "andi %0, %0, -4\n" \
        /* Set the bit. If it was clear at the point we set it, then we took */ \
        /* the lock. Otherwise the lock was already held, and we give up. */ \
        "amoor.w.aq %0, %1, (%0)\n" \
        "and %1, %1, %0\n" \
        : "+r" (lock), "=r" (_tmp0) \
    ); \
    __mem_fence_acquire(); \
    !_tmp0; \
})
#else
#error no SW_SPIN_TRY_LOCK available for PICO_USE_SW_SPIN_LOCKS on this platform
#endif
#endif

#ifndef SW_SPIN_LOCK_UNLOCK
#if __ARM_ARCH_8M_MAIN__
#define SW_SPIN_LOCK_UNLOCK(lock) ({ \
    /* Release-ordered store is available: use instead of separate fence */ \
    uint32_t zero = 0; \
    pico_default_asm_volatile( \
        "stlb %0, [%1]\n" \
        : : "r" (zero), "r" (lock) \
    ); \
})
#elif __riscv
#define SW_SPIN_LOCK_UNLOCK(lock) ({ \
    __mem_fence_release(); \
    *(lock) = 0; /* clear the lock byte (release the lock) */ \
})
#else
#error no SW_SPIN_LOCK_UNLOCK available for PICO_USE_SW_SPIN_LOCKS on this platform
#endif
#endif

#endif

__force_inline static spin_lock_t *spin_lock_instance(uint lock_num) {
    invalid_params_if(HARDWARE_SYNC, lock_num >= NUM_SPIN_LOCKS);
#if PICO_USE_SW_SPIN_LOCKS
    return SW_SPIN_LOCK_INSTANCE(lock_num);
#else
    return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
#endif
}
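// Illustrative use (sketch): map one of the reserved IDs above to its lock
// instance; PICO_SPINLOCK_ID_OS1 is just an example choice:
//
//     spin_lock_t *lock = spin_lock_instance(PICO_SPINLOCK_ID_OS1);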

__force_inline static uint spin_lock_get_num(spin_lock_t *lock) {
#if PICO_USE_SW_SPIN_LOCKS
    uint lock_num = SW_SPIN_LOCK_NUM(lock);
    invalid_params_if(HARDWARE_SYNC, lock_num >= (uint)NUM_SPIN_LOCKS);
    return lock_num;
#else
    invalid_params_if(HARDWARE_SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                      (uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                      ((uint) lock - SIO_BASE + SIO_SPINLOCK0_OFFSET) % sizeof(spin_lock_t) != 0);
    return (uint) (lock - (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET));
#endif
}

__force_inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
    // Note we don't do a wfe or anything, because by convention these spin locks are VERY SHORT LIVED and NEVER BLOCK and run
    // with INTERRUPTS disabled (to ensure that)... therefore nothing on our core could be blocking us, so we just need to wait
    // for the other core, which should be finished soon anyway
#if PICO_USE_SW_SPIN_LOCKS
    SW_SPIN_LOCK_LOCK(lock);
#else
    while (__builtin_expect(!*lock, 0)) { // read from spinlock register (tries to acquire the lock)
        tight_loop_contents();
    }
    __mem_fence_acquire();
#endif
}

__force_inline static bool spin_try_lock_unsafe(spin_lock_t *lock) {
#if PICO_USE_SW_SPIN_LOCKS
    return SW_SPIN_TRY_LOCK(lock);
#else
    return *lock; // read from spinlock register; nonzero means the lock was acquired
#endif
}
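// Illustrative try-lock pattern (sketch): per the 'unsafe' convention,
// interrupts must already be disabled and the critical section must be very
// short; 'lock' stands for any valid spin_lock_t pointer:
//
//     if (spin_try_lock_unsafe(lock)) {
//         // ... very short critical section ...
//         spin_unlock_unsafe(lock);
//     }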

__force_inline static void spin_unlock_unsafe(spin_lock_t *lock) {
#if PICO_USE_SW_SPIN_LOCKS
    SW_SPIN_LOCK_UNLOCK(lock);
#else
    __mem_fence_release();
    *lock = 0; // write to spinlock register (release lock)
#endif
}

__force_inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
    uint32_t save = save_and_disable_interrupts();
    spin_lock_unsafe_blocking(lock);
    return save;
}

inline static bool is_spin_locked(spin_lock_t *lock) {
#if PICO_USE_SW_SPIN_LOCKS
    return SW_SPIN_LOCK_IS_LOCKED(lock);
#else
    check_hw_size(spin_lock_t, 4);
    uint lock_num = spin_lock_get_num(lock);
    return 0 != (*(io_ro_32 *) (SIO_BASE + SIO_SPINLOCK_ST_OFFSET) & (1u << lock_num));
#endif
}

__force_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
    spin_unlock_unsafe(lock);
    restore_interrupts_from_disabled(saved_irq);
}
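// Illustrative usage of the safe API above (sketch; the lock number is an
// arbitrary example). spin_lock_blocking() disables interrupts before taking
// the lock, and spin_unlock() releases it and restores the saved IRQ state:
//
//     spin_lock_t *lock = spin_lock_init(PICO_SPINLOCK_ID_CLAIM_FREE_FIRST);
//     uint32_t save = spin_lock_blocking(lock);
//     // ... very short critical section ...
//     spin_unlock(lock, save);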

spin_lock_t *spin_lock_init(uint lock_num);

void spin_locks_reset(void);

#endif