arch/powerpc/include/asm/qspinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

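/*
 * Bounds how many times the generic slowpath rereads the lock word while
 * waiting for a concurrent pending->locked hand-over before the CPU gives
 * up and queues itself; the value is a placeholder, not tuned for powerpc.
 */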
#define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);

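/*
 * On a shared-processor LPAR the lock holder's vCPU may be preempted by
 * the hypervisor, so spinning natively can waste the partition's cycles.
 * Dispatch to the paravirt slowpath there, which can yield the vCPU;
 * otherwise take the native slowpath.
 */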
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		native_queued_spin_lock_slowpath(lock, val);
	else
		__pv_queued_spin_lock_slowpath(lock, val);
}

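/*
 * Native unlock is a plain release store of the locked byte.  The paravirt
 * unlock must go through __pv_queued_spin_unlock() so that a waiter which
 * went to sleep in pv_wait() gets kicked awake.
 */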
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);
	else
		__pv_queued_spin_unlock(lock);
}

#else
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#endif

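/*
 * Fastpath: try to take the lock atomically when it is free
 * (0 -> _Q_LOCKED_VAL).  On failure the cmpxchg updates 'val' with the
 * value observed in the lock word, which is then passed to the slowpath.
 */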
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock

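/*
 * Lock acquisition only has acquire semantics; callers that need prior
 * stores ordered against later loads use smp_mb__after_spinlock(), which
 * must therefore be a full barrier here.
 */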
#define smp_mb__after_spinlock()   smp_mb()

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * This barrier was added to simple spinlocks by commit 51d7d5205d338,
	 * but it should now be possible to remove it, as arm64 has done with
	 * commit c6f5d02b6a0f.
	 */
	smp_mb();
	return atomic_read(&lock->val);
}
#define queued_spin_is_locked queued_spin_is_locked

#ifdef CONFIG_PARAVIRT_SPINLOCKS
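/*
 * Number of spins the paravirt slowpath attempts before a waiter gives up
 * and calls pv_wait(); likewise a placeholder value.
 */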
#define SPIN_THRESHOLD (1 << 15) /* not tuned */

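/*
 * Called by the generic paravirt slowpath once SPIN_THRESHOLD spins have
 * failed: recheck that the wait condition still holds, then cede this
 * vCPU to the hypervisor rather than keep burning cycles.
 */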
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	if (*ptr != val)
		return;
	yield_to_any();
	/*
	 * We could pass in a CPU here if waiting in the queue and yield to
	 * the previous CPU in the queue.
	 */
}

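/*
 * Wake the vCPU that a previous pv_wait() put to sleep (via prod_cpu(),
 * an H_PROD hcall on pseries).
 */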
static __always_inline void pv_kick(int cpu)
{
	prod_cpu(cpu);
}

extern void __pv_init_lock_hash(void);

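/*
 * Allocate the hash table that the paravirt slowpath uses to map locks to
 * their waiting nodes, so __pv_queued_spin_unlock() can find which CPU to
 * kick.
 */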
static inline void pv_spinlocks_init(void)
{
	__pv_init_lock_hash();
}

#endif

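/*
 * Pull in the generic qspinlock implementation; the queued_spin_*
 * definitions above override the generic versions.
 */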
#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */