arch/s390/include/asm/spinlock.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky ([email protected])
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

/* The calling CPU's lock value (see arch_spin_lockval()), cached in the lowcore. */
#define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval)

extern int spin_retry;

bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness guarantees have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

static inline u32 arch_spin_lockval(int cpu)
{
        return cpu + 1;
}
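
/*
 * The lock value encodes the owner as cpu + 1, which keeps 0 free to
 * mean "unlocked" while giving every CPU, including CPU 0, a nonzero
 * owner tag, e.g. arch_spin_lockval(0) == 1 and arch_spin_lockval(7) == 8.
 */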

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        int old = 0;

        barrier();
        return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
}
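
/*
 * The fast path above succeeds only if the lock word was 0 (unlocked)
 * and installs the owner's SPINLOCK_LOCKVAL in a single atomic
 * compare-and-swap; on s390 the compare-and-swap itself serializes,
 * so a successful trylock also provides the acquire ordering.
 */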

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(int, lp->lock);
        kcsan_release();
        asm_inline volatile(
                ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */
                "       mvhhi   %[lock],0\n"
                : [lock] "=Q" (((unsigned short *)&lp->lock)[1])
                :
                : "memory");
}
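
/*
 * Unlock stores 0 only into the lower halfword of the lock word (the
 * owner tag), leaving the upper halfword, which the wait path in
 * arch/s390/lib/spinlock.c uses for waiter bookkeeping, untouched.
 * The NIAI ("next instruction access intent") hint before the store is
 * patched in by the alternative only when facility 49 is installed.
 */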

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
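
/*
 * A minimal usage sketch of the mixing described above, using the
 * generic rwlock API (the lock name is hypothetical):
 *
 *      static DEFINE_RWLOCK(sample_lock);
 *
 *      reader, may run in an interrupt handler:
 *              read_lock(&sample_lock);
 *              ...
 *              read_unlock(&sample_lock);
 *
 *      writer, must be irq-safe so that an interrupt reader cannot
 *      deadlock against it on the same CPU:
 *              unsigned long flags;
 *              write_lock_irqsave(&sample_lock, flags);
 *              ...
 *              write_unlock_irqrestore(&sample_lock, flags);
 */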

#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        int old;

        old = __atomic_add(1, &rw->cnts);
        if (old & 0xffff0000)
                arch_read_lock_wait(rw);
}
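
/*
 * Layout implied by the masks in this file: the lower 16 bits of
 * rw->cnts count the readers, the upper 16 bits carry writer state.
 * A reader therefore bumps the count unconditionally and falls back
 * to the wait path only when a writer owns or is waiting for the lock.
 */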

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        int old = 0;

        if (!arch_try_cmpxchg(&rw->cnts, &old, 0x30000))
                arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __atomic_add_barrier(-0x30000, &rw->cnts);
}
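
/*
 * Writer fast path: the lock is taken only if cnts is 0, i.e. there is
 * no reader and no other writer. Writing 0x30000 sets both writer bits,
 * presumably so that arch_write_unlock() can subtract the same constant
 * regardless of whether the lock was taken on the fast or the slow path.
 */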

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return (!(old & 0xffff0000) && arch_try_cmpxchg(&rw->cnts, &old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return !old && arch_try_cmpxchg(&rw->cnts, &old, 0x30000);
}

#endif /* __ASM_SPINLOCK_H */