/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

#include <asm/ibt.h>

void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
#ifdef CONFIG_64BIT
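
/*
 * Illustrative sketch only, not the real macro expansion: the
 * __raw_callee_save_<func> thunk emitted by __PV_CALLEE_SAVE_REGS_THUNK()
 * conceptually wraps the C function like
 *
 *	__raw_callee_save___pv_queued_spin_unlock_slowpath:
 *		push	<caller-saved GPRs other than %rax>
 *		call	__pv_queued_spin_unlock_slowpath
 *		pop	<the same registers, in reverse order>
 *		ret
 *
 * so a call site may assume no general-purpose registers are clobbered
 * apart from the %rax return value.
 */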
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
#define __pv_queued_spin_unlock	__pv_queued_spin_unlock
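
/*
 * Defining __pv_queued_spin_unlock to itself looks odd, but it lets the
 * generic PV qspinlock code detect with #ifndef that an arch-optimized
 * unlock exists and skip emitting its C fallback. A sketch of the
 * (assumed) check on the generic side, kernel/locking/qspinlock_paravirt.h:
 *
 *	#ifndef __pv_queued_spin_unlock
 *	__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		... generic C implementation ...
 *	}
 *	#endif
 */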

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register-saving thunk and the body of the following
 * C code. Note that it puts the code in the .spinlock.text section which
 * is equivalent to adding __lockfunc in the C code:
 *
 * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *	if (likely(lockval == _Q_LOCKED_VAL))
 *		return;
 *	pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *	rdi = lock              (first argument)
 *	rsi = lockval           (second argument)
 *	rdx = internal variable (set to 0)
 */
#define PV_UNLOCK_ASM							\
	FRAME_BEGIN							\
	"push  %rdx\n\t"						\
	"mov   $0x1,%eax\n\t"						\
	"xor   %edx,%edx\n\t"						\
	LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t"				\
	"cmp   $0x1,%al\n\t"						\
	"jne   .slowpath\n\t"						\
	"pop   %rdx\n\t"						\
	FRAME_END							\
	ASM_RET								\
	".slowpath:\n\t"						\
	"push   %rsi\n\t"						\
	"movzbl %al,%esi\n\t"						\
	"call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t"	\
	"pop    %rsi\n\t"						\
	"pop    %rdx\n\t"						\
	FRAME_END							\
	ASM_RET

DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock,
		    PV_UNLOCK_ASM, .spinlock.text);
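
/*
 * Sketch of intended use (assumed, modeled on the KVM guest code in
 * arch/x86/kernel/kvm.c): a paravirt backend that enables PV spinlocks
 * installs the optimized unlock via something like
 *
 *	pv_ops.lock.queued_spin_unlock =
 *		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 *
 * where PV_CALLEE_SAVE() resolves to the __raw_callee_save_* entry
 * point defined above.
 */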

#else /* CONFIG_64BIT */

extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");

#endif /* CONFIG_64BIT */
#endif /* __ASM_QSPINLOCK_PARAVIRT_H */