arch/x86/include/asm/qspinlock_paravirt.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

#include <asm/ibt.h>

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
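/*
 * Purely for illustration, a minimal, hypothetical sketch (not the real
 * __PV_CALLEE_SAVE_REGS_THUNK() expansion) of what such a thunk has to
 * do on x86-64, assuming the SysV ABI where rcx, rdx, rsi, rdi and
 * r8-r11 are caller-clobbered and rax carries the return value:
 *
 *	__raw_callee_save_f:
 *		push %rcx; push %rdx; push %rsi; push %rdi
 *		push %r8;  push %r9;  push %r10; push %r11
 *		call f
 *		pop  %r11; pop %r10; pop %r9;  pop %r8
 *		pop  %rdi; pop %rsi; pop %rdx; pop %rcx
 *		ret
 *
 * On i386 with regparm(3), %eax and %edx can carry the return value, so
 * only %ecx needs this treatment and the generic thunk is already cheap.
 */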
#ifdef CONFIG_64BIT

__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
#define __pv_queued_spin_unlock	__pv_queued_spin_unlock

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register-saving thunk and the body of the following
 * C code.  Note that it puts the code in the .spinlock.text section,
 * which is equivalent to adding __lockfunc in the C code:
 *
 * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *	if (likely(lockval == _Q_LOCKED_VAL))
 *		return;
 *	pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *   rdi = lock              (first argument)
 *   rsi = lockval           (second argument)
 *   rdx = internal variable (set to 0)
 */
#define PV_UNLOCK_ASM							\
	FRAME_BEGIN							\
	"push  %rdx\n\t"						\
	"mov   $0x1,%eax\n\t"						\
	"xor   %edx,%edx\n\t"						\
	LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t"				\
	"cmp   $0x1,%al\n\t"						\
	"jne   .slowpath\n\t"						\
	"pop   %rdx\n\t"						\
	FRAME_END							\
	ASM_RET								\
	".slowpath:\n\t"						\
	"push   %rsi\n\t"						\
	"movzbl %al,%esi\n\t"						\
	"call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t"	\
	"pop    %rsi\n\t"						\
	"pop    %rdx\n\t"						\
	FRAME_END

DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock,
		    PV_UNLOCK_ASM, .spinlock.text);
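
/*
 * For reference, a simplified sketch of how the asm above gets called.
 * The real definition lives in arch/x86/include/asm/qspinlock.h and
 * reaches the thunk through the paravirt op rather than a direct call:
 *
 *	static inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		pv_queued_spin_unlock(lock);	// patched to the asm above
 *	}					// (or to the native unlock)
 */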

#else /* CONFIG_64BIT */

extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");

#endif /* CONFIG_64BIT */
#endif /* __ASM_QSPINLOCK_PARAVIRT_H */