Git Repo - linux.git/commitdiff
locking/qspinlock,x86: Clarify virt_spin_lock_key
author: Peter Zijlstra <[email protected]>
Thu, 1 Aug 2019 13:30:28 +0000 (15:30 +0200)
committer: Peter Zijlstra <[email protected]>
Tue, 6 Aug 2019 10:49:16 +0000 (12:49 +0200)
Add a few comments to clarify how this is supposed to work.

Reported-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Juergen Gross <[email protected]>
arch/x86/include/asm/qspinlock.h

index bd5ac6cc37db5f87c92cc3013138dffa8b2a0302..444d6fd9a6d8991bc152721720bb47fc3b258e46 100644 (file)
@@ -63,10 +63,25 @@ static inline bool vcpu_is_preempted(long cpu)
 #endif
 
 #ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_lock_init() and HV setup.
+ */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void native_pv_lock_init(void) __init;
 
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
This page took 0.05886 seconds and 4 git commands to generate.