Git Repo - linux.git/commitdiff
locking/osq: Relax atomic semantics
author: Davidlohr Bueso <[email protected]>
Mon, 14 Sep 2015 07:37:24 +0000 (00:37 -0700)
committer: Ingo Molnar <[email protected]>
Fri, 18 Sep 2015 07:27:29 +0000 (09:27 +0200)
... by using acquire/release for ops around the lock->tail. As such,
weakly ordered archs can benefit from more relaxed use of barriers
when issuing atomics.

Signed-off-by: Davidlohr Bueso <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Waiman Long <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
kernel/locking/osq_lock.c

index dc85ee23a26f79416a140241e3067a5a2ca24d0b..d092a0c9c2d4ed838004215cdf038691a14f4598 100644 (file)
@@ -50,7 +50,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 
        for (;;) {
                if (atomic_read(&lock->tail) == curr &&
-                   atomic_cmpxchg(&lock->tail, curr, old) == curr) {
+                   atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
@@ -92,7 +92,11 @@ bool osq_lock(struct optimistic_spin_queue *lock)
        node->next = NULL;
        node->cpu = curr;
 
-       old = atomic_xchg(&lock->tail, curr);
+       /*
+        * ACQUIRE semantics, pairs with corresponding RELEASE
+        * in unlock() uncontended, or fastpath.
+        */
+       old = atomic_xchg_acquire(&lock->tail, curr);
        if (old == OSQ_UNLOCKED_VAL)
                return true;
 
@@ -184,7 +188,8 @@ void osq_unlock(struct optimistic_spin_queue *lock)
        /*
         * Fast path for the uncontended case.
         */
-       if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
+       if (likely(atomic_cmpxchg_release(&lock->tail, curr,
+                                         OSQ_UNLOCKED_VAL) == curr))
                return;
 
        /*
This page took 0.060279 seconds and 4 git commands to generate.