/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        lock->spin_mlock = NULL;
#endif

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

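/*
 * Illustrative sketch (not part of the original file): a mutex embedded in a
 * dynamically allocated object is set up with the mutex_init() wrapper (which
 * ends up in __mutex_init() above); statically allocated mutexes can use
 * DEFINE_MUTEX() instead. struct example_device and example_device_setup()
 * are hypothetical names used only for this sketch.
 */
struct example_device {
        struct mutex io_lock;		/* protects the fields below */
        int open_count;
};

static void example_device_setup(struct example_device *dev)
{
        mutex_init(&dev->io_lock);	/* must run before the first lock/unlock */
        dev->open_count = 0;
}
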
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners trying to acquire the mutex
 * more or less simultaneously, the spinners need to acquire an MCS lock
 * first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
        struct mspin_node *next;
        int		   locked;	/* 1 if lock acquired */
};
#define MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
        struct mspin_node *prev;

        /* Init node */
        node->locked = 0;
        node->next   = NULL;

        prev = xchg(lock, node);
        if (likely(prev == NULL)) {
                /* Lock acquired */
                node->locked = 1;
                return;
        }
        ACCESS_ONCE(prev->next) = node;
        smp_wmb();
        /* Wait until the lock holder passes the lock down */
        while (!ACCESS_ONCE(node->locked))
                arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
        struct mspin_node *next = ACCESS_ONCE(node->next);

        if (likely(!next)) {
                /*
                 * Release the lock by setting it to NULL
                 */
                if (cmpxchg(lock, node, NULL) == node)
                        return;
                /* Wait until the next pointer is set */
                while (!(next = ACCESS_ONCE(node->next)))
                        arch_mutex_cpu_relax();
        }
        ACCESS_ONCE(next->locked) = 1;
        smp_wmb();
}

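/*
 * Illustrative sketch (not part of the original file): a spinner queues on
 * the per-mutex MCS lock with a node that lives on its own stack, does its
 * owner-spinning work, then hands the MCS lock to the next queued spinner.
 * This mirrors how __mutex_lock_common() below pairs mspin_lock() with
 * mspin_unlock(); example_spin_attempt() is a hypothetical name.
 */
static inline void example_spin_attempt(struct mutex *lock)
{
        struct mspin_node node;			/* per-spinner, stack-allocated */

        mspin_lock(MLOCK(lock), &node);		/* at most one spinner proceeds */
        /* ... spin on lock->owner / try to grab lock->count here ... */
        mspin_unlock(MLOCK(lock), &node);	/* pass the MCS lock down */
}
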
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking that
         * lock->owner still matches owner. If that fails, owner might point
         * to freed memory; if it still matches, the rcu_read_lock() ensures
         * the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                arch_mutex_cpu_relax();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changed, which is a sign of heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        int retval = 1;

        rcu_read_lock();
        if (lock->owner)
                retval = lock->owner->on_cpu;
        rcu_read_unlock();
        /*
         * If lock->owner is not set, the owner may have just acquired the
         * mutex and not set the field yet, or the mutex may have been
         * released.
         */
        return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner too early:
         * the slow path will always be taken, and it clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         *
         * The mutex spinners are queued up using the MCS lock so that only
         * one spinner can compete for the mutex. However, if mutex spinning
         * isn't going to happen, there is no point in going through the
         * lock/unlock overhead.
         */
        if (!mutex_can_spin_on_owner(lock))
                goto slowpath;

        for (;;) {
                struct task_struct *owner;
                struct mspin_node  node;

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                mspin_lock(MLOCK(lock), &node);
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner)) {
                        mspin_unlock(MLOCK(lock), &node);
                        break;
                }

                if ((atomic_read(&lock->count) == 1) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                        lock_acquired(&lock->dep_map, ip);
                        mutex_set_owner(lock);
                        mspin_unlock(MLOCK(lock), &node);
                        preempt_enable();
                        return 0;
                }
                mspin_unlock(MLOCK(lock), &node);

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
slowpath:
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                if (MUTEX_SHOW_NO_WAITER(lock) &&
                    (atomic_xchg(&lock->count, -1) == 1))
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        preempt_enable();

        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

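/*
 * Illustrative sketch (not part of the original file): when two mutexes of
 * the same lock class must be held at once (for example, two instances of a
 * hypothetical struct example_node), the caller picks a stable ordering and
 * annotates the inner acquisition with mutex_lock_nested() so lockdep does
 * not flag it as recursive locking. SINGLE_DEPTH_NESTING is the standard
 * lockdep subclass constant; example_node and example_lock_pair() are
 * made-up names.
 */
struct example_node {
        struct mutex lock;
};

static void example_lock_pair(struct example_node *a, struct example_node *b)
{
        /* order by address to keep the locking order stable */
        if (b < a) {
                struct example_node *tmp = a;
                a = b;
                b = tmp;
        }

        mutex_lock(&a->lock);
        mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
}
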
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);

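/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * configuration helper takes the lock interruptibly so a pending signal can
 * abort the wait; the -EINTR from mutex_lock_interruptible() is simply
 * propagated to the caller. struct example_dev and example_set_cfg() are
 * made-up names.
 */
struct example_dev {
        struct mutex cfg_lock;
        int cfg_value;
};

static int example_set_cfg(struct example_dev *dev, int val)
{
        int ret;

        ret = mutex_lock_interruptible(&dev->cfg_lock);
        if (ret)
                return ret;		/* interrupted while sleeping */

        dev->cfg_value = val;
        mutex_unlock(&dev->cfg_lock);
        return 0;
}
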
int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock: we take the spinlock and check whether we
 * can get the lock.
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);
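
/*
 * Illustrative sketch (not part of the original file): mutex_trylock()
 * returns 1 on success and 0 on contention (the opposite of down_trylock()),
 * so a caller typically falls back to another path when it returns 0.
 * example_stats_lock and example_try_update() are made-up names.
 */
static DEFINE_MUTEX(example_stats_lock);

static bool example_try_update(void)
{
        if (!mutex_trylock(&example_stats_lock))
                return false;		/* contended - caller retries later */

        /* ... update the statistics ... */
        mutex_unlock(&example_stats_lock);
        return true;
}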

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we dec to 0, return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
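
/*
 * Illustrative sketch (not part of the original file): the classic use of
 * atomic_dec_and_mutex_lock() is dropping a reference where only the final
 * put must tear the object down under a mutex. struct example_obj,
 * example_list_lock and example_put() are made-up names; the unlink/free
 * step is only indicated by a comment.
 */
struct example_obj {
        atomic_t refcount;
};

static DEFINE_MUTEX(example_list_lock);

static void example_put(struct example_obj *obj)
{
        if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_list_lock))
                return;			/* not the last reference */

        /* last reference: unlink/free the object while holding the lock */
        mutex_unlock(&example_list_lock);
}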