/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <[email protected]>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <[email protected]>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %u ms\n",
		task->tk_pid, jiffies_to_msecs(task->tk_timeout));

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
{
	struct list_head *q = &queue->tasks[queue->priority];
	struct rpc_task *task;

	if (!list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		if (task->tk_owner == queue->owner)
			list_move_tail(&task->u.tk_wait.list, q);
	}
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		/* Fairness: rotate the list when changing priority */
		rpc_rotate_queue_owner(queue);
		queue->priority = priority;
	}
}

static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	if (queue_priority > queue->priority)
		rpc_set_waitqueue_priority(queue, queue_priority);
	q = &queue->tasks[queue_priority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

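/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * such as a transport embeds an rpc_wait_queue and initialises it once;
 * rpc_destroy_wait_queue() must be called before the queue goes away so
 * that the queue timer is stopped:
 *
 *	struct rpc_wait_queue pending;
 *
 *	rpc_init_wait_queue(&pending, "xprt_pending");
 *	...
 *	rpc_destroy_wait_queue(&pending);
 *
 * The qname argument only labels the queue in debugging output.
 */
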
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	trace_rpc_task_begin(task->tk_client, task, NULL);

	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task->tk_client, task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

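/*
 * Usage sketch (illustrative): callers normally use the
 * rpc_wait_for_completion_task() wrapper from sched.h, which passes a
 * NULL action and so sleeps killably until rpc_complete_task() clears
 * the RPC_TASK_ACTIVE bit:
 *
 *	status = rpc_wait_for_completion_task(task);
 *	if (status < 0)
 *		... the wait was interrupted by a fatal signal ...
 */
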
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(rpciod_workqueue, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		rpc_action action,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task->tk_client, task, q);

	__rpc_add_wait_queue(q, task, queue_priority);

	WARN_ON_ONCE(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, int priority)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task->tk_client, task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue)
			__rpc_do_wake_up_task(queue, task);
	}
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

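/*
 * Sketch of the sleep/wake pairing (illustrative): one context puts a
 * task to sleep from its tk_action or tk_callback routine, and another
 * context wakes it on the same queue once the condition it is waiting
 * for has been satisfied:
 *
 *	// in the task's state machine step
 *	task->tk_timeout = some_timeout;
 *	rpc_sleep_on(&some_queue, task, some_callback);
 *
 *	// elsewhere, e.g. from a completion or error path
 *	rpc_wake_up_queued_task(&some_queue, task);
 *
 * some_queue, some_timeout and some_callback are placeholders; the
 * waking side may also set task->tk_status before the task resumes.
 */
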
/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC: wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL) {
		if (func(task, data))
			rpc_wake_up_task_queue_locked(queue, task);
		else
			task = NULL;
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);

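/*
 * Usage sketch (illustrative): a state machine step that wants to retry
 * later typically resets tk_action and then calls rpc_delay(), e.g.
 *
 *	task->tk_status = 0;
 *	task->tk_action = some_retry_step;	// placeholder name
 *	rpc_delay(task, HZ >> 4);		// back off for roughly 1/16 s
 *	return;
 *
 * The task sleeps on delay_queue; when the queue timer fires it is woken
 * with -ETIMEDOUT, which __rpc_atrun() then clears back to zero.
 */
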
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

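/*
 * Illustrative sketch of the callback structure that drives
 * rpc_prepare_task() and rpc_exit_task() above (the field names are the
 * real rpc_call_ops members; the handler names are placeholders):
 *
 *	static const struct rpc_call_ops demo_call_ops = {
 *		.rpc_call_prepare = demo_prepare,	// optional
 *		.rpc_call_done	  = demo_done,		// optional
 *		.rpc_release	  = demo_release,	// optional
 *	};
 *
 * rpc_call_done() may restart the call by setting task->tk_action to a
 * new state, in which case rpc_exit_task() releases the transport slot
 * and resets the task statistics instead of finishing the task.
 */
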
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Execute any pending callback first.
		 */
		do_action = task->tk_callback;
		task->tk_callback = NULL;
		if (do_action == NULL) {
			/*
			 * Perform the next FSM step.
			 * tk_action may be NULL if the task has been killed.
			 * In particular, note that rpc_killall_tasks may
			 * do this at any time, so beware when dereferencing.
			 */
			do_action = task->tk_action;
			if (do_action == NULL)
				break;
		}
		trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(task);
	if (!is_async)
		__rpc_execute(task);
}

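/*
 * For reference (illustrative note): the state machine above is driven
 * entirely by task->tk_action. For an ordinary client call the steps are
 * the call_*() routines in clnt.c, such as call_start, call_reserve,
 * call_bind, call_connect, call_transmit, call_status and call_decode;
 * each step either advances tk_action or puts the task to sleep on a
 * wait queue until it can make progress.
 */
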
static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL and suppressing warning if the request cannot be serviced
 * immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

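/*
 * Usage sketch (illustrative): rpc_malloc() and rpc_free() are normally
 * wired up as a transport's buf_alloc/buf_free methods, so the RPC layer
 * pairs them for the lifetime of one request buffer:
 *
 *	void *p = rpc_malloc(task, 1024);	// returns &rpc_buffer->data
 *	if (!p)
 *		... arrange to retry later; rpciod must not sleep here ...
 *	...
 *	rpc_free(p);		// recovers the header via container_of()
 */
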
/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags  = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC: new task initialized, procpid %u\n",
				task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
	return task;
}

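/*
 * Usage sketch (illustrative): callers fill in a struct rpc_task_setup
 * and normally go through rpc_run_task() in clnt.c, which calls
 * rpc_new_task() and then rpc_execute(); clnt, demo_call_ops and data
 * below are placeholders:
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.rpc_client	= clnt,
 *		.callback_ops	= &demo_call_ops,
 *		.callback_data	= data,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task;
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	rpc_put_task(task);
 */
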
/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_rpccred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}