/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <[email protected]>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <[email protected]>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID	0xf00baa
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

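/*
 * Set or re-arm the queue's timer so that it fires at 'expires'.
 */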
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	BUG_ON (RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

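/*
 * Bit-wait callback: sleep unless a fatal signal is pending, in which
 * case return -ERESTARTSYS so the waiter can bail out.
 */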
static int rpc_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

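/*
 * When RPC_DEBUG is enabled, stamp each task with a magic value and a
 * unique debug pid that shows up in the dprintk output.
 */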
#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_magic = RPC_TASK_MAGIC_ID;
	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

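/*
 * Mark the task active (at most once) and add it to its client's
 * cl_tasks list so that rpc_killall_tasks() can find it.
 */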
static void rpc_set_active(struct rpc_task *task)
{
	struct rpc_clnt *clnt;
	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
		return;
	rpc_task_set_debuginfo(task);
	/* Add to global list of all tasks */
	clnt = task->tk_client;
	if (clnt != NULL) {
		spin_lock(&clnt->cl_lock);
		list_add_tail(&task->tk_task, &clnt->cl_tasks);
		spin_unlock(&clnt->cl_lock);
	}
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	smp_mb__after_clear_bit();
	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;
	/* We might have raced */
	if (RPC_IS_QUEUED(task)) {
		rpc_clear_running(task);
		return;
	}
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		status = queue_work(rpciod_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	__rpc_add_wait_queue(q, task);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action)
{
	/* Mark the task as being activated if so needed */
	rpc_set_active(task);

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
		__rpc_do_wake_up_task(queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the specified task
 */
static void rpc_wake_up_task(struct rpc_task *task)
{
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	rpc_wake_up_task_queue_locked(queue, task);
	return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			rpc_wake_up_task_queue_locked(queue, task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
			rpc_wake_up_task_queue_locked(queue, task);
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

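/*
 * Per-queue timer callback: walk the queue's timer list, wake up (with
 * -ETIMEDOUT) every task whose timeout has expired, and re-arm the
 * timer for the earliest remaining expiry.
 */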
static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

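/*
 * Callback run when a task queued via rpc_delay() wakes up: clear the
 * -ETIMEDOUT status left by the queue timer so the task resumes normally.
 */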
static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
		}
	}
}
EXPORT_SYMBOL_GPL(rpc_exit_task);

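/*
 * Invoke the caller's ->rpc_release() cleanup callback, if one was supplied.
 */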
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {

		/*
		 * Execute any pending callback.
		 */
		if (task->tk_callback) {
			void (*save_callback)(struct rpc_task *);

			/*
			 * We set tk_callback to NULL before calling it,
			 * in case it sets the tk_callback field itself:
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (task->tk_action == NULL)
				break;
			task->tk_action(task);
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		rpc_clear_running(task);
		if (RPC_IS_ASYNC(task)) {
			/* Careful! we may have raced... */
			if (RPC_IS_QUEUED(task))
				return;
			if (rpc_test_and_set_running(task))
				return;
			continue;
		}

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
			rpc_wake_up_task(task);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 * released. In particular note that tk_release() will have
 * been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_set_running(task);
	__rpc_execute(task);
}

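/*
 * Workqueue entry point for asynchronous tasks: run the state machine
 * from rpciod context.
 */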
static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

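/*
 * Header prepended to every buffer handed out by rpc_malloc(), recording
 * the allocation size so that rpc_free() can tell mempool allocations
 * from plain kmalloc() ones.
 */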
struct rpc_buffer {
	size_t	len;
	char	data[];
};

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_client = task_setup_data->rpc_client;
	if (task->tk_client != NULL) {
		kref_get(&task->tk_client->cl_kref);
		if (task->tk_client->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
	}

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	if (task_setup_data->rpc_message != NULL) {
		task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
		task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
		task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
		/* Bind the user cred */
		rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
		if (task->tk_action == NULL)
			rpc_call_start(task);
	}

	/* starting timestamp */
	task->tk_start = jiffies;

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}

826 | rpc_alloc_task(void) | |
827 | { | |
828 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); | |
829 | } | |
830 | ||
1da177e4 | 831 | /* |
90c5755f | 832 | * Create a new task for the specified client. |
1da177e4 | 833 | */ |
84115e1c | 834 | struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) |
1da177e4 | 835 | { |
e8f5d77c TM |
836 | struct rpc_task *task = setup_data->task; |
837 | unsigned short flags = 0; | |
838 | ||
839 | if (task == NULL) { | |
840 | task = rpc_alloc_task(); | |
841 | if (task == NULL) | |
842 | goto out; | |
843 | flags = RPC_TASK_DYNAMIC; | |
844 | } | |
1da177e4 | 845 | |
84115e1c | 846 | rpc_init_task(task, setup_data); |
1da177e4 | 847 | |
e8f5d77c | 848 | task->tk_flags |= flags; |
46121cf7 | 849 | dprintk("RPC: allocated task %p\n", task); |
1da177e4 LT |
850 | out: |
851 | return task; | |
1da177e4 LT |
852 | } |
853 | ||
32bfb5c0 | 854 | static void rpc_free_task(struct rpc_task *task) |
1da177e4 | 855 | { |
963d8fe5 TM |
856 | const struct rpc_call_ops *tk_ops = task->tk_ops; |
857 | void *calldata = task->tk_calldata; | |
1da177e4 | 858 | |
5e4424af TM |
859 | if (task->tk_flags & RPC_TASK_DYNAMIC) { |
860 | dprintk("RPC: %5u freeing task\n", task->tk_pid); | |
861 | mempool_free(task, rpc_task_mempool); | |
862 | } | |
32bfb5c0 TM |
863 | rpc_release_calldata(tk_ops, calldata); |
864 | } | |
865 | ||
866 | static void rpc_async_release(struct work_struct *work) | |
867 | { | |
868 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); | |
869 | } | |
870 | ||
871 | void rpc_put_task(struct rpc_task *task) | |
872 | { | |
e6b3c4db TM |
873 | if (!atomic_dec_and_test(&task->tk_count)) |
874 | return; | |
875 | /* Release resources */ | |
876 | if (task->tk_rqstp) | |
877 | xprt_release(task); | |
878 | if (task->tk_msg.rpc_cred) | |
879 | rpcauth_unbindcred(task); | |
880 | if (task->tk_client) { | |
881 | rpc_release_client(task->tk_client); | |
882 | task->tk_client = NULL; | |
883 | } | |
32bfb5c0 TM |
884 | if (task->tk_workqueue != NULL) { |
885 | INIT_WORK(&task->u.tk_work, rpc_async_release); | |
886 | queue_work(task->tk_workqueue, &task->u.tk_work); | |
887 | } else | |
888 | rpc_free_task(task); | |
e6b3c4db | 889 | } |
e8914c65 | 890 | EXPORT_SYMBOL_GPL(rpc_put_task); |
e6b3c4db | 891 | |
bde8f00c | 892 | static void rpc_release_task(struct rpc_task *task) |
e6b3c4db | 893 | { |
1da177e4 LT |
894 | #ifdef RPC_DEBUG |
895 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | |
896 | #endif | |
46121cf7 | 897 | dprintk("RPC: %5u release task\n", task->tk_pid); |
1da177e4 | 898 | |
6529eba0 | 899 | if (!list_empty(&task->tk_task)) { |
4bef61ff | 900 | struct rpc_clnt *clnt = task->tk_client; |
6529eba0 | 901 | /* Remove from client task list */ |
4bef61ff | 902 | spin_lock(&clnt->cl_lock); |
6529eba0 | 903 | list_del(&task->tk_task); |
4bef61ff | 904 | spin_unlock(&clnt->cl_lock); |
6529eba0 | 905 | } |
1da177e4 | 906 | BUG_ON (RPC_IS_QUEUED(task)); |
1da177e4 | 907 | |
1da177e4 LT |
908 | #ifdef RPC_DEBUG |
909 | task->tk_magic = 0; | |
910 | #endif | |
e6b3c4db TM |
911 | /* Wake up anyone who is waiting for task completion */ |
912 | rpc_mark_complete_task(task); | |
913 | ||
914 | rpc_put_task(task); | |
1da177e4 LT |
915 | } |
916 | ||
1da177e4 LT |
917 | /* |
918 | * Kill all tasks for the given client. | |
919 | * XXX: kill their descendants as well? | |
920 | */ | |
4bef61ff | 921 | void rpc_killall_tasks(struct rpc_clnt *clnt) |
1da177e4 LT |
922 | { |
923 | struct rpc_task *rovr; | |
1da177e4 | 924 | |
1da177e4 | 925 | |
4bef61ff TM |
926 | if (list_empty(&clnt->cl_tasks)) |
927 | return; | |
928 | dprintk("RPC: killing all tasks for client %p\n", clnt); | |
929 | /* | |
930 | * Spin lock all_tasks to prevent changes... | |
931 | */ | |
932 | spin_lock(&clnt->cl_lock); | |
933 | list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { | |
1da177e4 LT |
934 | if (! RPC_IS_ACTIVATED(rovr)) |
935 | continue; | |
6529eba0 | 936 | if (!(rovr->tk_flags & RPC_TASK_KILLED)) { |
1da177e4 LT |
937 | rovr->tk_flags |= RPC_TASK_KILLED; |
938 | rpc_exit(rovr, -EIO); | |
939 | rpc_wake_up_task(rovr); | |
940 | } | |
941 | } | |
4bef61ff | 942 | spin_unlock(&clnt->cl_lock); |
1da177e4 | 943 | } |
e8914c65 | 944 | EXPORT_SYMBOL_GPL(rpc_killall_tasks); |
1da177e4 | 945 | |
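/*
 * rpciod_up()/rpciod_down() take and drop a reference on this module so
 * it cannot be unloaded while the rpciod workqueue is in use.
 */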
int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = create_workqueue("rpciod");
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					sizeof(struct rpc_task),
					0, SLAB_HWCACHE_ALIGN,
					NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					RPC_BUFFER_MAXSIZE,
					0, SLAB_HWCACHE_ALIGN,
					NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}