/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <[email protected]>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <[email protected]>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID	0xf00baa
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void			__rpc_default_timer(struct rpc_task *task);
static void			rpc_async_schedule(struct work_struct *);
static void			rpc_release_task(struct rpc_task *task);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout_fn = NULL;
	task->tk_timeout = 0;
}

/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void rpc_run_timer(struct rpc_task *task)
{
	void (*callback)(struct rpc_task *);

	callback = task->tk_timeout_fn;
	task->tk_timeout_fn = NULL;
	if (callback && RPC_IS_QUEUED(task)) {
		dprintk("RPC: %5u running timer\n", task->tk_pid);
		callback(task);
	}
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
	smp_mb__after_clear_bit();
}

/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	if (timer)
		task->tk_timeout_fn = timer;
	else
		task->tk_timeout_fn = __rpc_default_timer;
	set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
	mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}

/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding queue->lock.
 */
static void
rpc_delete_timer(struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task))
		return;
	if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
		del_singleshot_timer_sync(&task->tk_timer);
		dprintk("RPC: %5u deleting timer\n", task->tk_pid);
	}
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_cookie == task->tk_cookie) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	BUG_ON (RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->u.tk_wait.rpc_waitq = queue;
	queue->qlen++;
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	queue = task->u.tk_wait.rpc_waitq;

	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
	queue->cookie = cookie;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_cookie(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = maxprio;
	rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);

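/*
 * Illustrative sketch (names are hypothetical): a subsystem that manages
 * its own wait queue declares one and initializes it once, typically at
 * module init time, before any task is put to sleep on it.
 */
static struct rpc_wait_queue example_pending_queue;

static void example_queue_init(void)
{
	/* "xmit_pending" is only a debugging label reported by dprintk() */
	rpc_init_wait_queue(&example_pending_queue, "xmit_pending");
}
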
static int rpc_wait_bit_interruptible(void *word)
{
	if (signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_magic = RPC_TASK_MAGIC_ID;
	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	struct rpc_clnt *clnt;
	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
		return;
	rpc_task_set_debuginfo(task);
	/* Add to global list of all tasks */
	clnt = task->tk_client;
	if (clnt != NULL) {
		spin_lock(&clnt->cl_lock);
		list_add_tail(&task->tk_task, &clnt->cl_tasks);
		spin_unlock(&clnt->cl_lock);
	}
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	smp_mb__after_clear_bit();
	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_interruptible;
	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__rpc_wait_for_completion_task);

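/*
 * Illustrative sketch: how a caller that launched a task asynchronously can
 * block until it completes. This assumes the caller holds its own tk_count
 * reference (taken before rpc_execute()), which is dropped here; the helper
 * name is hypothetical.
 */
static int example_wait_for_task(struct rpc_task *task)
{
	int status;

	/* Blocks until rpc_release_task() clears RPC_TASK_ACTIVE */
	status = __rpc_wait_for_completion_task(task, NULL);
	if (status == 0)
		status = task->tk_status;
	rpc_put_task(task);
	return status;
}
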
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	BUG_ON(task->tk_timeout_fn);
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;
	/* We might have raced */
	if (RPC_IS_QUEUED(task)) {
		rpc_clear_running(task);
		return;
	}
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		status = queue_work(task->tk_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	__rpc_add_wait_queue(q, task);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(task, timer);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, rpc_action timer)
{
	/* Mark the task as being activated if so needed */
	rpc_set_active(task);

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action, timer);
	spin_unlock_bh(&q->lock);
}

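/*
 * Illustrative sketch: the usual way a tk_action state routine goes to
 * sleep on a wait queue. The queue is the hypothetical one declared above;
 * whoever later calls rpc_wake_up_task() or rpc_wake_up_next() on it (or
 * the default timer, after 5 seconds) makes the task runnable again, and
 * the scheduler then invokes the next tk_action.
 */
static void example_resource_ready(struct rpc_task *task)
{
	/* Woken up (or timed out with tk_status == -ETIMEDOUT): finish up. */
	task->tk_action = NULL;
}

static void example_wait_for_resource(struct rpc_task *task)
{
	task->tk_action = example_resource_ready;
	task->tk_timeout = 5 * HZ;
	rpc_sleep_on(&example_pending_queue, task, NULL, NULL);
}
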
/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_disable_timer(task);
	__rpc_remove_wait_queue(task);

	rpc_make_runnable(task);

	dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
 * Wake up the specified task
 */
static void __rpc_wake_up_task(struct rpc_task *task)
{
	if (rpc_start_wakeup(task)) {
		if (RPC_IS_QUEUED(task))
			__rpc_do_wake_up_task(task);
		rpc_finish_wakeup(task);
	}
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
	dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid);
	task->tk_status = -ETIMEDOUT;
	rpc_wake_up_task(task);
}

/*
 * Wake up the specified task
 */
void rpc_wake_up_task(struct rpc_task *task)
{
	rcu_read_lock_bh();
	if (rpc_start_wakeup(task)) {
		if (RPC_IS_QUEUED(task)) {
			struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

			/* Note: we're already in a bh-safe context */
			spin_lock(&queue->lock);
			__rpc_do_wake_up_task(task);
			spin_unlock(&queue->lock);
		}
		rpc_finish_wakeup(task);
	}
	rcu_read_unlock_bh();
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single cookie.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->cookie == task->tk_cookie) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_cookie;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
	rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
	__rpc_wake_up_task(task);
	return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	rcu_read_lock_bh();
	spin_lock(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			__rpc_wake_up_task(task);
	}
	spin_unlock(&queue->lock);
	rcu_read_unlock_bh();

	return task;
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	rcu_read_lock_bh();
	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
			__rpc_wake_up_task(task);
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
	rcu_read_unlock_bh();
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	rcu_read_lock_bh();
	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
			task->tk_status = status;
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
	rcu_read_unlock_bh();
}

static void __rpc_atrun(struct rpc_task *task)
{
	rpc_wake_up_task(task);
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

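/*
 * Illustrative sketch: the retry idiom built on rpc_delay(). A state
 * routine that hits a transient failure clears the error, re-arms itself
 * as the next state and sleeps on the delay queue for a quarter second.
 * The routine name and the -EAGAIN condition are hypothetical.
 */
static void example_call_transmit(struct rpc_task *task)
{
	if (task->tk_status == -EAGAIN) {
		task->tk_status = 0;
		task->tk_action = example_call_transmit;
		rpc_delay(task, HZ >> 2);
		return;
	}
	task->tk_action = NULL;
}
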
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
	lock_kernel();
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
	unlock_kernel();
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		lock_kernel();
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		unlock_kernel();
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
		}
	}
}
EXPORT_SYMBOL(rpc_exit_task);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL) {
		lock_kernel();
		ops->rpc_release(calldata);
		unlock_kernel();
	}
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	int		status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {
		/*
		 * Garbage collection of pending timers...
		 */
		rpc_delete_timer(task);

		/*
		 * Execute any pending callback.
		 */
		if (RPC_DO_CALLBACK(task)) {
			/* Define a callback save pointer */
			void (*save_callback)(struct rpc_task *);

			/*
			 * If a callback exists, save it, reset it,
			 * call it.
			 * The save is needed to stop from resetting
			 * another callback set within the callback handler
			 *		- Dave
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (task->tk_action == NULL)
				break;
			task->tk_action(task);
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		rpc_clear_running(task);
		if (RPC_IS_ASYNC(task)) {
			/* Careful! we may have raced... */
			if (RPC_IS_QUEUED(task))
				return;
			if (rpc_test_and_set_running(task))
				return;
			continue;
		}

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		/* Note: Caller should be using rpc_clnt_sigmask() */
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
				TASK_INTERRUPTIBLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
			rpc_wake_up_task(task);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_set_running(task);
	__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

struct rpc_buffer {
	size_t	len;
	char	data[];
};

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC:       freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}

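/*
 * Illustrative sketch: rpc_malloc() and rpc_free() are used as a matched
 * pair for per-call scratch buffers (transports typically wire them up as
 * their buffer allocation hooks). The helper below is hypothetical and only
 * demonstrates the pairing and the NULL-on-failure contract.
 */
static int example_setup_call_buffer(struct rpc_task *task, size_t bufsiz)
{
	void *buf;

	buf = rpc_malloc(task, bufsiz);
	if (buf == NULL)
		return -ENOMEM;		/* caller may rpc_delay() and retry */

	/* ... use buf for the duration of the call ... */

	rpc_free(buf);
	return 0;
}
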
/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
	memset(task, 0, sizeof(*task));
	init_timer(&task->tk_timer);
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
	atomic_set(&task->tk_count, 1);
	task->tk_client = clnt;
	task->tk_flags  = flags;
	task->tk_ops = tk_ops;
	if (tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;
	task->tk_calldata = calldata;
	INIT_LIST_HEAD(&task->tk_task);

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	task->tk_priority = RPC_PRIORITY_NORMAL;
	task->tk_cookie = (unsigned long)current;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = rpciod_workqueue;

	if (clnt) {
		kref_get(&clnt->cl_kref);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		if (!clnt->cl_intr)
			task->tk_flags |= RPC_TASK_NOINTR;
	}

	BUG_ON(task->tk_ops == NULL);

	/* starting timestamp */
	task->tk_start = jiffies;

	dprintk("RPC:       new task initialized, procpid %u\n",
				current->pid);
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void rpc_free_task(struct rcu_head *rcu)
{
	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
	dprintk("RPC: %5u freeing task\n", task->tk_pid);
	mempool_free(task, rpc_task_mempool);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
	struct rpc_task	*task;

	task = rpc_alloc_task();
	if (!task)
		goto out;

	rpc_init_task(task, clnt, flags, tk_ops, calldata);

	dprintk("RPC:       allocated task %p\n", task);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;
}

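/*
 * Illustrative sketch: creating and launching an asynchronous task. The
 * callback table and helper name are hypothetical; the extra tk_count
 * reference taken before rpc_execute() keeps the task pinned so the caller
 * can wait on or inspect it afterwards, dropping it with rpc_put_task().
 */
static const struct rpc_call_ops example_call_ops = {
	.rpc_call_done	= NULL,		/* nothing extra to do on completion */
	.rpc_release	= NULL,
};

static struct rpc_task *example_start_task(struct rpc_clnt *clnt, void *calldata)
{
	struct rpc_task *task;

	task = rpc_new_task(clnt, RPC_TASK_ASYNC, &example_call_ops, calldata);
	if (task == NULL)
		return NULL;
	atomic_inc(&task->tk_count);	/* reference held by the caller */
	rpc_execute(task);
	return task;
}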

void rpc_put_task(struct rpc_task *task)
{
	const struct rpc_call_ops *tk_ops = task->tk_ops;
	void *calldata = task->tk_calldata;

	if (!atomic_dec_and_test(&task->tk_count))
		return;
	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}
	if (task->tk_flags & RPC_TASK_DYNAMIC)
		call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
	rpc_release_calldata(tk_ops, calldata);
}
EXPORT_SYMBOL(rpc_put_task);

static void rpc_release_task(struct rpc_task *task)
{
#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	dprintk("RPC: %5u release task\n", task->tk_pid);

	if (!list_empty(&task->tk_task)) {
		struct rpc_clnt *clnt = task->tk_client;
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
	}
	BUG_ON (RPC_IS_QUEUED(task));

	/* Synchronously delete any running timer */
	rpc_delete_timer(task);

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	/* Wake up anyone who is waiting for task completion */
	rpc_mark_complete_task(task);

	rpc_put_task(task);
}

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;

	if (list_empty(&clnt->cl_tasks))
		return;
	dprintk("RPC:       killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
		if (! RPC_IS_ACTIVATED(rovr))
			continue;
		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	spin_unlock(&clnt->cl_lock);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC:       creating workqueue rpciod\n");
	wq = create_workqueue("rpciod");
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC:       destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
}

int
rpc_init_mempool(void)
{
	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	if (!rpciod_start())
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}