/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <[email protected]>
 */

#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define svc_serv_is_pooled(serv)	((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
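
/*
 * Editorial note (illustrative, not part of the original file): because
 * pool_mode is a writable module parameter, an administrator can pick a
 * mapping before starting any pooled service, for example with something
 * like "modprobe sunrpc pool_mode=pernode" or by writing to
 * /sys/module/sunrpc/parameters/pool_mode.  Once a pooled service holds a
 * reference (svc_pool_map.count != 0), param_set_pool_mode() above refuses
 * the change with -EBUSY.
 */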

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
};


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
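
/*
 * Editorial note (illustrative, not part of the original file): as a worked
 * example of the mapping above, on a machine whose cpu-bearing NUMA nodes
 * are 0 and 2, svc_pool_map_init_pernode() leaves to_pool[0] == 0 and
 * to_pool[2] == 1, with pool_to[] == {0, 2}, and returns 2 pools.  In
 * percpu mode with online cpus 0..3, both arrays are simply {0, 1, 2, 3}.
 */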

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}


/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}


static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
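
/*
 * Editorial note (illustrative, not part of the original file): with
 * SVC_POOL_PERNODE on a two-node machine, a request arriving on a cpu that
 * sits on node 1 resolves to pidx = to_pool[1] = 1, so it is queued on
 * serv->sv_pools[1].  For an unpooled serv, svc_serv_is_pooled() is false,
 * pidx stays 0, and sv_pools[0] is always returned.
 */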

int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden == 0)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv, struct net *net))
{
	struct svc_serv *serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name = prog->pg_name;
	serv->sv_program = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize? bufsize : 4096;
	serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers=0; vers<prog->pg_nvers ; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
		serv->sv_shutdown = svc_rpcb_cleanup;

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv, struct net *net))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv, struct net *net),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
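
/*
 * Editorial note (illustrative, not part of the original file): a pooled
 * service is typically brought up by building a serv with
 * svc_create_pooled() and then sizing it with svc_set_num_threads(),
 * roughly along these lines (my_program, my_shutdown and my_thread_fn are
 * placeholder names, not symbols defined here):
 *
 *	serv = svc_create_pooled(&my_program, bufsize, my_shutdown,
 *				 my_thread_fn, THIS_MODULE);
 *	if (serv)
 *		error = svc_set_num_threads(serv, NULL, nrservs);
 */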

void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
	svc_close_net(serv, net);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv, net);
}
EXPORT_SYMBOL_GPL(svc_shutdown_net);

/*
 * Destroy an RPC service.  Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone and thus all sockets have to be destroyed
	 * by this point.  Check this.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
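
/*
 * Editorial note (illustrative, not part of the original file): for the
 * default serv->sv_max_payload of 4096 set up in __svc_create(),
 * sv_max_mesg is roundup(4096 + PAGE_SIZE, PAGE_SIZE), i.e. 8192 with 4K
 * pages, so svc_init_buffer() requests 8192 / 4096 + 1 = 3 pages per
 * thread; larger payloads scale the page count accordingly, capped at
 * RPCSVC_MAXPAGES.
 */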

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create_on_node(serv->sv_function, rqstp,
					      node, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
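
/*
 * Editorial note (illustrative, not part of the original file): the
 * "destroy old threads" loop above only signals its victim with SIGINT;
 * the service's own thread function (serv->sv_function) is expected to
 * notice the signal, finish up, and call svc_exit_thread() below, which is
 * what actually drops the pool and serv accounting for that thread.
 */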

/*
 * Called from a server thread as it's exiting.  Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
						protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
						protocol, port);
#endif
	}

	if (error < 0)
		printk(KERN_WARNING "svc: failed to register %sv%u RPC "
			"service (errno %d).\n", progname, version, -error);
	return error;
}

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program *progp;
	unsigned int i;
	int error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
					progp->pg_name,
					i,
					proto == IPPROTO_UDP? "udp" : "tcp",
					port,
					family,
					progp->pg_vers[i]->vs_hidden?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = __svc_register(net, progp->pg_name, progp->pg_prog,
						i, family, proto, port);
			if (error < 0)
				break;
		}
	}

	return error;
}

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	dprintk("svc: %s(%sv%u), error %d\n",
			__func__, progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;

			dprintk("svc: attempting to unregister %sv%u\n",
				progp->pg_name, i);
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#ifdef RPC_DEBUG
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif

/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program *progp;
	struct svc_version *versp = NULL;	/* compiler food */
	struct svc_procedure *procp = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	kxdrproc_t xdr;
	__be32 *statp;
	u32 prog, vers, proc;
	__be32 auth_stat, rpc_stat;
	int auth_res;
	__be32 *reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;
	/* Will be turned off only when NFSv4 Sessions are used */
	rqstp->rq_usedeferral = 1;
	rqstp->rq_dropme = false;

	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	progp = serv->sv_program;

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
			svc_close_xprt(rqstp->rq_xprt);
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	  !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base +resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (rqstp->rq_dropme) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success &&
		    (xdr = procp->pc_encode) &&
		    !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return 1;		/* Caller can now send it */

dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %Zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
EXPORT_SYMBOL_GPL(svc_process);

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];
	struct svc_serv *serv = rqstp->rq_server;
	u32 dir;

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_next_page = &rqstp->rq_respages[1];
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	rqstp->rq_xid = svc_getu32(argv);

	dir = svc_getnl(argv);
	if (dir != 0) {
		/* direction != CALL */
		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
		serv->sv_stats->rpcbadfmt++;
		svc_drop(rqstp);
		return 0;
	}

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv))
		return svc_send(rqstp);
	else {
		svc_drop(rqstp);
		return 0;
	}
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xprt = serv->sv_bc_xprt;
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	if (rqstp->rq_prot != IPPROTO_TCP) {
		printk(KERN_ERR "No support for Non-TCP transports!\n");
		BUG();
	}

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv)) {
		memcpy(&req->rq_snd_buf, &rqstp->rq_res,
		       sizeof(req->rq_snd_buf));
		return bc_send(req);
	} else {
		/* drop request */
		xprt_free_bc_request(req);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);