// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 */
#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>
#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define svc_serv_is_pooled(serv)    ((serv)->sv_ops->svo_function)

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};
EXPORT_SYMBOL_GPL(svc_pool_map);

static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */
static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}
static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
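
/*
 * Usage sketch (illustrative, not part of the original source): since the
 * parameter is registered with mode 0644, an administrator can select the
 * pool mode before any pooled service starts, for example:
 *
 *	# modprobe sunrpc pool_mode=pernode
 *	# echo percpu > /sys/module/sunrpc/parameters/pool_mode
 *
 * Once svc_pool_map.count is non-zero, param_set_pool_mode() above rejects
 * further writes with -EBUSY until the last pooled service is destroyed.
 */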
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
EXPORT_SYMBOL_GPL(svc_pool_map_get);
/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
EXPORT_SYMBOL_GPL(svc_pool_map_put);
static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
}
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
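
/*
 * Worked example (illustrative): in SVC_POOL_PERNODE mode on a two-node
 * machine, a request arriving on a CPU that sits on NUMA node 1 yields
 * pidx = m->to_pool[1] = 1, so it is enqueued on serv->sv_pools[1].  In
 * SVC_POOL_GLOBAL mode pidx stays 0 and every CPU funnels into the
 * single pool.
 */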
int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program	*progp;
	unsigned int		i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	INIT_LIST_HEAD(&serv->sv_cb_list);
	spin_lock_init(&serv->sv_cb_lock);
	init_waitqueue_head(&serv->sv_cb_waitq);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     const struct svc_serv_ops *ops)
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_ops = ops;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   const struct svc_serv_ops *ops)
{
	return __svc_create(prog, bufsize, /*npools*/1, ops);
}
EXPORT_SYMBOL_GPL(svc_create);
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  const struct svc_serv_ops *ops)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, ops);
	if (!serv)
		goto out_err;
	return serv;
out_err:
	svc_pool_map_put();
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
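
/*
 * Illustrative caller (a sketch, not from this file): a service such as
 * knfsd fills in a svc_serv_ops with its thread function and module, then
 * creates a pooled server.  The names below are hypothetical stand-ins:
 *
 *	static const struct svc_serv_ops my_serv_ops = {
 *		.svo_shutdown	= svc_rpcb_cleanup,
 *		.svo_function	= my_thread_fn,
 *		.svo_module	= THIS_MODULE,
 *	};
 *
 *	serv = svc_create_pooled(&my_program, 64 * 1024, &my_serv_ops);
 *
 * Note that svc_serv_is_pooled() keys off svo_function being non-NULL.
 */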
void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
	svc_close_net(serv, net);

	if (serv->sv_ops->svo_shutdown)
		serv->sv_ops->svo_shutdown(serv, net);
}
EXPORT_SYMBOL_GPL(svc_shutdown_net);
/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone and thus all sockets have to be destroyed
	 * by this point. Check this.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0; /* success if we allocated all pages */
}
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	__set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_lock_init(&rqstp->rq_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	return rqstp;
out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = svc_rqst_alloc(serv, pool, node);
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	return rqstp;
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/* create new threads */
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	do {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp))
			return PTR_ERR(rqstp);

		__module_get(serv->sv_ops->svo_module);
		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			module_put(serv->sv_ops->svo_module);
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	} while (nrservs > 0);

	return 0;
}
/* destroy old threads */
static int
svc_signal_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads-1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		send_sig(SIGINT, task, 1);
		nrservs++;
	} while (nrservs < 0);

	return 0;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_signal_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
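
/*
 * Worked example (illustrative): with the one pre-existing svc_get()
 * reference, a serv running 4 threads has sv_nrthreads == 5.  A request
 * for nrservs = 8 yields a delta of 8 - (5 - 1) = 4, so four kthreads
 * are started; a request for 2 yields a negative delta and two victims
 * are signalled with SIGINT instead.
 */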
/* destroy old threads */
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads-1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		kthread_stop(task);
		nrservs++;
	} while (nrservs < 0);
	return 0;
}
int
svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_stop_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);
/*
 * Called from a server thread as it's exiting. Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}
EXPORT_SYMBOL_GPL(svc_rqst_free);
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
		list_del_rcu(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	svc_rqst_free(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
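
/*
 * For orientation, a minimal sketch (hypothetical, modelled on how lockd
 * and knfsd structure their svo_function) of the thread body whose exit
 * path lands here:
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		struct svc_rqst *rqstp = data;
 *		int err;
 *
 *		while (!kthread_should_stop()) {
 *			err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
 *			if (err == -EINTR)
 *				break;
 *			svc_process(rqstp);
 *		}
 *		svc_exit_thread(rqstp);
 *		return 0;
 *	}
 */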
/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}
#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */
/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
						protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
						protocol, port);
#endif
	}

	return error;
}
int svc_rpcbind_set_version(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	dprintk("svc: svc_register(%sv%d, %s, %u, %u)\n",
		progp->pg_name, version,
		proto == IPPROTO_UDP ? "udp" : "tcp",
		port, family);

	return __svc_register(net, progp->pg_name, progp->pg_prog,
				version, family, proto, port);
}
EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);
int svc_generic_rpcbind_set(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	const struct svc_version *vers = progp->pg_vers[version];
	int error;

	if (vers == NULL)
		return 0;

	if (vers->vs_hidden) {
		dprintk("svc: svc_register(%sv%d, %s, %u, %u)"
			" (but not telling portmap)\n",
			progp->pg_name, version,
			proto == IPPROTO_UDP ? "udp" : "tcp",
			port, family);
		return 0;
	}

	/*
	 * Don't register a UDP port if we need congestion
	 * control.
	 */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;

	error = svc_rpcbind_set_version(net, progp, version,
					family, proto, port);

	return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);
/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {

			error = progp->pg_rpcbind_set(net, progp, i,
					family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
				break;
			}
		}
	}

	return error;
}
/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	dprintk("svc: %s(%sv%u), error %d\n",
			__func__, progname, version, error);
}
/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;

			dprintk("svc: attempting to unregister %sv%u\n",
				progp->pg_name, i);
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char	buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif
__be32
svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err)
{
	set_bit(RQ_AUTHERR, &rqstp->rq_flags);
	return auth_err;
}
EXPORT_SYMBOL_GPL(svc_return_autherr);

static __be32
svc_get_autherr(struct svc_rqst *rqstp, __be32 *statp)
{
	if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags))
		return *statp;
	return rpc_auth_ok;
}
static int
svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];
	const struct svc_procedure *procp = rqstp->rq_procinfo;

	/*
	 * Decode arguments.
	 * XXX: why do we ignore the return value?
	 */
	if (procp->pc_decode &&
	    !procp->pc_decode(rqstp, argv->iov_base)) {
		*statp = rpc_garbage_args;
		return 1;
	}

	*statp = procp->pc_func(rqstp);

	if (*statp == rpc_drop_reply ||
	    test_bit(RQ_DROPME, &rqstp->rq_flags))
		return 0;

	if (test_bit(RQ_AUTHERR, &rqstp->rq_flags))
		return 1;

	if (*statp != rpc_success)
		return 1;

	/* Encode reply */
	if (procp->pc_encode &&
	    !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
		dprintk("svc: failed to encode reply\n");
		/* serv->sv_stats->rpcsystemerr++; */
		*statp = rpc_system_err;
	}
	return 1;
}
__be32
svc_generic_init_request(struct svc_rqst *rqstp,
		const struct svc_program *progp,
		struct svc_process_info *ret)
{
	const struct svc_version *versp = NULL;	/* compiler food */
	const struct svc_procedure *procp = NULL;

	if (rqstp->rq_vers >= progp->pg_nvers)
		goto err_bad_vers;
	versp = progp->pg_vers[rqstp->rq_vers];
	if (!versp)
		goto err_bad_vers;

	/*
	 * Some protocol versions (namely NFSv4) require some form of
	 * congestion control.  (See RFC 7530 section 3.1 paragraph 2)
	 * In other words, UDP is not allowed. We mark those when setting
	 * up the svc_xprt, and verify that here.
	 *
	 * The spec is not very clear about what error should be returned
	 * when someone tries to access a server that is listening on UDP
	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
	 * fit.
	 */
	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
		goto err_bad_vers;

	if (rqstp->rq_proc >= versp->vs_nproc)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
	if (!procp)
		goto err_bad_proc;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* Bump per-procedure stats counter */
	versp->vs_count[rqstp->rq_proc]++;

	ret->dispatch = versp->vs_dispatch;
	return rpc_success;
err_bad_vers:
	ret->mismatch.lovers = progp->pg_lovers;
	ret->mismatch.hivers = progp->pg_hivers;
	return rpc_prog_mismatch;
err_bad_proc:
	return rpc_proc_unavail;
}
EXPORT_SYMBOL_GPL(svc_generic_init_request);
/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program	*progp;
	const struct svc_procedure *procp = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_process_info process;
	__be32			*statp;
	u32			prog, vers;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off by GSS integrity and privacy services */
	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
	/* Will be turned off only when NFSv4 Sessions are used */
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	if (auth_res != SVC_OK)
		trace_svc_authenticate(rqstp, auth_res, auth_stat);
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		goto close;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	rpc_stat = progp->pg_init_request(rqstp, progp, &process);
	switch (rpc_stat) {
	case rpc_success:
		break;
	case rpc_prog_unavail:
		goto err_bad_prog;
	case rpc_prog_mismatch:
		goto err_bad_vers;
	case rpc_proc_unavail:
		goto err_bad_proc;
	}

	procp = rqstp->rq_procinfo;
	/* Should this check go into the dispatcher? */
	if (!procp || !procp->pc_func)
		goto err_bad_proc;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;
	trace_svc_process(rqstp, progp->pg_name);

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!process.dispatch) {
		if (!svc_generic_dispatch(rqstp, statp))
			goto release_dropit;
		if (*statp == rpc_garbage_args)
			goto err_garbage;
		auth_stat = svc_get_autherr(rqstp, statp);
		if (auth_stat != rpc_auth_ok)
			goto err_release_bad_auth;
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!process.dispatch(rqstp, statp))
			goto release_dropit; /* Release reply info */
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto close;
	return 1;		/* Caller can now send it */

release_dropit:
	if (procp->pc_release)
		procp->pc_release(rqstp);
 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

 close:
	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_close_xprt(rqstp->rq_xprt);
	dprintk("svc: svc_process close\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %zd, dropping request\n",
			argv->iov_len);
	goto close;

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_release_bad_auth:
	if (procp->pc_release)
		procp->pc_release(rqstp);
err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, process.mismatch.lovers);
	svc_putnl(resv, process.mismatch.hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		*resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	u32			dir;

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_next_page = &rqstp->rq_respages[1];
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	dir = svc_getnl(argv);
	if (dir != 0) {
		/* direction != CALL */
		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
		serv->sv_stats->rpcbadfmt++;
		goto out_drop;
	}

	/* Returns 1 for send, 0 for drop */
	if (likely(svc_process_common(rqstp, argv, resv)))
		return svc_send(rqstp);

out_drop:
	svc_drop(rqstp);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_process);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct rpc_task *task;
	int proc_error;
	int error;

	dprintk("svc: %s(%p)\n", __func__, req);

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;
	rqstp->rq_bc_net = req->rq_xprt->xprt_net;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
			rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len;

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Parse and execute the bc call */
	proc_error = svc_process_common(rqstp, argv, resv);

	atomic_dec(&req->rq_xprt->bc_slot_count);
	if (!proc_error) {
		/* Processing error: drop the request */
		xprt_free_bc_request(req);
		error = -EINVAL;
		goto out;
	}
	/* Finally, send the reply synchronously */
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	task = rpc_run_bc_task(req);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		goto out;
	}

	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
	error = task->tk_status;
	rpc_put_task(task);

out:
	dprintk("svc: %s(), error=%d\n", __func__, error);
	return error;
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);
/**
 * svc_encode_read_payload - mark a range of bytes as a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success, or a negative errno if a permanent
 * error occurred.
 */
int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length);
}
EXPORT_SYMBOL_GPL(svc_encode_read_payload);
/**
 * svc_fill_write_vector - Construct data argument for VFS write call
 * @rqstp: svc_rqst to operate on
 * @pages: list of pages containing data payload
 * @first: buffer containing first section of write payload
 * @total: total number of bytes of write payload
 *
 * Fills in rqstp::rq_vec, and returns the number of elements.
 */
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages,
				   struct kvec *first, size_t total)
{
	struct kvec *vec = rqstp->rq_vec;
	unsigned int i;

	/* Some types of transport can present the write payload
	 * entirely in rq_arg.pages. In this case, @first is empty.
	 */
	i = 0;
	if (first->iov_len) {
		vec[i].iov_base = first->iov_base;
		vec[i].iov_len = min_t(size_t, total, first->iov_len);
		total -= vec[i].iov_len;
		++i;
	}

	while (total) {
		vec[i].iov_base = page_address(*pages);
		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
		total -= vec[i].iov_len;
		++i;
		++pages;
	}

	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
	return i;
}
EXPORT_SYMBOL_GPL(svc_fill_write_vector);
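
/*
 * A typical consumer (a sketch modelled on how nfsd feeds the result to
 * the VFS; names outside this file are used for illustration only):
 *
 *	struct kvec *first = &rqstp->rq_arg.head[0];
 *	unsigned int nvecs = svc_fill_write_vector(rqstp,
 *						   rqstp->rq_arg.pages,
 *						   first, payload_len);
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE, rqstp->rq_vec, nvecs, payload_len);
 *	// ...then vfs_iter_write(file, &iter, &pos, flags);
 */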
/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * @rqstp: svc_rqst to operate on
 * @first: buffer containing first section of pathname
 * @p: buffer containing remaining section of pathname
 * @total: total length of the pathname argument
 *
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
 * the returned string.
 */
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
				void *p, size_t total)
{
	size_t len, remaining;
	char *result, *dst;

	result = kmalloc(total + 1, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ESERVERFAULT);

	dst = result;
	remaining = total;

	len = min_t(size_t, total, first->iov_len);
	if (len) {
		memcpy(dst, first->iov_base, len);
		dst += len;
		remaining -= len;
	}

	if (remaining) {
		len = min_t(size_t, remaining, PAGE_SIZE);
		memcpy(dst, p, len);
		dst += len;
	}

	*dst = '\0';

	/* Sanity check: Linux doesn't allow the pathname argument to
	 * contain a NUL byte.
	 */
	if (strlen(result) != total) {
		kfree(result);
		return ERR_PTR(-EINVAL);
	}
	return result;
}
EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
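
/*
 * Usage sketch (illustrative): an NFS symlink handler would hand its
 * decoded buffers to this helper and release the result after the VFS
 * call, e.g.:
 *
 *	char *path = svc_fill_symlink_pathname(rqstp, &head, page_data, len);
 *	if (IS_ERR(path))
 *		return nfserrno(PTR_ERR(path));
 *	// ...perform the symlink operation...
 *	kfree(path);
 */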