/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP
static void svc_unregister(const struct svc_serv *serv);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}
static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}
module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map.mode, 0644);
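
/*
 * Illustrative usage (a sketch, not part of the code): because
 * param_set_pool_mode() refuses changes while svc_pool_map.count is
 * non-zero, the mode can only be chosen before the first pooled
 * service is created, e.g.
 *
 *	# modprobe sunrpc pool_mode=pernode
 *
 * or, via the sysfs file this 0644 parameter exposes:
 *
 *	# echo pernode > /sys/module/sunrpc/parameters/pool_mode
 */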
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		return -ENOMEM;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		return -ENOMEM;
	}
	return 0;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		m->mode = SVC_POOL_DEFAULT;
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
}
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
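
/*
 * Illustrative caller (a sketch of how the transport enqueue path in
 * svc_xprt.c uses this mapping; "xprt" is assumed to carry a valid
 * xpt_server pointer):
 *
 *	cpu = get_cpu();
 *	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 *	put_cpu();
 */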
static int svc_rpcb_setup(struct svc_serv *serv)
{
	int err;

	err = rpcb_create_local();
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv);
	return 0;
}

void svc_rpcb_cleanup(struct svc_serv *serv)
{
	svc_unregister(serv);
	rpcb_put_local();
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden == 0)
				return 1;
		}
	}

	return 0;
}
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	if (svc_uses_rpcbind(serv)) {
		if (svc_rpcb_setup(serv) < 0) {
			kfree(serv->sv_pools);
			kfree(serv);
			return NULL;
		}
		if (!serv->sv_shutdown)
			serv->sv_shutdown = svc_rpcb_cleanup;
	}

	return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);
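
/*
 * Illustrative caller (a sketch, not part of this file): an unpooled
 * service such as lockd creates its svc_serv roughly like this,
 * assuming the nlmsvc_program and LOCKD_BUFSIZE definitions from
 * fs/lockd:
 *
 *	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
 *	if (!serv)
 *		return -ENOMEM;
 */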
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
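
/*
 * Illustrative caller (a sketch, not part of this file): nfsd is the
 * typical pooled service; assuming its nfsd_program, nfsd() thread
 * function and nfsd_last_thread() shutdown callback, creation looks
 * roughly like:
 *
 *	serv = svc_create_pooled(&nfsd_program, bufsize,
 *				 nfsd_last_thread, nfsd, THIS_MODULE);
 *	if (serv == NULL)
 *		return -ENOMEM;
 */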
/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);
	/*
	 * The set of xprts (contained in the sv_tempsocks and
	 * sv_permsocks lists) is now constant, since it is modified
	 * only by accepting new sockets (done by service threads in
	 * svc_recv) or aging old ones (done by sv_temptimer), or
	 * configuration changes (excluded by whatever locking the
	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
	 * safe to traverse those lists and shut everything down:
	 */
	svc_close_all(serv);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
				       * and reply.  We assume each is at
				       * most one page.
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL or another lock to protect access to svc_serv fields.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create_on_node(serv->sv_function, rqstp,
					      node, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
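
/*
 * Illustrative caller (a sketch, not part of this file): under its
 * service mutex, nfsd resizes the whole thread herd with a single
 * call, e.g. to ask for "nrservs" threads across all pools:
 *
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *
 * or, to resize only pool i:
 *
 *	error = svc_set_num_threads(serv, &serv->sv_pools[i], nrservs);
 */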
/*
 * Called from a server thread as it's exiting. Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, protocol, port);

	return error;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(program, version,
						protocol, port);
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case PF_INET6:
		error = __svc_rpcb_register6(program, version,
						protocol, port);
		break;
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
	}

	if (error < 0)
		printk(KERN_WARNING "svc: failed to register %sv%u RPC "
			"service (errno %d).\n", progname, version, -error);
	return error;
}
/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, const int family,
		 const unsigned short proto, const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	BUG_ON(proto == 0 && port == 0);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
					progp->pg_name,
					i,
					proto == IPPROTO_UDP ? "udp" : "tcp",
					port,
					family,
					progp->pg_vers[i]->vs_hidden ?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = __svc_register(progp->pg_name, progp->pg_prog,
						i, family, proto, port);
			if (error < 0)
				break;
		}
	}

	return error;
}
/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, 0, 0);

	dprintk("svc: %s(%sv%u), error %d\n",
			__func__, progname, version, error);
}
/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;

			dprintk("svc: attempting to unregister %sv%u\n",
				progp->pg_name, i);
			__svc_unregister(progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
 * Printk the given error with the address of the client that caused it.
 */
static __printf(2, 3)
int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	va_list args;
	int 	r;
	char 	buf[RPC_MAX_ADDRBUFLEN];

	if (!net_ratelimit())
		return 0;

	printk(KERN_WARNING "svc: %s: ",
		svc_print_addr(rqstp, buf, sizeof(buf)));

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}
/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;
	/* Will be turned off only when NFSv4 Sessions are used */
	rqstp->rq_usedeferral = 1;
	rqstp->rq_dropme = false;

	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
			svc_close_xprt(rqstp->rq_xprt);
		/* fall through */
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	  !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (rqstp->rq_dropme) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success &&
		    (xdr = procp->pc_encode) &&
		    !xdr(rqstp, resv->iov_base + resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return 1;		/* Caller can now send it */

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %Zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
EXPORT_SYMBOL_GPL(svc_process);
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		*resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	u32			dir;

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	rqstp->rq_xid = svc_getu32(argv);

	dir = svc_getnl(argv);
	if (dir != 0) {
		/* direction != CALL */
		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
		serv->sv_stats->rpcbadfmt++;
		svc_drop(rqstp);
		return 0;
	}

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv))
		return svc_send(rqstp);

	svc_drop(rqstp);
	return 0;
}
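
/*
 * Illustrative service thread body (a sketch, not part of this file):
 * the function handed to svc_create_pooled() typically loops over
 * svc_recv()/svc_process() until asked to stop, then exits via
 * svc_exit_thread(), e.g.
 *
 *	while (!kthread_should_stop()) {
 *		err = svc_recv(rqstp, 60*60*HZ);
 *		if (err == -EAGAIN || err == -EINTR)
 *			continue;
 *		if (err < 0)
 *			break;
 *		svc_process(rqstp);
 *	}
 *	svc_exit_thread(rqstp);
 */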
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xprt = serv->sv_bc_xprt;
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	if (rqstp->rq_prot != IPPROTO_TCP) {
		printk(KERN_ERR "No support for Non-TCP transports!\n");
		BUG();
	}

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv)) {
		memcpy(&req->rq_snd_buf, &rqstp->rq_res,
						sizeof(req->rq_snd_buf));
		return bc_send(req);
	} else {
		/* drop request */
		xprt_free_bc_request(req);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);