/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP
static void svc_unregister(const struct svc_serv *serv);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}
static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
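
/*
 * Illustrative note (not part of the original file): because the parameter
 * is declared above with mode 0644, the pool mode can be selected either on
 * the kernel command line / modprobe line, e.g.
 *
 *	sunrpc.pool_mode=pernode
 *
 * or, before the first pooled service is created, via sysfs:
 *
 *	echo percpu > /sys/module/sunrpc/parameters/pool_mode
 *
 * Once a pooled service holds a reference (svc_pool_map.count != 0),
 * param_set_pool_mode() returns -EBUSY and the mode can no longer change.
 */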
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		m->mode = SVC_POOL_DEFAULT;
		kfree(m->to_pool);
		kfree(m->pool_to);
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
}
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
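
/*
 * Worked example (illustrative, not from the original file): with
 * SVC_POOL_PERNODE on a two-node machine, svc_pool_map_init_pernode()
 * fills to_pool[] so that node 0 maps to pool 0 and node 1 to pool 1.
 * An RPC arriving on, say, cpu 5 of node 1 is then enqueued on
 * &serv->sv_pools[to_pool[cpu_to_node(5)]], i.e. the node-local pool,
 * keeping request processing NUMA-local.
 */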
static int svc_rpcb_setup(struct svc_serv *serv)
{
	int err;

	err = rpcb_create_local();
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv);
	return 0;
}

static void svc_rpcb_cleanup(struct svc_serv *serv)
{
	svc_unregister(serv);
	rpcb_put_local();
}

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden == 0)
				return 1;
		}
	}

	return 0;
}
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers=0; vers<prog->pg_nvers ; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_unregister(serv);

	return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
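
/*
 * Usage sketch (illustrative only; the program, buffer size, shutdown and
 * thread function names below are placeholders, not taken from this file):
 *
 *	static int my_thread_fn(void *data);	// loops over svc_recv()/svc_process()
 *
 *	serv = svc_create_pooled(&my_svc_program, my_bufsize,
 *				 my_shutdown_fn, my_thread_fn, THIS_MODULE);
 *	if (!serv)
 *		return -ENOMEM;
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *
 * This is roughly the pattern nfsd follows; single-pool services such as
 * lockd use svc_create() instead and start their one thread by hand.
 */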
/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	svc_close_all(&serv->sv_tempsocks);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	svc_close_all(&serv->sv_permsocks);

	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	svc_unregister(serv);
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL or another lock to protect access to svc_serv fields.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		rqstp = svc_prepare_thread(serv, chosen_pool);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
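
/*
 * Example of the bookkeeping above (illustrative): if the serv currently has
 * four worker threads plus the svc_get() reference (sv_nrthreads == 5) and
 * the caller asks for nrservs = 6 with pool == NULL, then
 * nrservs -= (sv_nrthreads - 1) leaves nrservs == 2, so two new threads are
 * created; a request for 2 would leave nrservs == -2 and two victims would
 * be sent SIGINT instead.
 */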
/*
 * Called from a server thread as it's exiting. Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
				(const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, protocol, port);

	return error;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(program, version,
				(const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(program, version,
						protocol, port);
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case PF_INET6:
		error = __svc_rpcb_register6(program, version,
						protocol, port);
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
	}

	if (error < 0)
		printk(KERN_WARNING "svc: failed to register %sv%u RPC "
			"service (errno %d).\n", progname, version, -error);
	return error;
}
/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, const int family,
		 const unsigned short proto, const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	BUG_ON(proto == 0 && port == 0);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
					progp->pg_name,
					i,
					proto == IPPROTO_UDP?  "udp" : "tcp",
					port,
					family,
					progp->pg_vers[i]->vs_hidden?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = __svc_register(progp->pg_name, progp->pg_prog,
						i, family, proto, port);
			if (error < 0)
				break;
		}
	}

	return error;
}
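
/*
 * Illustrative example (not from the original file): registering an NFS
 * version 3 listener on an AF_INET TCP socket bound to port 2049 ends up in
 * __svc_rpcb_register4(100003, 3, IPPROTO_TCP, 2049), which issues an
 * rpcbind v4 SET for netid "tcp" and falls back to a portmap v2 SET if the
 * local rpcbind only speaks version 2.
 */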
/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(program, version, 0, 0);

	dprintk("svc: %s(%sv%u), error %d\n",
			__func__, progname, version, error);
}
/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;

			dprintk("svc: attempting to unregister %sv%u\n",
				progp->pg_name, i);
			__svc_unregister(progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
 * Printk the given error with the address of the client that caused it.
 */
static int
__attribute__ ((format (printf, 2, 3)))
svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	va_list args;
	int 	r;
	char 	buf[RPC_MAX_ADDRBUFLEN];

	if (!net_ratelimit())
		return 0;

	printk(KERN_WARNING "svc: %s: ",
		svc_print_addr(rqstp, buf, sizeof(buf)));

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}
/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;
	/* Will be turned off only when NFSv4 Sessions are used */
	rqstp->rq_usedeferral = 1;
	rqstp->rq_dropme = false;

	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	progp = serv->sv_program;

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
			svc_close_xprt(rqstp->rq_xprt);
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	  !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base +resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (rqstp->rq_dropme) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success &&
		    (xdr = procp->pc_encode) &&
		    !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp)  - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return 1;		/* Caller can now send it */

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %Zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
EXPORT_SYMBOL_GPL(svc_process);
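
/*
 * For reference (explanatory note, not part of the original file): by the
 * time svc_process_common() runs, svc_process()/bc_svc_process() have
 * already consumed the xid and direction words, so argv starts at the
 * fixed remainder of the ONC RPC call header (RFC 5531):
 *
 *	rpcvers (must be 2), prog, vers, proc, credential, verifier
 *
 * The "argv->iov_len < 6*4" test above is a cheap sanity check that at
 * least six 32-bit words of that header are present before decoding begins.
 */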
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		*resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	u32			dir;

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	rqstp->rq_xid = svc_getu32(argv);

	dir  = svc_getnl(argv);
	if (dir != 0) {
		/* direction != CALL */
		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
		serv->sv_stats->rpcbadfmt++;
		svc_drop(rqstp);
		return 0;
	}

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv))
		return svc_send(rqstp);
	else {
		svc_drop(rqstp);
		return 0;
	}
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xprt = serv->sv_bc_xprt;
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	if (rqstp->rq_prot != IPPROTO_TCP) {
		printk(KERN_ERR "No support for Non-TCP transports!\n");
		BUG();
	}

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv)) {
		memcpy(&req->rq_snd_buf, &rqstp->rq_res,
						sizeof(req->rq_snd_buf));
		return bc_send(req);
	} else {
		/* Nothing to do to drop request */
		return 0;
	}
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);
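
/*
 * Worked example (illustrative): __svc_create() clamps an over-large
 * bufsize to RPCSVC_MAXPAYLOAD and rounds sv_max_mesg up to a whole number
 * of pages with one page of headroom.  svc_max_payload() then returns the
 * smaller of the transport class limit (xcl_max_payload) and
 * sv_max_payload, so e.g. a UDP transport still caps replies at its own
 * lower limit even when the serv itself was sized for TCP.
 */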