SUNRPC: Fix RPC call retransmission statistics
pandora-kernel.git: net/sunrpc/clnt.c
1 /*
2  *  linux/net/sunrpc/clnt.c
3  *
4  *  This file contains the high-level RPC interface.
5  *  It is modeled as a finite state machine to support both synchronous
6  *  and asynchronous requests.
7  *
8  *  -   RPC header generation and argument serialization.
9  *  -   Credential refresh.
10  *  -   TCP connect handling.
11  *  -   Retry of operation when it is suspected the operation failed because
12  *      of uid squashing on the server, or when the credentials were stale
13  *      and need to be refreshed, or when a packet was damaged in transit.
14  *      This may have to be moved to the VFS layer.
15  *
16  *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
17  *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
18  */
19
20
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kallsyms.h>
24 #include <linux/mm.h>
25 #include <linux/namei.h>
26 #include <linux/mount.h>
27 #include <linux/slab.h>
28 #include <linux/utsname.h>
29 #include <linux/workqueue.h>
30 #include <linux/in.h>
31 #include <linux/in6.h>
32 #include <linux/un.h>
33 #include <linux/rcupdate.h>
34
35 #include <linux/sunrpc/clnt.h>
36 #include <linux/sunrpc/addr.h>
37 #include <linux/sunrpc/rpc_pipe_fs.h>
38 #include <linux/sunrpc/metrics.h>
39 #include <linux/sunrpc/bc_xprt.h>
40 #include <trace/events/sunrpc.h>
41
42 #include "sunrpc.h"
43 #include "netns.h"
44
45 #ifdef RPC_DEBUG
46 # define RPCDBG_FACILITY        RPCDBG_CALL
47 #endif
48
49 #define dprint_status(t)                                        \
50         dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,         \
51                         __func__, t->tk_status)
52
53 /*
54  * rpc_shutdown_client() waits on this queue for a client's tasks to exit
55  */
56
57 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
58
59
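/*
 * Normal state progression for an outgoing call.  Each call_* routine
 * below sets tk_action to the next state; error paths may revisit
 * earlier states or exit the task early:
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_refresh
 *	  -> call_refreshresult -> call_allocate -> call_bind
 *	  (-> call_bind_status) -> call_connect (-> call_connect_status)
 *	  -> call_transmit -> call_transmit_status -> call_status
 *	  -> call_decode
 */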
60 static void     call_start(struct rpc_task *task);
61 static void     call_reserve(struct rpc_task *task);
62 static void     call_reserveresult(struct rpc_task *task);
63 static void     call_allocate(struct rpc_task *task);
64 static void     call_decode(struct rpc_task *task);
65 static void     call_bind(struct rpc_task *task);
66 static void     call_bind_status(struct rpc_task *task);
67 static void     call_transmit(struct rpc_task *task);
68 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
69 static void     call_bc_transmit(struct rpc_task *task);
70 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
71 static void     call_status(struct rpc_task *task);
72 static void     call_transmit_status(struct rpc_task *task);
73 static void     call_refresh(struct rpc_task *task);
74 static void     call_refreshresult(struct rpc_task *task);
75 static void     call_timeout(struct rpc_task *task);
76 static void     call_connect(struct rpc_task *task);
77 static void     call_connect_status(struct rpc_task *task);
78
79 static __be32   *rpc_encode_header(struct rpc_task *task);
80 static __be32   *rpc_verify_header(struct rpc_task *task);
81 static int      rpc_ping(struct rpc_clnt *clnt);
82
83 static void rpc_register_client(struct rpc_clnt *clnt)
84 {
85         struct net *net = rpc_net_ns(clnt);
86         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
87
88         spin_lock(&sn->rpc_client_lock);
89         list_add(&clnt->cl_clients, &sn->all_clients);
90         spin_unlock(&sn->rpc_client_lock);
91 }
92
93 static void rpc_unregister_client(struct rpc_clnt *clnt)
94 {
95         struct net *net = rpc_net_ns(clnt);
96         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
97
98         spin_lock(&sn->rpc_client_lock);
99         list_del(&clnt->cl_clients);
100         spin_unlock(&sn->rpc_client_lock);
101 }
102
103 static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
104 {
105         rpc_remove_client_dir(clnt);
106 }
107
108 static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
109 {
110         struct net *net = rpc_net_ns(clnt);
111         struct super_block *pipefs_sb;
112
113         pipefs_sb = rpc_get_sb_net(net);
114         if (pipefs_sb) {
115                 __rpc_clnt_remove_pipedir(clnt);
116                 rpc_put_sb_net(net);
117         }
118 }
119
120 static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
121                                     struct rpc_clnt *clnt)
122 {
123         static uint32_t clntid;
124         const char *dir_name = clnt->cl_program->pipe_dir_name;
125         char name[15];
126         struct dentry *dir, *dentry;
127
128         dir = rpc_d_lookup_sb(sb, dir_name);
129         if (dir == NULL) {
130                 pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
131                 return dir;
132         }
133         for (;;) {
134                 snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
135                 name[sizeof(name) - 1] = '\0';
136                 dentry = rpc_create_client_dir(dir, name, clnt);
137                 if (!IS_ERR(dentry))
138                         break;
139                 if (dentry == ERR_PTR(-EEXIST))
140                         continue;
141                 printk(KERN_INFO "RPC: Couldn't create pipefs entry"
142                                 " %s/%s, error %ld\n",
143                                 dir_name, name, PTR_ERR(dentry));
144                 break;
145         }
146         dput(dir);
147         return dentry;
148 }
149
150 static int
151 rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
152 {
153         struct dentry *dentry;
154
155         if (clnt->cl_program->pipe_dir_name != NULL) {
156                 dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
157                 if (IS_ERR(dentry))
158                         return PTR_ERR(dentry);
159         }
160         return 0;
161 }
162
163 static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
164 {
165         if (clnt->cl_program->pipe_dir_name == NULL)
166                 return 1;
167
168         switch (event) {
169         case RPC_PIPEFS_MOUNT:
170                 if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
171                         return 1;
172                 if (atomic_read(&clnt->cl_count) == 0)
173                         return 1;
174                 break;
175         case RPC_PIPEFS_UMOUNT:
176                 if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
177                         return 1;
178                 break;
179         }
180         return 0;
181 }
182
183 static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
184                                    struct super_block *sb)
185 {
186         struct dentry *dentry;
187         int err = 0;
188
189         switch (event) {
190         case RPC_PIPEFS_MOUNT:
191                 dentry = rpc_setup_pipedir_sb(sb, clnt);
192                 if (!dentry)
193                         return -ENOENT;
194                 if (IS_ERR(dentry))
195                         return PTR_ERR(dentry);
196                 break;
197         case RPC_PIPEFS_UMOUNT:
198                 __rpc_clnt_remove_pipedir(clnt);
199                 break;
200         default:
201                 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
202                 return -ENOTSUPP;
203         }
204         return err;
205 }
206
207 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
208                                 struct super_block *sb)
209 {
210         int error = 0;
211
212         for (;; clnt = clnt->cl_parent) {
213                 if (!rpc_clnt_skip_event(clnt, event))
214                         error = __rpc_clnt_handle_event(clnt, event, sb);
215                 if (error || clnt == clnt->cl_parent)
216                         break;
217         }
218         return error;
219 }
220
221 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
222 {
223         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
224         struct rpc_clnt *clnt;
225
226         spin_lock(&sn->rpc_client_lock);
227         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
228                 if (rpc_clnt_skip_event(clnt, event))
229                         continue;
230                 spin_unlock(&sn->rpc_client_lock);
231                 return clnt;
232         }
233         spin_unlock(&sn->rpc_client_lock);
234         return NULL;
235 }
236
237 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
238                             void *ptr)
239 {
240         struct super_block *sb = ptr;
241         struct rpc_clnt *clnt;
242         int error = 0;
243
244         while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
245                 error = __rpc_pipefs_event(clnt, event, sb);
246                 if (error)
247                         break;
248         }
249         return error;
250 }
251
252 static struct notifier_block rpc_clients_block = {
253         .notifier_call  = rpc_pipefs_event,
254         .priority       = SUNRPC_PIPEFS_RPC_PRIO,
255 };
256
257 int rpc_clients_notifier_register(void)
258 {
259         return rpc_pipefs_notifier_register(&rpc_clients_block);
260 }
261
262 void rpc_clients_notifier_unregister(void)
263 {
264         return rpc_pipefs_notifier_unregister(&rpc_clients_block);
265 }
266
267 static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
268 {
269         clnt->cl_nodelen = strlen(nodename);
270         if (clnt->cl_nodelen > UNX_MAXNODENAME)
271                 clnt->cl_nodelen = UNX_MAXNODENAME;
272         memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
273 }
274
275 static int rpc_client_register(const struct rpc_create_args *args,
276                                struct rpc_clnt *clnt)
277 {
278         struct rpc_auth_create_args auth_args = {
279                 .pseudoflavor = args->authflavor,
280                 .target_name = args->client_name,
281         };
282         struct rpc_auth *auth;
283         struct net *net = rpc_net_ns(clnt);
284         struct super_block *pipefs_sb;
285         int err;
286
287         pipefs_sb = rpc_get_sb_net(net);
288         if (pipefs_sb) {
289                 err = rpc_setup_pipedir(pipefs_sb, clnt);
290                 if (err)
291                         goto out;
292         }
293
294         rpc_register_client(clnt);
295         if (pipefs_sb)
296                 rpc_put_sb_net(net);
297
298         auth = rpcauth_create(&auth_args, clnt);
299         if (IS_ERR(auth)) {
300                 dprintk("RPC:       Couldn't create auth handle (flavor %u)\n",
301                                 args->authflavor);
302                 err = PTR_ERR(auth);
303                 goto err_auth;
304         }
305         return 0;
306 err_auth:
307         pipefs_sb = rpc_get_sb_net(net);
308         rpc_unregister_client(clnt);
309         __rpc_clnt_remove_pipedir(clnt);
310 out:
311         if (pipefs_sb)
312                 rpc_put_sb_net(net);
313         return err;
314 }
315
316 static DEFINE_IDA(rpc_clids);
317
318 static int rpc_alloc_clid(struct rpc_clnt *clnt)
319 {
320         int clid;
321
322         clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
323         if (clid < 0)
324                 return clid;
325         clnt->cl_clid = clid;
326         return 0;
327 }
328
329 static void rpc_free_clid(struct rpc_clnt *clnt)
330 {
331         ida_simple_remove(&rpc_clids, clnt->cl_clid);
332 }
333
334 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
335                 struct rpc_xprt *xprt,
336                 struct rpc_clnt *parent)
337 {
338         const struct rpc_program *program = args->program;
339         const struct rpc_version *version;
340         struct rpc_clnt         *clnt = NULL;
341         int err;
342
343         /* sanity check the name before trying to print it */
344         dprintk("RPC:       creating %s client for %s (xprt %p)\n",
345                         program->name, args->servername, xprt);
346
347         err = rpciod_up();
348         if (err)
349                 goto out_no_rpciod;
350
351         err = -EINVAL;
352         if (args->version >= program->nrvers)
353                 goto out_err;
354         version = program->version[args->version];
355         if (version == NULL)
356                 goto out_err;
357
358         err = -ENOMEM;
359         clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
360         if (!clnt)
361                 goto out_err;
362         clnt->cl_parent = parent ? : clnt;
363
364         err = rpc_alloc_clid(clnt);
365         if (err)
366                 goto out_no_clid;
367
368         rcu_assign_pointer(clnt->cl_xprt, xprt);
369         clnt->cl_procinfo = version->procs;
370         clnt->cl_maxproc  = version->nrprocs;
371         clnt->cl_prog     = args->prognumber ? : program->number;
372         clnt->cl_vers     = version->number;
373         clnt->cl_stats    = program->stats;
374         clnt->cl_metrics  = rpc_alloc_iostats(clnt);
375         rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
376         err = -ENOMEM;
377         if (clnt->cl_metrics == NULL)
378                 goto out_no_stats;
379         clnt->cl_program  = program;
380         INIT_LIST_HEAD(&clnt->cl_tasks);
381         spin_lock_init(&clnt->cl_lock);
382
383         if (!xprt_bound(xprt))
384                 clnt->cl_autobind = 1;
385
386         clnt->cl_timeout = xprt->timeout;
387         if (args->timeout != NULL) {
388                 memcpy(&clnt->cl_timeout_default, args->timeout,
389                                 sizeof(clnt->cl_timeout_default));
390                 clnt->cl_timeout = &clnt->cl_timeout_default;
391         }
392
393         clnt->cl_rtt = &clnt->cl_rtt_default;
394         rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
395
396         atomic_set(&clnt->cl_count, 1);
397
398         /* save the nodename */
399         rpc_clnt_set_nodename(clnt, utsname()->nodename);
400
401         err = rpc_client_register(args, clnt);
402         if (err)
403                 goto out_no_path;
404         if (parent)
405                 atomic_inc(&parent->cl_count);
406         return clnt;
407
408 out_no_path:
409         rpc_free_iostats(clnt->cl_metrics);
410 out_no_stats:
411         rpc_free_clid(clnt);
412 out_no_clid:
413         kfree(clnt);
414 out_err:
415         rpciod_down();
416 out_no_rpciod:
417         xprt_put(xprt);
418         return ERR_PTR(err);
419 }
420
421 /**
422  * rpc_create - create an RPC client and transport with one call
423  * @args: rpc_clnt create argument structure
424  *
425  * Creates and initializes an RPC transport and an RPC client.
426  *
427  * It can ping the server in order to determine if it is up, and to see if
428  * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
429  * this behavior so asynchronous tasks can also use rpc_create.
430  */
431 struct rpc_clnt *rpc_create(struct rpc_create_args *args)
432 {
433         struct rpc_xprt *xprt;
434         struct rpc_clnt *clnt;
435         struct xprt_create xprtargs = {
436                 .net = args->net,
437                 .ident = args->protocol,
438                 .srcaddr = args->saddress,
439                 .dstaddr = args->address,
440                 .addrlen = args->addrsize,
441                 .servername = args->servername,
442                 .bc_xprt = args->bc_xprt,
443         };
444         char servername[48];
445
446         if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
447                 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
448         if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
449                 xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
450         /*
451          * If the caller chooses not to specify a hostname, whip
452          * up a string representation of the passed-in address.
453          */
454         if (xprtargs.servername == NULL) {
455                 struct sockaddr_un *sun =
456                                 (struct sockaddr_un *)args->address;
457                 struct sockaddr_in *sin =
458                                 (struct sockaddr_in *)args->address;
459                 struct sockaddr_in6 *sin6 =
460                                 (struct sockaddr_in6 *)args->address;
461
462                 servername[0] = '\0';
463                 switch (args->address->sa_family) {
464                 case AF_LOCAL:
465                         snprintf(servername, sizeof(servername), "%s",
466                                  sun->sun_path);
467                         break;
468                 case AF_INET:
469                         snprintf(servername, sizeof(servername), "%pI4",
470                                  &sin->sin_addr.s_addr);
471                         break;
472                 case AF_INET6:
473                         snprintf(servername, sizeof(servername), "%pI6",
474                                  &sin6->sin6_addr);
475                         break;
476                 default:
477                         /* caller wants default server name, but
478                          * address family isn't recognized. */
479                         return ERR_PTR(-EINVAL);
480                 }
481                 xprtargs.servername = servername;
482         }
483
484         xprt = xprt_create_transport(&xprtargs);
485         if (IS_ERR(xprt))
486                 return (struct rpc_clnt *)xprt;
487
488         /*
489          * By default, kernel RPC client connects from a reserved port.
490          * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
491          * but it is always enabled for rpciod, which handles the connect
492          * operation.
493          */
494         xprt->resvport = 1;
495         if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
496                 xprt->resvport = 0;
497
498         clnt = rpc_new_client(args, xprt, NULL);
499         if (IS_ERR(clnt))
500                 return clnt;
501
502         if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
503                 int err = rpc_ping(clnt);
504                 if (err != 0) {
505                         rpc_shutdown_client(clnt);
506                         return ERR_PTR(err);
507                 }
508         }
509
510         clnt->cl_softrtry = 1;
511         if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
512                 clnt->cl_softrtry = 0;
513
514         if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
515                 clnt->cl_autobind = 1;
516         if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
517                 clnt->cl_discrtry = 1;
518         if (!(args->flags & RPC_CLNT_CREATE_QUIET))
519                 clnt->cl_chatty = 1;
520
521         return clnt;
522 }
523 EXPORT_SYMBOL_GPL(rpc_create);
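/*
 * Illustrative sketch only (not an in-tree caller): a typical user fills
 * in struct rpc_create_args and lets rpc_create() set up both the
 * transport and the client.  "foo_program" and "srvaddr" are hypothetical
 * names; XPRT_TRANSPORT_TCP and RPC_AUTH_UNIX are the usual sunrpc
 * constants, and RPC_CLNT_CREATE_NOPING is handled above.
 *
 *	struct rpc_create_args args = {
 *		.net		= &init_net,
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&srvaddr,
 *		.addrsize	= sizeof(srvaddr),
 *		.servername	= "server.example.com",
 *		.program	= &foo_program,
 *		.version	= 3,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 */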
524
525 /*
526  * This function clones the RPC client structure. It allows us to share the
527  * same transport while varying parameters such as the authentication
528  * flavour.
529  */
530 static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
531                                            struct rpc_clnt *clnt)
532 {
533         struct rpc_xprt *xprt;
534         struct rpc_clnt *new;
535         int err;
536
537         err = -ENOMEM;
538         rcu_read_lock();
539         xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
540         rcu_read_unlock();
541         if (xprt == NULL)
542                 goto out_err;
543         args->servername = xprt->servername;
544
545         new = rpc_new_client(args, xprt, clnt);
546         if (IS_ERR(new)) {
547                 err = PTR_ERR(new);
548                 goto out_err;
549         }
550
551         /* Turn off autobind on clones */
552         new->cl_autobind = 0;
553         new->cl_softrtry = clnt->cl_softrtry;
554         new->cl_discrtry = clnt->cl_discrtry;
555         new->cl_chatty = clnt->cl_chatty;
556         return new;
557
558 out_err:
559         dprintk("RPC:       %s: returned error %d\n", __func__, err);
560         return ERR_PTR(err);
561 }
562
563 /**
564  * rpc_clone_client - Clone an RPC client structure
565  *
566  * @clnt: RPC client whose parameters are copied
567  *
568  * Returns a fresh RPC client or an ERR_PTR.
569  */
570 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
571 {
572         struct rpc_create_args args = {
573                 .program        = clnt->cl_program,
574                 .prognumber     = clnt->cl_prog,
575                 .version        = clnt->cl_vers,
576                 .authflavor     = clnt->cl_auth->au_flavor,
577         };
578         return __rpc_clone_client(&args, clnt);
579 }
580 EXPORT_SYMBOL_GPL(rpc_clone_client);
581
582 /**
583  * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
584  *
585  * @clnt: RPC client whose parameters are copied
586  * @flavor: security flavor for new client
587  *
588  * Returns a fresh RPC client or an ERR_PTR.
589  */
590 struct rpc_clnt *
591 rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
592 {
593         struct rpc_create_args args = {
594                 .program        = clnt->cl_program,
595                 .prognumber     = clnt->cl_prog,
596                 .version        = clnt->cl_vers,
597                 .authflavor     = flavor,
598         };
599         return __rpc_clone_client(&args, clnt);
600 }
601 EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
602
603 /*
604  * Kill all tasks for the given client.
605  * XXX: kill their descendants as well?
606  */
607 void rpc_killall_tasks(struct rpc_clnt *clnt)
608 {
609         struct rpc_task *rovr;
610
611
612         if (list_empty(&clnt->cl_tasks))
613                 return;
614         dprintk("RPC:       killing all tasks for client %p\n", clnt);
615         /*
616          * Spin lock all_tasks to prevent changes...
617          */
618         spin_lock(&clnt->cl_lock);
619         list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
620                 if (!RPC_IS_ACTIVATED(rovr))
621                         continue;
622                 if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
623                         rovr->tk_flags |= RPC_TASK_KILLED;
624                         rpc_exit(rovr, -EIO);
625                         if (RPC_IS_QUEUED(rovr))
626                                 rpc_wake_up_queued_task(rovr->tk_waitqueue,
627                                                         rovr);
628                 }
629         }
630         spin_unlock(&clnt->cl_lock);
631 }
632 EXPORT_SYMBOL_GPL(rpc_killall_tasks);
633
634 /*
635  * Properly shut down an RPC client, terminating all outstanding
636  * requests.
637  */
638 void rpc_shutdown_client(struct rpc_clnt *clnt)
639 {
640         might_sleep();
641
642         dprintk_rcu("RPC:       shutting down %s client for %s\n",
643                         clnt->cl_program->name,
644                         rcu_dereference(clnt->cl_xprt)->servername);
645
646         while (!list_empty(&clnt->cl_tasks)) {
647                 rpc_killall_tasks(clnt);
648                 wait_event_timeout(destroy_wait,
649                         list_empty(&clnt->cl_tasks), 1*HZ);
650         }
651
652         rpc_release_client(clnt);
653 }
654 EXPORT_SYMBOL_GPL(rpc_shutdown_client);
655
656 /*
657  * Free an RPC client
658  */
659 static void
660 rpc_free_client(struct rpc_clnt *clnt)
661 {
662         dprintk_rcu("RPC:       destroying %s client for %s\n",
663                         clnt->cl_program->name,
664                         rcu_dereference(clnt->cl_xprt)->servername);
665         if (clnt->cl_parent != clnt)
666                 rpc_release_client(clnt->cl_parent);
667         rpc_clnt_remove_pipedir(clnt);
668         rpc_unregister_client(clnt);
669         rpc_free_iostats(clnt->cl_metrics);
670         clnt->cl_metrics = NULL;
671         xprt_put(rcu_dereference_raw(clnt->cl_xprt));
672         rpciod_down();
673         rpc_free_clid(clnt);
674         kfree(clnt);
675 }
676
677 /*
678  * Release the RPC client's auth handle, then free the client itself
679  */
680 static void
681 rpc_free_auth(struct rpc_clnt *clnt)
682 {
683         if (clnt->cl_auth == NULL) {
684                 rpc_free_client(clnt);
685                 return;
686         }
687
688         /*
689          * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
690          *       release remaining GSS contexts. This mechanism ensures
691          *       that it can do so safely.
692          */
693         atomic_inc(&clnt->cl_count);
694         rpcauth_release(clnt->cl_auth);
695         clnt->cl_auth = NULL;
696         if (atomic_dec_and_test(&clnt->cl_count))
697                 rpc_free_client(clnt);
698 }
699
700 /*
701  * Release reference to the RPC client
702  */
703 void
704 rpc_release_client(struct rpc_clnt *clnt)
705 {
706         dprintk("RPC:       rpc_release_client(%p)\n", clnt);
707
708         if (list_empty(&clnt->cl_tasks))
709                 wake_up(&destroy_wait);
710         if (atomic_dec_and_test(&clnt->cl_count))
711                 rpc_free_auth(clnt);
712 }
713 EXPORT_SYMBOL_GPL(rpc_release_client);
714
715 /**
716  * rpc_bind_new_program - bind a new RPC program to an existing client
717  * @old: old rpc_client
718  * @program: rpc program to set
719  * @vers: rpc program version
720  *
721  * Clones the rpc client and sets up a new RPC program. This is mainly
722  * of use for enabling different RPC programs to share the same transport.
723  * The Sun NFSv2/v3 ACL protocol can do this.
724  */
725 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
726                                       const struct rpc_program *program,
727                                       u32 vers)
728 {
729         struct rpc_create_args args = {
730                 .program        = program,
731                 .prognumber     = program->number,
732                 .version        = vers,
733                 .authflavor     = old->cl_auth->au_flavor,
734         };
735         struct rpc_clnt *clnt;
736         int err;
737
738         clnt = __rpc_clone_client(&args, old);
739         if (IS_ERR(clnt))
740                 goto out;
741         err = rpc_ping(clnt);
742         if (err != 0) {
743                 rpc_shutdown_client(clnt);
744                 clnt = ERR_PTR(err);
745         }
746 out:
747         return clnt;
748 }
749 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
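/*
 * Sketch of the intended use (names hypothetical): an NFSv3 client that
 * also wants to speak the ACL side protocol could reuse its transport:
 *
 *	acl_clnt = rpc_bind_new_program(nfs_clnt, &nfsacl_program, 3);
 *	if (IS_ERR(acl_clnt))
 *		...
 *
 * where "nfsacl_program" stands in for the ACL program definition.
 */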
750
751 void rpc_task_release_client(struct rpc_task *task)
752 {
753         struct rpc_clnt *clnt = task->tk_client;
754
755         if (clnt != NULL) {
756                 /* Remove from client task list */
757                 spin_lock(&clnt->cl_lock);
758                 list_del(&task->tk_task);
759                 spin_unlock(&clnt->cl_lock);
760                 task->tk_client = NULL;
761
762                 rpc_release_client(clnt);
763         }
764 }
765
766 static
767 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
768 {
769         if (clnt != NULL) {
770                 rpc_task_release_client(task);
771                 task->tk_client = clnt;
772                 atomic_inc(&clnt->cl_count);
773                 if (clnt->cl_softrtry)
774                         task->tk_flags |= RPC_TASK_SOFT;
775                 if (clnt->cl_noretranstimeo)
776                         task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
777                 if (sk_memalloc_socks()) {
778                         struct rpc_xprt *xprt;
779
780                         rcu_read_lock();
781                         xprt = rcu_dereference(clnt->cl_xprt);
782                         if (xprt->swapper)
783                                 task->tk_flags |= RPC_TASK_SWAPPER;
784                         rcu_read_unlock();
785                 }
786                 /* Add to the client's list of all tasks */
787                 spin_lock(&clnt->cl_lock);
788                 list_add_tail(&task->tk_task, &clnt->cl_tasks);
789                 spin_unlock(&clnt->cl_lock);
790         }
791 }
792
793 void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
794 {
795         rpc_task_release_client(task);
796         rpc_task_set_client(task, clnt);
797 }
798 EXPORT_SYMBOL_GPL(rpc_task_reset_client);
799
800
801 static void
802 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
803 {
804         if (msg != NULL) {
805                 task->tk_msg.rpc_proc = msg->rpc_proc;
806                 task->tk_msg.rpc_argp = msg->rpc_argp;
807                 task->tk_msg.rpc_resp = msg->rpc_resp;
808                 if (msg->rpc_cred != NULL)
809                         task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
810         }
811 }
812
813 /*
814  * Default callback for async RPC calls
815  */
816 static void
817 rpc_default_callback(struct rpc_task *task, void *data)
818 {
819 }
820
821 static const struct rpc_call_ops rpc_default_ops = {
822         .rpc_call_done = rpc_default_callback,
823 };
824
825 /**
826  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
827  * @task_setup_data: pointer to task initialisation data
828  */
829 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
830 {
831         struct rpc_task *task;
832
833         task = rpc_new_task(task_setup_data);
834         if (IS_ERR(task))
835                 goto out;
836
837         rpc_task_set_client(task, task_setup_data->rpc_client);
838         rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
839
840         if (task->tk_action == NULL)
841                 rpc_call_start(task);
842
843         atomic_inc(&task->tk_count);
844         rpc_execute(task);
845 out:
846         return task;
847 }
848 EXPORT_SYMBOL_GPL(rpc_run_task);
849
850 /**
851  * rpc_call_sync - Perform a synchronous RPC call
852  * @clnt: pointer to RPC client
853  * @msg: RPC call parameters
854  * @flags: RPC call flags
855  */
856 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
857 {
858         struct rpc_task *task;
859         struct rpc_task_setup task_setup_data = {
860                 .rpc_client = clnt,
861                 .rpc_message = msg,
862                 .callback_ops = &rpc_default_ops,
863                 .flags = flags,
864         };
865         int status;
866
867         WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
868         if (flags & RPC_TASK_ASYNC) {
869                 rpc_release_calldata(task_setup_data.callback_ops,
870                         task_setup_data.callback_data);
871                 return -EINVAL;
872         }
873
874         task = rpc_run_task(&task_setup_data);
875         if (IS_ERR(task))
876                 return PTR_ERR(task);
877         status = task->tk_status;
878         rpc_put_task(task);
879         return status;
880 }
881 EXPORT_SYMBOL_GPL(rpc_call_sync);
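/*
 * Illustrative sketch (hypothetical caller): the procedure, argument and
 * result pointers travel in struct rpc_message; "foo_procedures", "argp"
 * and "resp" are placeholders.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &foo_procedures[FOOPROC_GETATTR],
 *		.rpc_argp	= argp,
 *		.rpc_resp	= resp,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, 0);
 */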
882
883 /**
884  * rpc_call_async - Perform an asynchronous RPC call
885  * @clnt: pointer to RPC client
886  * @msg: RPC call parameters
887  * @flags: RPC call flags
888  * @tk_ops: RPC call ops
889  * @data: user call data
890  */
891 int
892 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
893                const struct rpc_call_ops *tk_ops, void *data)
894 {
895         struct rpc_task *task;
896         struct rpc_task_setup task_setup_data = {
897                 .rpc_client = clnt,
898                 .rpc_message = msg,
899                 .callback_ops = tk_ops,
900                 .callback_data = data,
901                 .flags = flags|RPC_TASK_ASYNC,
902         };
903
904         task = rpc_run_task(&task_setup_data);
905         if (IS_ERR(task))
906                 return PTR_ERR(task);
907         rpc_put_task(task);
908         return 0;
909 }
910 EXPORT_SYMBOL_GPL(rpc_call_async);
911
912 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
913 /**
914  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
915  * rpc_execute against it
916  * @req: RPC request
917  * @tk_ops: RPC call ops
918  */
919 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
920                                 const struct rpc_call_ops *tk_ops)
921 {
922         struct rpc_task *task;
923         struct xdr_buf *xbufp = &req->rq_snd_buf;
924         struct rpc_task_setup task_setup_data = {
925                 .callback_ops = tk_ops,
926         };
927
928         dprintk("RPC: rpc_run_bc_task req= %p\n", req);
929         /*
930          * Create an rpc_task to send the data
931          */
932         task = rpc_new_task(&task_setup_data);
933         if (IS_ERR(task)) {
934                 xprt_free_bc_request(req);
935                 goto out;
936         }
937         task->tk_rqstp = req;
938
939         /*
940          * Set up the xdr_buf length.
941          * This also indicates that the buffer is XDR encoded already.
942          */
943         xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
944                         xbufp->tail[0].iov_len;
945
946         task->tk_action = call_bc_transmit;
947         atomic_inc(&task->tk_count);
948         WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
949         rpc_execute(task);
950
951 out:
952         dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
953         return task;
954 }
955 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
956
957 void
958 rpc_call_start(struct rpc_task *task)
959 {
960         task->tk_action = call_start;
961 }
962 EXPORT_SYMBOL_GPL(rpc_call_start);
963
964 /**
965  * rpc_peeraddr - extract remote peer address from clnt's xprt
966  * @clnt: RPC client structure
967  * @buf: target buffer
968  * @bufsize: length of target buffer
969  *
970  * Returns the number of bytes that are actually in the stored address.
971  */
972 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
973 {
974         size_t bytes;
975         struct rpc_xprt *xprt;
976
977         rcu_read_lock();
978         xprt = rcu_dereference(clnt->cl_xprt);
979
980         bytes = xprt->addrlen;
981         if (bytes > bufsize)
982                 bytes = bufsize;
983         memcpy(buf, &xprt->addr, bytes);
984         rcu_read_unlock();
985
986         return bytes;
987 }
988 EXPORT_SYMBOL_GPL(rpc_peeraddr);
989
990 /**
991  * rpc_peeraddr2str - return remote peer address in printable format
992  * @clnt: RPC client structure
993  * @format: address format
994  *
995  * NB: the lifetime of the memory referenced by the returned pointer is
996  * the same as the rpc_xprt itself.  As long as the caller uses this
997  * pointer, it must hold the RCU read lock.
998  */
999 const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
1000                              enum rpc_display_format_t format)
1001 {
1002         struct rpc_xprt *xprt;
1003
1004         xprt = rcu_dereference(clnt->cl_xprt);
1005
1006         if (xprt->address_strings[format] != NULL)
1007                 return xprt->address_strings[format];
1008         else
1009                 return "unprintable";
1010 }
1011 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
1012
1013 static const struct sockaddr_in rpc_inaddr_loopback = {
1014         .sin_family             = AF_INET,
1015         .sin_addr.s_addr        = htonl(INADDR_ANY),
1016 };
1017
1018 static const struct sockaddr_in6 rpc_in6addr_loopback = {
1019         .sin6_family            = AF_INET6,
1020         .sin6_addr              = IN6ADDR_ANY_INIT,
1021 };
1022
1023 /*
1024  * Try a getsockname() on a connected datagram socket.  Using a
1025  * connected datagram socket prevents leaving a socket in TIME_WAIT.
1026  * This conserves the ephemeral port number space.
1027  *
1028  * Returns zero and fills in "buf" if successful; otherwise, a
1029  * negative errno is returned.
1030  */
1031 static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
1032                         struct sockaddr *buf, int buflen)
1033 {
1034         struct socket *sock;
1035         int err;
1036
1037         err = __sock_create(net, sap->sa_family,
1038                                 SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
1039         if (err < 0) {
1040                 dprintk("RPC:       can't create UDP socket (%d)\n", err);
1041                 goto out;
1042         }
1043
1044         switch (sap->sa_family) {
1045         case AF_INET:
1046                 err = kernel_bind(sock,
1047                                 (struct sockaddr *)&rpc_inaddr_loopback,
1048                                 sizeof(rpc_inaddr_loopback));
1049                 break;
1050         case AF_INET6:
1051                 err = kernel_bind(sock,
1052                                 (struct sockaddr *)&rpc_in6addr_loopback,
1053                                 sizeof(rpc_in6addr_loopback));
1054                 break;
1055         default:
1056                 err = -EAFNOSUPPORT;
1057                 goto out;
1058         }
1059         if (err < 0) {
1060                 dprintk("RPC:       can't bind UDP socket (%d)\n", err);
1061                 goto out_release;
1062         }
1063
1064         err = kernel_connect(sock, sap, salen, 0);
1065         if (err < 0) {
1066                 dprintk("RPC:       can't connect UDP socket (%d)\n", err);
1067                 goto out_release;
1068         }
1069
1070         err = kernel_getsockname(sock, buf, &buflen);
1071         if (err < 0) {
1072                 dprintk("RPC:       getsockname failed (%d)\n", err);
1073                 goto out_release;
1074         }
1075
1076         err = 0;
1077         if (buf->sa_family == AF_INET6) {
1078                 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
1079                 sin6->sin6_scope_id = 0;
1080         }
1081         dprintk("RPC:       %s succeeded\n", __func__);
1082
1083 out_release:
1084         sock_release(sock);
1085 out:
1086         return err;
1087 }
1088
1089 /*
1090  * Scraping a connected socket failed, so we don't have a usable
1091  * local address.  Fallback: generate an address that will prevent
1092  * the server from calling us back.
1093  *
1094  * Returns zero and fills in "buf" if successful; otherwise, a
1095  * negative errno is returned.
1096  */
1097 static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1098 {
1099         switch (family) {
1100         case AF_INET:
1101                 if (buflen < sizeof(rpc_inaddr_loopback))
1102                         return -EINVAL;
1103                 memcpy(buf, &rpc_inaddr_loopback,
1104                                 sizeof(rpc_inaddr_loopback));
1105                 break;
1106         case AF_INET6:
1107                 if (buflen < sizeof(rpc_in6addr_loopback))
1108                         return -EINVAL;
1109                 memcpy(buf, &rpc_in6addr_loopback,
1110                                 sizeof(rpc_in6addr_loopback));
                break;
1111         default:
1112                 dprintk("RPC:       %s: address family not supported\n",
1113                         __func__);
1114                 return -EAFNOSUPPORT;
1115         }
1116         dprintk("RPC:       %s: succeeded\n", __func__);
1117         return 0;
1118 }
1119
1120 /**
1121  * rpc_localaddr - discover local endpoint address for an RPC client
1122  * @clnt: RPC client structure
1123  * @buf: target buffer
1124  * @buflen: size of target buffer, in bytes
1125  *
1126  * Returns zero and fills in "buf" and "buflen" if successful;
1127  * otherwise, a negative errno is returned.
1128  *
1129  * This works even if the underlying transport is not currently connected,
1130  * or if the upper layer never previously provided a source address.
1131  *
1132  * The result of this function call is transient: multiple calls in
1133  * succession may give different results, depending on how local
1134  * networking configuration changes over time.
1135  */
1136 int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
1137 {
1138         struct sockaddr_storage address;
1139         struct sockaddr *sap = (struct sockaddr *)&address;
1140         struct rpc_xprt *xprt;
1141         struct net *net;
1142         size_t salen;
1143         int err;
1144
1145         rcu_read_lock();
1146         xprt = rcu_dereference(clnt->cl_xprt);
1147         salen = xprt->addrlen;
1148         memcpy(sap, &xprt->addr, salen);
1149         net = get_net(xprt->xprt_net);
1150         rcu_read_unlock();
1151
1152         rpc_set_port(sap, 0);
1153         err = rpc_sockname(net, sap, salen, buf, buflen);
1154         put_net(net);
1155         if (err != 0)
1156                 /* Couldn't discover local address, return ANYADDR */
1157                 return rpc_anyaddr(sap->sa_family, buf, buflen);
1158         return 0;
1159 }
1160 EXPORT_SYMBOL_GPL(rpc_localaddr);
1161
1162 void
1163 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
1164 {
1165         struct rpc_xprt *xprt;
1166
1167         rcu_read_lock();
1168         xprt = rcu_dereference(clnt->cl_xprt);
1169         if (xprt->ops->set_buffer_size)
1170                 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1171         rcu_read_unlock();
1172 }
1173 EXPORT_SYMBOL_GPL(rpc_setbufsize);
1174
1175 /**
1176  * rpc_protocol - Get transport protocol number for an RPC client
1177  * @clnt: RPC client to query
1178  *
1179  */
1180 int rpc_protocol(struct rpc_clnt *clnt)
1181 {
1182         int protocol;
1183
1184         rcu_read_lock();
1185         protocol = rcu_dereference(clnt->cl_xprt)->prot;
1186         rcu_read_unlock();
1187         return protocol;
1188 }
1189 EXPORT_SYMBOL_GPL(rpc_protocol);
1190
1191 /**
1192  * rpc_net_ns - Get the network namespace for this RPC client
1193  * @clnt: RPC client to query
1194  *
1195  */
1196 struct net *rpc_net_ns(struct rpc_clnt *clnt)
1197 {
1198         struct net *ret;
1199
1200         rcu_read_lock();
1201         ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1202         rcu_read_unlock();
1203         return ret;
1204 }
1205 EXPORT_SYMBOL_GPL(rpc_net_ns);
1206
1207 /**
1208  * rpc_max_payload - Get maximum payload size for a transport, in bytes
1209  * @clnt: RPC client to query
1210  *
1211  * For stream transports, this is one RPC record fragment (see RFC
1212  * 1831), as we don't support multi-record requests yet.  For datagram
1213  * transports, this is the size of an IP packet minus the IP, UDP, and
1214  * RPC header sizes.
1215  */
1216 size_t rpc_max_payload(struct rpc_clnt *clnt)
1217 {
1218         size_t ret;
1219
1220         rcu_read_lock();
1221         ret = rcu_dereference(clnt->cl_xprt)->max_payload;
1222         rcu_read_unlock();
1223         return ret;
1224 }
1225 EXPORT_SYMBOL_GPL(rpc_max_payload);
1226
1227 /**
1228  * rpc_get_timeout - Get timeout for transport in units of HZ
1229  * @clnt: RPC client to query
1230  */
1231 unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
1232 {
1233         unsigned long ret;
1234
1235         rcu_read_lock();
1236         ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
1237         rcu_read_unlock();
1238         return ret;
1239 }
1240 EXPORT_SYMBOL_GPL(rpc_get_timeout);
1241
1242 /**
1243  * rpc_force_rebind - force transport to check that remote port is unchanged
1244  * @clnt: client to rebind
1245  *
1246  */
1247 void rpc_force_rebind(struct rpc_clnt *clnt)
1248 {
1249         if (clnt->cl_autobind) {
1250                 rcu_read_lock();
1251                 xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
1252                 rcu_read_unlock();
1253         }
1254 }
1255 EXPORT_SYMBOL_GPL(rpc_force_rebind);
1256
1257 /*
1258  * Restart an (async) RPC call from the call_prepare state.
1259  * Usually called from within the exit handler.
1260  */
1261 int
1262 rpc_restart_call_prepare(struct rpc_task *task)
1263 {
1264         if (RPC_ASSASSINATED(task))
1265                 return 0;
1266         task->tk_action = call_start;
1267         if (task->tk_ops->rpc_call_prepare != NULL)
1268                 task->tk_action = rpc_prepare_task;
1269         return 1;
1270 }
1271 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
1272
1273 /*
1274  * Restart an (async) RPC call. Usually called from within the
1275  * exit handler.
1276  */
1277 int
1278 rpc_restart_call(struct rpc_task *task)
1279 {
1280         if (RPC_ASSASSINATED(task))
1281                 return 0;
1282         task->tk_action = call_start;
1283         return 1;
1284 }
1285 EXPORT_SYMBOL_GPL(rpc_restart_call);
1286
1287 #ifdef RPC_DEBUG
1288 static const char *rpc_proc_name(const struct rpc_task *task)
1289 {
1290         const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1291
1292         if (proc) {
1293                 if (proc->p_name)
1294                         return proc->p_name;
1295                 else
1296                         return "NULL";
1297         } else
1298                 return "no proc";
1299 }
1300 #endif
1301
1302 /*
1303  * 0.  Initial state
1304  *
1305  *     Other FSM states can be visited zero or more times, but
1306  *     this state is visited exactly once for each RPC.
1307  */
1308 static void
1309 call_start(struct rpc_task *task)
1310 {
1311         struct rpc_clnt *clnt = task->tk_client;
1312
1313         dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
1314                         clnt->cl_program->name, clnt->cl_vers,
1315                         rpc_proc_name(task),
1316                         (RPC_IS_ASYNC(task) ? "async" : "sync"));
1317
1318         /* Increment call count */
1319         task->tk_msg.rpc_proc->p_count++;
1320         clnt->cl_stats->rpccnt++;
1321         task->tk_action = call_reserve;
1322 }
1323
1324 /*
1325  * 1.   Reserve an RPC call slot
1326  */
1327 static void
1328 call_reserve(struct rpc_task *task)
1329 {
1330         dprint_status(task);
1331
1332         task->tk_status  = 0;
1333         task->tk_action  = call_reserveresult;
1334         xprt_reserve(task);
1335 }
1336
1337 static void call_retry_reserve(struct rpc_task *task);
1338
1339 /*
1340  * 1b.  Grok the result of xprt_reserve()
1341  */
1342 static void
1343 call_reserveresult(struct rpc_task *task)
1344 {
1345         int status = task->tk_status;
1346
1347         dprint_status(task);
1348
1349         /*
1350          * After a call to xprt_reserve(), we must have either
1351          * a request slot or else an error status.
1352          */
1353         task->tk_status = 0;
1354         if (status >= 0) {
1355                 if (task->tk_rqstp) {
1356                         task->tk_action = call_refresh;
1357                         return;
1358                 }
1359
1360                 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
1361                                 __func__, status);
1362                 rpc_exit(task, -EIO);
1363                 return;
1364         }
1365
1366         /*
1367          * Even though there was an error, we may have acquired
1368          * a request slot somehow.  Make sure not to leak it.
1369          */
1370         if (task->tk_rqstp) {
1371                 printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
1372                                 __func__, status);
1373                 xprt_release(task);
1374         }
1375
1376         switch (status) {
1377         case -ENOMEM:
1378                 rpc_delay(task, HZ >> 2);
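                /* fall through */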
1379         case -EAGAIN:   /* woken up; retry */
1380                 task->tk_action = call_retry_reserve;
1381                 return;
1382         case -EIO:      /* probably a shutdown */
1383                 break;
1384         default:
1385                 printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
1386                                 __func__, status);
1387                 break;
1388         }
1389         rpc_exit(task, status);
1390 }
1391
1392 /*
1393  * 1c.  Retry reserving an RPC call slot
1394  */
1395 static void
1396 call_retry_reserve(struct rpc_task *task)
1397 {
1398         dprint_status(task);
1399
1400         task->tk_status  = 0;
1401         task->tk_action  = call_reserveresult;
1402         xprt_retry_reserve(task);
1403 }
1404
1405 /*
1406  * 2.   Bind and/or refresh the credentials
1407  */
1408 static void
1409 call_refresh(struct rpc_task *task)
1410 {
1411         dprint_status(task);
1412
1413         task->tk_action = call_refreshresult;
1414         task->tk_status = 0;
1415         task->tk_client->cl_stats->rpcauthrefresh++;
1416         rpcauth_refreshcred(task);
1417 }
1418
1419 /*
1420  * 2a.  Process the results of a credential refresh
1421  */
1422 static void
1423 call_refreshresult(struct rpc_task *task)
1424 {
1425         int status = task->tk_status;
1426
1427         dprint_status(task);
1428
1429         task->tk_status = 0;
1430         task->tk_action = call_refresh;
1431         switch (status) {
1432         case 0:
1433                 if (rpcauth_uptodatecred(task))
1434                         task->tk_action = call_allocate;
1435                 return;
1436         case -ETIMEDOUT:
1437                 rpc_delay(task, 3*HZ);
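                /* fall through */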
1438         case -EAGAIN:
1439                 status = -EACCES;
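                /* fall through */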
1440         case -EKEYEXPIRED:
1441                 if (!task->tk_cred_retry)
1442                         break;
1443                 task->tk_cred_retry--;
1444                 dprintk("RPC: %5u %s: retry refresh creds\n",
1445                                 task->tk_pid, __func__);
1446                 return;
1447         }
1448         dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1449                                 task->tk_pid, __func__, status);
1450         rpc_exit(task, status);
1451 }
1452
1453 /*
1454  * 2b.  Allocate the buffer. For details, see sched.c:rpc_malloc.
1455  *      (Note: buffer memory is freed in xprt_release).
1456  */
1457 static void
1458 call_allocate(struct rpc_task *task)
1459 {
1460         unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1461         struct rpc_rqst *req = task->tk_rqstp;
1462         struct rpc_xprt *xprt = req->rq_xprt;
1463         struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1464
1465         dprint_status(task);
1466
1467         task->tk_status = 0;
1468         task->tk_action = call_bind;
1469
1470         if (req->rq_buffer)
1471                 return;
1472
1473         if (proc->p_proc != 0) {
1474                 BUG_ON(proc->p_arglen == 0);
1475                 if (proc->p_decode != NULL)
1476                         BUG_ON(proc->p_replen == 0);
1477         }
1478
1479         /*
1480          * Calculate the size (in quads) of the RPC call
1481          * and reply headers, and convert both values
1482          * to byte sizes.
1483          */
1484         req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1485         req->rq_callsize <<= 2;
1486         req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1487         req->rq_rcvsize <<= 2;
1488
1489         req->rq_buffer = xprt->ops->buf_alloc(task,
1490                                         req->rq_callsize + req->rq_rcvsize);
1491         if (req->rq_buffer != NULL)
1492                 return;
1493
1494         dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1495
1496         if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1497                 task->tk_action = call_allocate;
1498                 rpc_delay(task, HZ>>4);
1499                 return;
1500         }
1501
1502         rpc_exit(task, -ERESTARTSYS);
1503 }
1504
1505 static inline int
1506 rpc_task_need_encode(struct rpc_task *task)
1507 {
1508         return task->tk_rqstp->rq_snd_buf.len == 0;
1509 }
1510
1511 static inline void
1512 rpc_task_force_reencode(struct rpc_task *task)
1513 {
1514         task->tk_rqstp->rq_snd_buf.len = 0;
1515         task->tk_rqstp->rq_bytes_sent = 0;
1516 }
1517
1518 static inline void
1519 rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1520 {
1521         buf->head[0].iov_base = start;
1522         buf->head[0].iov_len = len;
1523         buf->tail[0].iov_len = 0;
1524         buf->page_len = 0;
1525         buf->flags = 0;
1526         buf->len = 0;
1527         buf->buflen = len;
1528 }
1529
1530 /*
1531  * 3.   Encode arguments of an RPC call
1532  */
1533 static void
1534 rpc_xdr_encode(struct rpc_task *task)
1535 {
1536         struct rpc_rqst *req = task->tk_rqstp;
1537         kxdreproc_t     encode;
1538         __be32          *p;
1539
1540         dprint_status(task);
1541
1542         rpc_xdr_buf_init(&req->rq_snd_buf,
1543                          req->rq_buffer,
1544                          req->rq_callsize);
1545         rpc_xdr_buf_init(&req->rq_rcv_buf,
1546                          (char *)req->rq_buffer + req->rq_callsize,
1547                          req->rq_rcvsize);
1548
1549         p = rpc_encode_header(task);
1550         if (p == NULL) {
1551                 printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1552                 rpc_exit(task, -EIO);
1553                 return;
1554         }
1555
1556         encode = task->tk_msg.rpc_proc->p_encode;
1557         if (encode == NULL)
1558                 return;
1559
1560         task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1561                         task->tk_msg.rpc_argp);
1562 }
1563
1564 /*
1565  * 4.   Get the server port number if not yet set
1566  */
1567 static void
1568 call_bind(struct rpc_task *task)
1569 {
1570         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1571
1572         dprint_status(task);
1573
1574         task->tk_action = call_connect;
1575         if (!xprt_bound(xprt)) {
1576                 task->tk_action = call_bind_status;
1577                 task->tk_timeout = xprt->bind_timeout;
1578                 xprt->ops->rpcbind(task);
1579         }
1580 }
1581
1582 /*
1583  * 4a.  Sort out bind result
1584  */
1585 static void
1586 call_bind_status(struct rpc_task *task)
1587 {
1588         int status = -EIO;
1589
1590         if (task->tk_status >= 0) {
1591                 dprint_status(task);
1592                 task->tk_status = 0;
1593                 task->tk_action = call_connect;
1594                 return;
1595         }
1596
1597         trace_rpc_bind_status(task);
1598         switch (task->tk_status) {
1599         case -ENOMEM:
1600                 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1601                 rpc_delay(task, HZ >> 2);
1602                 goto retry_timeout;
1603         case -EACCES:
1604                 dprintk("RPC: %5u remote rpcbind: RPC program/version "
1605                                 "unavailable\n", task->tk_pid);
1606                 /* fail immediately if this is an RPC ping */
1607                 if (task->tk_msg.rpc_proc->p_proc == 0) {
1608                         status = -EOPNOTSUPP;
1609                         break;
1610                 }
1611                 if (task->tk_rebind_retry == 0)
1612                         break;
1613                 task->tk_rebind_retry--;
1614                 rpc_delay(task, 3*HZ);
1615                 goto retry_timeout;
1616         case -ETIMEDOUT:
1617                 dprintk("RPC: %5u rpcbind request timed out\n",
1618                                 task->tk_pid);
1619                 goto retry_timeout;
1620         case -EPFNOSUPPORT:
1621                 /* server doesn't support any rpcbind version we know of */
1622                 dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1623                                 task->tk_pid);
1624                 break;
1625         case -EPROTONOSUPPORT:
1626                 dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1627                                 task->tk_pid);
1628                 task->tk_status = 0;
1629                 task->tk_action = call_bind;
1630                 return;
1631         case -ECONNREFUSED:             /* connection problems */
1632         case -ECONNRESET:
1633         case -ENOTCONN:
1634         case -EHOSTDOWN:
1635         case -EHOSTUNREACH:
1636         case -ENETUNREACH:
1637         case -EPIPE:
1638                 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1639                                 task->tk_pid, task->tk_status);
1640                 if (!RPC_IS_SOFTCONN(task)) {
1641                         rpc_delay(task, 5*HZ);
1642                         goto retry_timeout;
1643                 }
1644                 status = task->tk_status;
1645                 break;
1646         default:
1647                 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1648                                 task->tk_pid, -task->tk_status);
1649         }
1650
1651         rpc_exit(task, status);
1652         return;
1653
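             /* Transient rpcbind failure: let call_timeout apply the task's
              * soft/hard retry policy.
              */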
1654 retry_timeout:
1655         task->tk_action = call_timeout;
1656 }
1657
1658 /*
1659  * 4b.  Connect to the RPC server
1660  */
1661 static void
1662 call_connect(struct rpc_task *task)
1663 {
1664         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1665
1666         dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1667                         task->tk_pid, xprt,
1668                         (xprt_connected(xprt) ? "is" : "is not"));
1669
1670         task->tk_action = call_transmit;
1671         if (!xprt_connected(xprt)) {
1672                 task->tk_action = call_connect_status;
1673                 if (task->tk_status < 0)
1674                         return;
1675                 if (task->tk_flags & RPC_TASK_NOCONNECT) {
1676                         rpc_exit(task, -ENOTCONN);
1677                         return;
1678                 }
1679                 xprt_connect(task);
1680         }
1681 }
1682
1683 /*
1684  * 4c.  Sort out connect result
1685  */
1686 static void
1687 call_connect_status(struct rpc_task *task)
1688 {
1689         struct rpc_clnt *clnt = task->tk_client;
1690         int status = task->tk_status;
1691
1692         dprint_status(task);
1693
1694         trace_rpc_connect_status(task, status);
1695         switch (status) {
1696                 /* if soft mounted, test if we've timed out */
1697         case -ETIMEDOUT:
1698                 task->tk_action = call_timeout;
1699                 return;
1700         case -ECONNREFUSED:
1701         case -ECONNRESET:
1702         case -ENETUNREACH:
1703                 if (RPC_IS_SOFTCONN(task))
1704                         break;
1705                 /* retry with existing socket, after a delay */
1706         case 0:
1707         case -EAGAIN:
1708                 task->tk_status = 0;
1709                 clnt->cl_stats->netreconn++;
1710                 task->tk_action = call_transmit;
1711                 return;
1712         }
1713         rpc_exit(task, status);
1714 }
1715
1716 /*
1717  * 5.   Transmit the RPC request, and wait for reply
1718  */
1719 static void
1720 call_transmit(struct rpc_task *task)
1721 {
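             /*
              * RPC_WAS_SENT() is sampled before transmitting: if this request
              * has already been sent at least once, a successful send below
              * is counted as a retransmission in the rpcretrans statistics.
              */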
1722         int is_retrans = RPC_WAS_SENT(task);
1723
1724         dprint_status(task);
1725
1726         task->tk_action = call_status;
1727         if (task->tk_status < 0)
1728                 return;
1729         if (!xprt_prepare_transmit(task))
1730                 return;
1731         task->tk_action = call_transmit_status;
1732         /* Encode here so that rpcsec_gss can use correct sequence number. */
1733         if (rpc_task_need_encode(task)) {
1734                 rpc_xdr_encode(task);
1735                 /* Did the encode result in an error condition? */
1736                 if (task->tk_status != 0) {
1737                         /* Was the error nonfatal? */
1738                         if (task->tk_status == -EAGAIN)
1739                                 rpc_delay(task, HZ >> 4);
1740                         else
1741                                 rpc_exit(task, task->tk_status);
1742                         return;
1743                 }
1744         }
1745         xprt_transmit(task);
1746         if (task->tk_status < 0)
1747                 return;
1748         if (is_retrans)
1749                 task->tk_client->cl_stats->rpcretrans++;
1750         /*
1751          * On success, ensure that we call xprt_end_transmit() before sleeping
1752          * in order to allow access to the socket to other RPC requests.
1753          */
1754         call_transmit_status(task);
1755         if (rpc_reply_expected(task))
1756                 return;
1757         task->tk_action = rpc_exit_task;
1758         rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
1759 }
1760
1761 /*
1762  * 5a.  Handle cleanup after a transmission
1763  */
1764 static void
1765 call_transmit_status(struct rpc_task *task)
1766 {
1767         task->tk_action = call_status;
1768
1769         /*
1770          * Common case: success.  Force the compiler to put this
1771          * test first.
1772          */
1773         if (task->tk_status == 0) {
1774                 xprt_end_transmit(task);
1775                 rpc_task_force_reencode(task);
1776                 return;
1777         }
1778
1779         switch (task->tk_status) {
1780         case -EAGAIN:
1781                 break;
1782         default:
1783                 dprint_status(task);
1784                 xprt_end_transmit(task);
1785                 rpc_task_force_reencode(task);
1786                 break;
1787                 /*
1788                  * Special cases: if we've been waiting on the
1789                  * socket's write_space() callback, or if the
1790                  * socket just returned a connection error,
1791                  * then hold onto the transport lock.
1792                  */
1793         case -ECONNREFUSED:
1794         case -EHOSTDOWN:
1795         case -EHOSTUNREACH:
1796         case -ENETUNREACH:
1797                 if (RPC_IS_SOFTCONN(task)) {
1798                         xprt_end_transmit(task);
1799                         rpc_exit(task, task->tk_status);
1800                         break;
1801                 }
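                     /* fall through: keep the transport locked and re-encode */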
1802         case -ECONNRESET:
1803         case -ENOTCONN:
1804         case -EPIPE:
1805                 rpc_task_force_reencode(task);
1806         }
1807 }
1808
1809 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1810 /*
1811  * 5b.  Send the backchannel RPC reply.  On error, drop the reply.  In
1812  * addition, disconnect on connectivity errors.
1813  */
1814 static void
1815 call_bc_transmit(struct rpc_task *task)
1816 {
1817         struct rpc_rqst *req = task->tk_rqstp;
1818
1819         if (!xprt_prepare_transmit(task)) {
1820                 /*
1821                  * Could not reserve the transport. Try again after the
1822                  * transport is released.
1823                  */
1824                 task->tk_status = 0;
1825                 task->tk_action = call_bc_transmit;
1826                 return;
1827         }
1828
1829         task->tk_action = rpc_exit_task;
1830         if (task->tk_status < 0) {
1831                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1832                         "error: %d\n", task->tk_status);
1833                 return;
1834         }
1835
1836         xprt_transmit(task);
1837         xprt_end_transmit(task);
1838         dprint_status(task);
1839         switch (task->tk_status) {
1840         case 0:
1841                 /* Success */
1842                 break;
1843         case -EHOSTDOWN:
1844         case -EHOSTUNREACH:
1845         case -ENETUNREACH:
1846         case -ETIMEDOUT:
1847                 /*
1848                  * Problem reaching the server.  Disconnect and let the
1849                  * forechannel reestablish the connection.  The server will
1850                  * have to retransmit the backchannel request and we'll
1851                  * reprocess it.  Since these ops are idempotent, there's no
1852                  * need to cache our reply at this time.
1853                  */
1854                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1855                         "error: %d\n", task->tk_status);
1856                 xprt_conditional_disconnect(req->rq_xprt,
1857                         req->rq_connect_cookie);
1858                 break;
1859         default:
1860                 /*
1861                  * We were unable to reply and will have to drop the
1862                  * request.  The server should reconnect and retransmit.
1863                  */
1864                 WARN_ON_ONCE(task->tk_status == -EAGAIN);
1865                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1866                         "error: %d\n", task->tk_status);
1867                 break;
1868         }
1869         rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1870 }
1871 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1872
1873 /*
1874  * 6.   Sort out the RPC call status
1875  */
1876 static void
1877 call_status(struct rpc_task *task)
1878 {
1879         struct rpc_clnt *clnt = task->tk_client;
1880         struct rpc_rqst *req = task->tk_rqstp;
1881         int             status;
1882
1883         if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1884                 task->tk_status = req->rq_reply_bytes_recvd;
1885
1886         dprint_status(task);
1887
1888         status = task->tk_status;
1889         if (status >= 0) {
1890                 task->tk_action = call_decode;
1891                 return;
1892         }
1893
1894         trace_rpc_call_status(task);
1895         task->tk_status = 0;
1896         switch(status) {
1897         case -EHOSTDOWN:
1898         case -EHOSTUNREACH:
1899         case -ENETUNREACH:
1900                 /*
1901                  * Delay any retries for 3 seconds, then handle as if it
1902                  * were a timeout.
1903                  */
1904                 rpc_delay(task, 3*HZ);
1905         case -ETIMEDOUT:
1906                 task->tk_action = call_timeout;
1907                 if (!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
1908                     && task->tk_client->cl_discrtry)
1909                         xprt_conditional_disconnect(req->rq_xprt,
1910                                         req->rq_connect_cookie);
1911                 break;
1912         case -ECONNRESET:
1913         case -ECONNREFUSED:
1914                 rpc_force_rebind(clnt);
1915                 rpc_delay(task, 3*HZ);
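                     /* fall through: retry via a fresh bind and connect */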
1916         case -EPIPE:
1917         case -ENOTCONN:
1918                 task->tk_action = call_bind;
1919                 break;
1920         case -EAGAIN:
1921                 task->tk_action = call_transmit;
1922                 break;
1923         case -EIO:
1924                 /* shutdown or soft timeout */
1925                 rpc_exit(task, status);
1926                 break;
1927         default:
1928                 if (clnt->cl_chatty)
1929                         printk("%s: RPC call returned error %d\n",
1930                                clnt->cl_program->name, -status);
1931                 rpc_exit(task, status);
1932         }
1933 }
1934
1935 /*
1936  * 6a.  Handle RPC timeout
1937  *      We do not release the request slot, so we keep using the
1938  *      same XID for all retransmits.
1939  */
1940 static void
1941 call_timeout(struct rpc_task *task)
1942 {
1943         struct rpc_clnt *clnt = task->tk_client;
1944
1945         if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
1946                 dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
1947                 goto retry;
1948         }
1949
1950         dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1951         task->tk_timeouts++;
1952
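             /* Soft-connect and soft tasks give up once a major timeout has
              * occurred; hard tasks warn, rebind, and keep retrying.
              */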
1953         if (RPC_IS_SOFTCONN(task)) {
1954                 rpc_exit(task, -ETIMEDOUT);
1955                 return;
1956         }
1957         if (RPC_IS_SOFT(task)) {
1958                 if (clnt->cl_chatty) {
1959                         rcu_read_lock();
1960                         printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1961                                 clnt->cl_program->name,
1962                                 rcu_dereference(clnt->cl_xprt)->servername);
1963                         rcu_read_unlock();
1964                 }
1965                 if (task->tk_flags & RPC_TASK_TIMEOUT)
1966                         rpc_exit(task, -ETIMEDOUT);
1967                 else
1968                         rpc_exit(task, -EIO);
1969                 return;
1970         }
1971
1972         if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1973                 task->tk_flags |= RPC_CALL_MAJORSEEN;
1974                 if (clnt->cl_chatty) {
1975                         rcu_read_lock();
1976                         printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1977                                 clnt->cl_program->name,
1978                                 rcu_dereference(clnt->cl_xprt)->servername);
1979                         rcu_read_unlock();
1980                 }
1981         }
1982         rpc_force_rebind(clnt);
1983         /*
1984          * Did our request time out due to an RPCSEC_GSS out-of-sequence
1985          * event? RFC2203 requires the server to drop all such requests.
1986          */
1987         rpcauth_invalcred(task);
1988
1989 retry:
1990         task->tk_action = call_bind;
1991         task->tk_status = 0;
1992 }
1993
1994 /*
1995  * 7.   Decode the RPC reply
1996  */
1997 static void
1998 call_decode(struct rpc_task *task)
1999 {
2000         struct rpc_clnt *clnt = task->tk_client;
2001         struct rpc_rqst *req = task->tk_rqstp;
2002         kxdrdproc_t     decode = task->tk_msg.rpc_proc->p_decode;
2003         __be32          *p;
2004
2005         dprint_status(task);
2006
2007         if (task->tk_flags & RPC_CALL_MAJORSEEN) {
2008                 if (clnt->cl_chatty) {
2009                         rcu_read_lock();
2010                         printk(KERN_NOTICE "%s: server %s OK\n",
2011                                 clnt->cl_program->name,
2012                                 rcu_dereference(clnt->cl_xprt)->servername);
2013                         rcu_read_unlock();
2014                 }
2015                 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
2016         }
2017
2018         /*
2019          * Ensure that we see all writes made by xprt_complete_rqst()
2020          * before it changed req->rq_reply_bytes_recvd.
2021          */
2022         smp_rmb();
2023         req->rq_rcv_buf.len = req->rq_private_buf.len;
2024
2025         /* Check that the softirq receive buffer is valid */
2026         WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
2027                                 sizeof(req->rq_rcv_buf)) != 0);
2028
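             /* A reply shorter than 12 bytes (XID, message direction, reply
              * status) cannot contain a valid RPC reply header; treat it as
              * a lost reply rather than decode it.
              */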
2029         if (req->rq_rcv_buf.len < 12) {
2030                 if (!RPC_IS_SOFT(task)) {
2031                         task->tk_action = call_bind;
2032                         goto out_retry;
2033                 }
2034                 dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
2035                                 clnt->cl_program->name, task->tk_status);
2036                 task->tk_action = call_timeout;
2037                 goto out_retry;
2038         }
2039
2040         p = rpc_verify_header(task);
2041         if (IS_ERR(p)) {
2042                 if (p == ERR_PTR(-EAGAIN))
2043                         goto out_retry;
2044                 return;
2045         }
2046
2047         task->tk_action = rpc_exit_task;
2048
2049         if (decode) {
2050                 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
2051                                                       task->tk_msg.rpc_resp);
2052         }
2053         dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
2054                         task->tk_status);
2055         return;
2056 out_retry:
2057         task->tk_status = 0;
2058         /* Note: rpc_verify_header() may have freed the RPC slot */
2059         if (task->tk_rqstp == req) {
2060                 req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
2061                 if (task->tk_client->cl_discrtry)
2062                         xprt_conditional_disconnect(req->rq_xprt,
2063                                         req->rq_connect_cookie);
2064         }
2065 }
2066
2067 static __be32 *
2068 rpc_encode_header(struct rpc_task *task)
2069 {
2070         struct rpc_clnt *clnt = task->tk_client;
2071         struct rpc_rqst *req = task->tk_rqstp;
2072         __be32          *p = req->rq_svec[0].iov_base;
2073
2074         /* FIXME: check buffer size? */
2075
2076         p = xprt_skip_transport_header(req->rq_xprt, p);
2077         *p++ = req->rq_xid;             /* XID */
2078         *p++ = htonl(RPC_CALL);         /* CALL */
2079         *p++ = htonl(RPC_VERSION);      /* RPC version */
2080         *p++ = htonl(clnt->cl_prog);    /* program number */
2081         *p++ = htonl(clnt->cl_vers);    /* program version */
2082         *p++ = htonl(task->tk_msg.rpc_proc->p_proc);    /* procedure */
2083         p = rpcauth_marshcred(task, p);
2084         req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
2085         return p;
2086 }
2087
2088 static __be32 *
2089 rpc_verify_header(struct rpc_task *task)
2090 {
2091         struct rpc_clnt *clnt = task->tk_client;
2092         struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
2093         int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
2094         __be32  *p = iov->iov_base;
2095         u32 n;
2096         int error = -EACCES;
2097
2098         if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
2099                 /* RFC-1014 says that the representation of XDR data must be a
2100                  * multiple of four bytes
2101                  * - if it isn't, pointer subtraction in the NFS client may give
2102                  *   undefined results
2103                  */
2104                 dprintk("RPC: %5u %s: XDR representation not a multiple of"
2105                        " 4 bytes: 0x%x\n", task->tk_pid, __func__,
2106                        task->tk_rqstp->rq_rcv_buf.len);
2107                 error = -EIO;
2108                 goto out_err;
2109         }
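             /* Every RPC reply begins with three words: XID, message
              * direction, and reply status.
              */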
2110         if ((len -= 3) < 0)
2111                 goto out_overflow;
2112
2113         p += 1; /* skip XID */
2114         if ((n = ntohl(*p++)) != RPC_REPLY) {
2115                 dprintk("RPC: %5u %s: not an RPC reply: %x\n",
2116                         task->tk_pid, __func__, n);
2117                 error = -EIO;
2118                 goto out_garbage;
2119         }
2120
2121         if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
2122                 if (--len < 0)
2123                         goto out_overflow;
2124                 switch ((n = ntohl(*p++))) {
2125                 case RPC_AUTH_ERROR:
2126                         break;
2127                 case RPC_MISMATCH:
2128                         dprintk("RPC: %5u %s: RPC call version mismatch!\n",
2129                                 task->tk_pid, __func__);
2130                         error = -EPROTONOSUPPORT;
2131                         goto out_err;
2132                 default:
2133                         dprintk("RPC: %5u %s: RPC call rejected, "
2134                                 "unknown error: %x\n",
2135                                 task->tk_pid, __func__, n);
2136                         error = -EIO;
2137                         goto out_err;
2138                 }
2139                 if (--len < 0)
2140                         goto out_overflow;
2141                 switch ((n = ntohl(*p++))) {
2142                 case RPC_AUTH_REJECTEDCRED:
2143                 case RPC_AUTH_REJECTEDVERF:
2144                 case RPCSEC_GSS_CREDPROBLEM:
2145                 case RPCSEC_GSS_CTXPROBLEM:
2146                         if (!task->tk_cred_retry)
2147                                 break;
2148                         task->tk_cred_retry--;
2149                         dprintk("RPC: %5u %s: retry stale creds\n",
2150                                         task->tk_pid, __func__);
2151                         rpcauth_invalcred(task);
2152                         /* Ensure we obtain a new XID! */
2153                         xprt_release(task);
2154                         task->tk_action = call_reserve;
2155                         goto out_retry;
2156                 case RPC_AUTH_BADCRED:
2157                 case RPC_AUTH_BADVERF:
2158                         /* possibly garbled cred/verf? */
2159                         if (!task->tk_garb_retry)
2160                                 break;
2161                         task->tk_garb_retry--;
2162                         dprintk("RPC: %5u %s: retry garbled creds\n",
2163                                         task->tk_pid, __func__);
2164                         task->tk_action = call_bind;
2165                         goto out_retry;
2166                 case RPC_AUTH_TOOWEAK:
2167                         rcu_read_lock();
2168                         printk(KERN_NOTICE "RPC: server %s requires stronger "
2169                                "authentication.\n",
2170                                rcu_dereference(clnt->cl_xprt)->servername);
2171                         rcu_read_unlock();
2172                         break;
2173                 default:
2174                         dprintk("RPC: %5u %s: unknown auth error: %x\n",
2175                                         task->tk_pid, __func__, n);
2176                         error = -EIO;
2177                 }
2178                 dprintk("RPC: %5u %s: call rejected %d\n",
2179                                 task->tk_pid, __func__, n);
2180                 goto out_err;
2181         }
2182         p = rpcauth_checkverf(task, p);
2183         if (IS_ERR(p)) {
2184                 error = PTR_ERR(p);
2185                 dprintk("RPC: %5u %s: auth check failed with %d\n",
2186                                 task->tk_pid, __func__, error);
2187                 goto out_garbage;               /* bad verifier, retry */
2188         }
2189         len = p - (__be32 *)iov->iov_base - 1;
2190         if (len < 0)
2191                 goto out_overflow;
2192         switch ((n = ntohl(*p++))) {
2193         case RPC_SUCCESS:
2194                 return p;
2195         case RPC_PROG_UNAVAIL:
2196                 dprintk_rcu("RPC: %5u %s: program %u is unsupported "
2197                                 "by server %s\n", task->tk_pid, __func__,
2198                                 (unsigned int)clnt->cl_prog,
2199                                 rcu_dereference(clnt->cl_xprt)->servername);
2200                 error = -EPFNOSUPPORT;
2201                 goto out_err;
2202         case RPC_PROG_MISMATCH:
2203                 dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported "
2204                                 "by server %s\n", task->tk_pid, __func__,
2205                                 (unsigned int)clnt->cl_prog,
2206                                 (unsigned int)clnt->cl_vers,
2207                                 rcu_dereference(clnt->cl_xprt)->servername);
2208                 error = -EPROTONOSUPPORT;
2209                 goto out_err;
2210         case RPC_PROC_UNAVAIL:
2211                 dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, "
2212                                 "version %u on server %s\n",
2213                                 task->tk_pid, __func__,
2214                                 rpc_proc_name(task),
2215                                 clnt->cl_prog, clnt->cl_vers,
2216                                 rcu_dereference(clnt->cl_xprt)->servername);
2217                 error = -EOPNOTSUPP;
2218                 goto out_err;
2219         case RPC_GARBAGE_ARGS:
2220                 dprintk("RPC: %5u %s: server saw garbage\n",
2221                                 task->tk_pid, __func__);
2222                 break;                  /* retry */
2223         default:
2224                 dprintk("RPC: %5u %s: server accept status: %x\n",
2225                                 task->tk_pid, __func__, n);
2226                 /* Also retry */
2227         }
2228
2229 out_garbage:
2230         clnt->cl_stats->rpcgarbage++;
2231         if (task->tk_garb_retry) {
2232                 task->tk_garb_retry--;
2233                 dprintk("RPC: %5u %s: retrying\n",
2234                                 task->tk_pid, __func__);
2235                 task->tk_action = call_bind;
2236 out_retry:
2237                 return ERR_PTR(-EAGAIN);
2238         }
2239 out_err:
2240         rpc_exit(task, error);
2241         dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
2242                         __func__, error);
2243         return ERR_PTR(error);
2244 out_overflow:
2245         dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
2246                         __func__);
2247         goto out_garbage;
2248 }
2249
2250 static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2251 {
2252 }
2253
2254 static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2255 {
2256         return 0;
2257 }
2258
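     /*
      * The NULL procedure: no arguments, no results.  Used by rpc_ping()
      * and rpc_call_null() below to probe a server.
      */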
2259 static struct rpc_procinfo rpcproc_null = {
2260         .p_encode = rpcproc_encode_null,
2261         .p_decode = rpcproc_decode_null,
2262 };
2263
2264 static int rpc_ping(struct rpc_clnt *clnt)
2265 {
2266         struct rpc_message msg = {
2267                 .rpc_proc = &rpcproc_null,
2268         };
2269         int err;
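             /* Probe with the NULL procedure and an AUTH_NULL credential;
              * soft semantics make the ping fail instead of retrying forever.
              */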
2270         msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
2271         err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
2272         put_rpccred(msg.rpc_cred);
2273         return err;
2274 }
2275
2276 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
2277 {
2278         struct rpc_message msg = {
2279                 .rpc_proc = &rpcproc_null,
2280                 .rpc_cred = cred,
2281         };
2282         struct rpc_task_setup task_setup_data = {
2283                 .rpc_client = clnt,
2284                 .rpc_message = &msg,
2285                 .callback_ops = &rpc_default_ops,
2286                 .flags = flags,
2287         };
2288         return rpc_run_task(&task_setup_data);
2289 }
2290 EXPORT_SYMBOL_GPL(rpc_call_null);
2291
2292 #ifdef RPC_DEBUG
2293 static void rpc_show_header(void)
2294 {
2295         printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
2296                 "-timeout ---ops--\n");
2297 }
2298
2299 static void rpc_show_task(const struct rpc_clnt *clnt,
2300                           const struct rpc_task *task)
2301 {
2302         const char *rpc_waitq = "none";
2303
2304         if (RPC_IS_QUEUED(task))
2305                 rpc_waitq = rpc_qname(task->tk_waitqueue);
2306
2307         printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
2308                 task->tk_pid, task->tk_flags, task->tk_status,
2309                 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
2310                 clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
2311                 task->tk_action, rpc_waitq);
2312 }
2313
2314 void rpc_show_tasks(struct net *net)
2315 {
2316         struct rpc_clnt *clnt;
2317         struct rpc_task *task;
2318         int header = 0;
2319         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2320
2321         spin_lock(&sn->rpc_client_lock);
2322         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
2323                 spin_lock(&clnt->cl_lock);
2324                 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
2325                         if (!header) {
2326                                 rpc_show_header();
2327                                 header++;
2328                         }
2329                         rpc_show_task(clnt, task);
2330                 }
2331                 spin_unlock(&clnt->cl_lock);
2332         }
2333         spin_unlock(&sn->rpc_client_lock);
2334 }
2335 #endif