SUNRPC: constify the rpc_program
1 /*
2  *  linux/net/sunrpc/clnt.c
3  *
4  *  This file contains the high-level RPC interface.
5  *  It is modeled as a finite state machine to support both synchronous
6  *  and asynchronous requests.
7  *
8  *  -   RPC header generation and argument serialization.
9  *  -   Credential refresh.
10  *  -   TCP connect handling.
11  *  -   Retry of operation when it is suspected the operation failed because
12  *      of uid squashing on the server, or when the credentials were stale
13  *      and need to be refreshed, or when a packet was damaged in transit.
14  *      This may have to be moved to the VFS layer.
15  *
16  *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
17  *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
18  */
19
20 #include <asm/system.h>
21
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kallsyms.h>
25 #include <linux/mm.h>
26 #include <linux/namei.h>
27 #include <linux/mount.h>
28 #include <linux/slab.h>
29 #include <linux/utsname.h>
30 #include <linux/workqueue.h>
31 #include <linux/in.h>
32 #include <linux/in6.h>
33 #include <linux/un.h>
34
35 #include <linux/sunrpc/clnt.h>
36 #include <linux/sunrpc/rpc_pipe_fs.h>
37 #include <linux/sunrpc/metrics.h>
38 #include <linux/sunrpc/bc_xprt.h>
39
40 #include "sunrpc.h"
41 #include "netns.h"
42
43 #ifdef RPC_DEBUG
44 # define RPCDBG_FACILITY        RPCDBG_CALL
45 #endif
46
47 #define dprint_status(t)                                        \
48         dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,         \
49                         __func__, t->tk_status)
50
51 /*
52  * All RPC clients are linked into a per-network-namespace list (see struct sunrpc_net)
53  */
54
55 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
56
57
58 static void     call_start(struct rpc_task *task);
59 static void     call_reserve(struct rpc_task *task);
60 static void     call_reserveresult(struct rpc_task *task);
61 static void     call_allocate(struct rpc_task *task);
62 static void     call_decode(struct rpc_task *task);
63 static void     call_bind(struct rpc_task *task);
64 static void     call_bind_status(struct rpc_task *task);
65 static void     call_transmit(struct rpc_task *task);
66 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
67 static void     call_bc_transmit(struct rpc_task *task);
68 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
69 static void     call_status(struct rpc_task *task);
70 static void     call_transmit_status(struct rpc_task *task);
71 static void     call_refresh(struct rpc_task *task);
72 static void     call_refreshresult(struct rpc_task *task);
73 static void     call_timeout(struct rpc_task *task);
74 static void     call_connect(struct rpc_task *task);
75 static void     call_connect_status(struct rpc_task *task);
76
77 static __be32   *rpc_encode_header(struct rpc_task *task);
78 static __be32   *rpc_verify_header(struct rpc_task *task);
79 static int      rpc_ping(struct rpc_clnt *clnt);
80
81 static void rpc_register_client(struct rpc_clnt *clnt)
82 {
83         struct sunrpc_net *sn = net_generic(clnt->cl_xprt->xprt_net, sunrpc_net_id);
84
85         spin_lock(&sn->rpc_client_lock);
86         list_add(&clnt->cl_clients, &sn->all_clients);
87         spin_unlock(&sn->rpc_client_lock);
88 }
89
90 static void rpc_unregister_client(struct rpc_clnt *clnt)
91 {
92         struct sunrpc_net *sn = net_generic(clnt->cl_xprt->xprt_net, sunrpc_net_id);
93
94         spin_lock(&sn->rpc_client_lock);
95         list_del(&clnt->cl_clients);
96         spin_unlock(&sn->rpc_client_lock);
97 }
98
99 static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
100 {
101         if (clnt->cl_dentry) {
102                 if (clnt->cl_auth && clnt->cl_auth->au_ops->pipes_destroy)
103                         clnt->cl_auth->au_ops->pipes_destroy(clnt->cl_auth);
104                 rpc_remove_client_dir(clnt->cl_dentry);
105         }
106         clnt->cl_dentry = NULL;
107 }
108
109 static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
110 {
111         struct super_block *pipefs_sb;
112
113         pipefs_sb = rpc_get_sb_net(clnt->cl_xprt->xprt_net);
114         if (pipefs_sb) {
115                 __rpc_clnt_remove_pipedir(clnt);
116                 rpc_put_sb_net(clnt->cl_xprt->xprt_net);
117         }
118 }
119
120 static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
121                                     struct rpc_clnt *clnt,
122                                     const char *dir_name)
123 {
124         static uint32_t clntid;
125         char name[15];
126         struct qstr q = {
127                 .name = name,
128         };
129         struct dentry *dir, *dentry;
130         int error;
131
132         dir = rpc_d_lookup_sb(sb, dir_name);
133         if (dir == NULL)
134                 return dir;
135         for (;;) {
136                 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
137                 name[sizeof(name) - 1] = '\0';
138                 q.hash = full_name_hash(q.name, q.len);
139                 dentry = rpc_create_client_dir(dir, &q, clnt);
140                 if (!IS_ERR(dentry))
141                         break;
142                 error = PTR_ERR(dentry);
143                 if (error != -EEXIST) {
144                         printk(KERN_INFO "RPC: Couldn't create pipefs entry"
145                                         " %s/%s, error %d\n",
146                                         dir_name, name, error);
147                         break;
148                 }
149         }
150         dput(dir);
151         return dentry;
152 }
153
154 static int
155 rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name)
156 {
157         struct super_block *pipefs_sb;
158         struct dentry *dentry;
159
160         clnt->cl_dentry = NULL;
161         if (dir_name == NULL)
162                 return 0;
163         pipefs_sb = rpc_get_sb_net(clnt->cl_xprt->xprt_net);
164         if (!pipefs_sb)
165                 return 0;
166         dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name);
167         rpc_put_sb_net(clnt->cl_xprt->xprt_net);
168         if (IS_ERR(dentry))
169                 return PTR_ERR(dentry);
170         clnt->cl_dentry = dentry;
171         return 0;
172 }
173
174 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
175                                 struct super_block *sb)
176 {
177         struct dentry *dentry;
178         int err = 0;
179
180         switch (event) {
181         case RPC_PIPEFS_MOUNT:
182                 if (clnt->cl_program->pipe_dir_name == NULL)
183                         break;
184                 dentry = rpc_setup_pipedir_sb(sb, clnt,
185                                               clnt->cl_program->pipe_dir_name);
186                 BUG_ON(dentry == NULL);
187                 if (IS_ERR(dentry))
188                         return PTR_ERR(dentry);
189                 clnt->cl_dentry = dentry;
190                 if (clnt->cl_auth->au_ops->pipes_create) {
191                         err = clnt->cl_auth->au_ops->pipes_create(clnt->cl_auth);
192                         if (err)
193                                 __rpc_clnt_remove_pipedir(clnt);
194                 }
195                 break;
196         case RPC_PIPEFS_UMOUNT:
197                 __rpc_clnt_remove_pipedir(clnt);
198                 break;
199         default:
200                 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
201                 return -ENOTSUPP;
202         }
203         return err;
204 }
205
206 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
207                             void *ptr)
208 {
209         struct super_block *sb = ptr;
210         struct rpc_clnt *clnt;
211         int error = 0;
212         struct sunrpc_net *sn = net_generic(sb->s_fs_info, sunrpc_net_id);
213
214         spin_lock(&sn->rpc_client_lock);
215         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
216                 error = __rpc_pipefs_event(clnt, event, sb);
217                 if (error)
218                         break;
219         }
220         spin_unlock(&sn->rpc_client_lock);
221         return error;
222 }
223
224 static struct notifier_block rpc_clients_block = {
225         .notifier_call  = rpc_pipefs_event,
226         .priority       = SUNRPC_PIPEFS_RPC_PRIO,
227 };
228
229 int rpc_clients_notifier_register(void)
230 {
231         return rpc_pipefs_notifier_register(&rpc_clients_block);
232 }
233
234 void rpc_clients_notifier_unregister(void)
235 {
236         return rpc_pipefs_notifier_unregister(&rpc_clients_block);
237 }
238
239 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
240 {
241         const struct rpc_program *program = args->program;
242         const struct rpc_version *version;
243         struct rpc_clnt         *clnt = NULL;
244         struct rpc_auth         *auth;
245         int err;
246         size_t len;
247
248         /* sanity check the name before trying to print it */
249         err = -EINVAL;
250         len = strlen(args->servername);
251         if (len > RPC_MAXNETNAMELEN)
252                 goto out_no_rpciod;
253         len++;
254
255         dprintk("RPC:       creating %s client for %s (xprt %p)\n",
256                         program->name, args->servername, xprt);
257
258         err = rpciod_up();
259         if (err)
260                 goto out_no_rpciod;
261         err = -EINVAL;
262         if (!xprt)
263                 goto out_no_xprt;
264
265         if (args->version >= program->nrvers)
266                 goto out_err;
267         version = program->version[args->version];
268         if (version == NULL)
269                 goto out_err;
270
271         err = -ENOMEM;
272         clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
273         if (!clnt)
274                 goto out_err;
275         clnt->cl_parent = clnt;
276
277         clnt->cl_server = kstrdup(args->servername, GFP_KERNEL);
278         if (clnt->cl_server == NULL)
279                 goto out_no_server;
280
281         clnt->cl_xprt     = xprt;
282         clnt->cl_procinfo = version->procs;
283         clnt->cl_maxproc  = version->nrprocs;
284         clnt->cl_protname = program->name;
285         clnt->cl_prog     = args->prognumber ? : program->number;
286         clnt->cl_vers     = version->number;
287         clnt->cl_stats    = program->stats;
288         clnt->cl_metrics  = rpc_alloc_iostats(clnt);
289         err = -ENOMEM;
290         if (clnt->cl_metrics == NULL)
291                 goto out_no_stats;
292         clnt->cl_program  = program;
293         INIT_LIST_HEAD(&clnt->cl_tasks);
294         spin_lock_init(&clnt->cl_lock);
295
296         if (!xprt_bound(clnt->cl_xprt))
297                 clnt->cl_autobind = 1;
298
299         clnt->cl_timeout = xprt->timeout;
300         if (args->timeout != NULL) {
301                 memcpy(&clnt->cl_timeout_default, args->timeout,
302                                 sizeof(clnt->cl_timeout_default));
303                 clnt->cl_timeout = &clnt->cl_timeout_default;
304         }
305
306         clnt->cl_rtt = &clnt->cl_rtt_default;
307         rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
308         clnt->cl_principal = NULL;
309         if (args->client_name) {
310                 clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
311                 if (!clnt->cl_principal)
312                         goto out_no_principal;
313         }
314
315         atomic_set(&clnt->cl_count, 1);
316
317         err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
318         if (err < 0)
319                 goto out_no_path;
320
321         auth = rpcauth_create(args->authflavor, clnt);
322         if (IS_ERR(auth)) {
323                 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
324                                 args->authflavor);
325                 err = PTR_ERR(auth);
326                 goto out_no_auth;
327         }
328
329         /* save the nodename */
330         clnt->cl_nodelen = strlen(init_utsname()->nodename);
331         if (clnt->cl_nodelen > UNX_MAXNODENAME)
332                 clnt->cl_nodelen = UNX_MAXNODENAME;
333         memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
334         rpc_register_client(clnt);
335         return clnt;
336
337 out_no_auth:
338         rpc_clnt_remove_pipedir(clnt);
339 out_no_path:
340         kfree(clnt->cl_principal);
341 out_no_principal:
342         rpc_free_iostats(clnt->cl_metrics);
343 out_no_stats:
344         kfree(clnt->cl_server);
345 out_no_server:
346         kfree(clnt);
347 out_err:
348         xprt_put(xprt);
349 out_no_xprt:
350         rpciod_down();
351 out_no_rpciod:
352         return ERR_PTR(err);
353 }
354
355 /**
356  * rpc_create - create an RPC client and transport with one call
357  * @args: rpc_clnt create argument structure
358  *
359  * Creates and initializes an RPC transport and an RPC client.
360  *
361  * It can ping the server in order to determine if it is up, and to see if
362  * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
363  * this behavior so asynchronous tasks can also use rpc_create.
364  */
365 struct rpc_clnt *rpc_create(struct rpc_create_args *args)
366 {
367         struct rpc_xprt *xprt;
368         struct rpc_clnt *clnt;
369         struct xprt_create xprtargs = {
370                 .net = args->net,
371                 .ident = args->protocol,
372                 .srcaddr = args->saddress,
373                 .dstaddr = args->address,
374                 .addrlen = args->addrsize,
375                 .bc_xprt = args->bc_xprt,
376         };
377         char servername[48];
378
379         /*
380          * If the caller chooses not to specify a hostname, whip
381          * up a string representation of the passed-in address.
382          */
383         if (args->servername == NULL) {
384                 struct sockaddr_un *sun =
385                                 (struct sockaddr_un *)args->address;
386                 struct sockaddr_in *sin =
387                                 (struct sockaddr_in *)args->address;
388                 struct sockaddr_in6 *sin6 =
389                                 (struct sockaddr_in6 *)args->address;
390
391                 servername[0] = '\0';
392                 switch (args->address->sa_family) {
393                 case AF_LOCAL:
394                         snprintf(servername, sizeof(servername), "%s",
395                                  sun->sun_path);
396                         break;
397                 case AF_INET:
398                         snprintf(servername, sizeof(servername), "%pI4",
399                                  &sin->sin_addr.s_addr);
400                         break;
401                 case AF_INET6:
402                         snprintf(servername, sizeof(servername), "%pI6",
403                                  &sin6->sin6_addr);
404                         break;
405                 default:
406                         /* caller wants default server name, but
407                          * address family isn't recognized. */
408                         return ERR_PTR(-EINVAL);
409                 }
410                 args->servername = servername;
411         }
412
413         xprt = xprt_create_transport(&xprtargs);
414         if (IS_ERR(xprt))
415                 return (struct rpc_clnt *)xprt;
416
417         /*
418          * By default, kernel RPC client connects from a reserved port.
419          * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
420          * but it is always enabled for rpciod, which handles the connect
421          * operation.
422          */
423         xprt->resvport = 1;
424         if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
425                 xprt->resvport = 0;
426
427         clnt = rpc_new_client(args, xprt);
428         if (IS_ERR(clnt))
429                 return clnt;
430
431         if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
432                 int err = rpc_ping(clnt);
433                 if (err != 0) {
434                         rpc_shutdown_client(clnt);
435                         return ERR_PTR(err);
436                 }
437         }
438
439         clnt->cl_softrtry = 1;
440         if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
441                 clnt->cl_softrtry = 0;
442
443         if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
444                 clnt->cl_autobind = 1;
445         if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
446                 clnt->cl_discrtry = 1;
447         if (!(args->flags & RPC_CLNT_CREATE_QUIET))
448                 clnt->cl_chatty = 1;
449
450         return clnt;
451 }
452 EXPORT_SYMBOL_GPL(rpc_create);
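/*
 * Illustrative sketch only (not part of the original file): a typical
 * caller of rpc_create().  The transport ident, program pointer and
 * version number below are hypothetical placeholders ("example_program"
 * is assumed to be a const struct rpc_program defined elsewhere); failure
 * is reported via the ERR_PTR()/IS_ERR() convention used in this file.
 */
#if 0
static struct rpc_clnt *example_create_client(struct net *net,
					      struct sockaddr *sap,
					      size_t salen)
{
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= sap,
		.addrsize	= salen,
		.program	= &example_program,
		.version	= 3,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,
	};

	return rpc_create(&args);	/* ERR_PTR() on failure */
}
#endif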
453
454 /*
455  * This function clones the RPC client structure. It allows us to share the
456  * same transport while varying parameters such as the authentication
457  * flavour.
458  */
459 struct rpc_clnt *
460 rpc_clone_client(struct rpc_clnt *clnt)
461 {
462         struct rpc_clnt *new;
463         int err = -ENOMEM;
464
465         new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
466         if (!new)
467                 goto out_no_clnt;
468         new->cl_server = kstrdup(clnt->cl_server, GFP_KERNEL);
469         if (new->cl_server == NULL)
470                 goto out_no_server;
471         new->cl_parent = clnt;
472         /* Turn off autobind on clones */
473         new->cl_autobind = 0;
474         INIT_LIST_HEAD(&new->cl_tasks);
475         spin_lock_init(&new->cl_lock);
476         rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
477         new->cl_metrics = rpc_alloc_iostats(clnt);
478         if (new->cl_metrics == NULL)
479                 goto out_no_stats;
480         if (clnt->cl_principal) {
481                 new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
482                 if (new->cl_principal == NULL)
483                         goto out_no_principal;
484         }
485         atomic_set(&new->cl_count, 1);
486         err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
487         if (err != 0)
488                 goto out_no_path;
489         if (new->cl_auth)
490                 atomic_inc(&new->cl_auth->au_count);
491         xprt_get(clnt->cl_xprt);
492         atomic_inc(&clnt->cl_count);
493         rpc_register_client(new);
494         rpciod_up();
495         return new;
496 out_no_path:
497         kfree(new->cl_principal);
498 out_no_principal:
499         rpc_free_iostats(new->cl_metrics);
500 out_no_stats:
501         kfree(new->cl_server);
502 out_no_server:
503         kfree(new);
504 out_no_clnt:
505         dprintk("RPC:       %s: returned error %d\n", __func__, err);
506         return ERR_PTR(err);
507 }
508 EXPORT_SYMBOL_GPL(rpc_clone_client);
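/*
 * Illustrative sketch only: a second user takes its own clone of an
 * existing client so both share the underlying transport; the clone is
 * torn down independently with rpc_shutdown_client().  Any per-clone
 * tweaking (e.g. a different auth flavour) is the caller's business.
 */
#if 0
static struct rpc_clnt *example_share_transport(struct rpc_clnt *clnt)
{
	struct rpc_clnt *clone;

	clone = rpc_clone_client(clnt);
	if (IS_ERR(clone))
		return clone;
	/* ... adjust clone parameters here before first use ... */
	return clone;
}
#endif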
509
510 /*
511  * Kill all tasks for the given client.
512  * XXX: kill their descendants as well?
513  */
514 void rpc_killall_tasks(struct rpc_clnt *clnt)
515 {
516         struct rpc_task *rovr;
517
518
519         if (list_empty(&clnt->cl_tasks))
520                 return;
521         dprintk("RPC:       killing all tasks for client %p\n", clnt);
522         /*
523          * Spin lock all_tasks to prevent changes...
524          */
525         spin_lock(&clnt->cl_lock);
526         list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
527                 if (!RPC_IS_ACTIVATED(rovr))
528                         continue;
529                 if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
530                         rovr->tk_flags |= RPC_TASK_KILLED;
531                         rpc_exit(rovr, -EIO);
532                         if (RPC_IS_QUEUED(rovr))
533                                 rpc_wake_up_queued_task(rovr->tk_waitqueue,
534                                                         rovr);
535                 }
536         }
537         spin_unlock(&clnt->cl_lock);
538 }
539 EXPORT_SYMBOL_GPL(rpc_killall_tasks);
540
541 /*
542  * Properly shut down an RPC client, terminating all outstanding
543  * requests.
544  */
545 void rpc_shutdown_client(struct rpc_clnt *clnt)
546 {
547         dprintk("RPC:       shutting down %s client for %s\n",
548                         clnt->cl_protname, clnt->cl_server);
549
550         while (!list_empty(&clnt->cl_tasks)) {
551                 rpc_killall_tasks(clnt);
552                 wait_event_timeout(destroy_wait,
553                         list_empty(&clnt->cl_tasks), 1*HZ);
554         }
555
556         rpc_release_client(clnt);
557 }
558 EXPORT_SYMBOL_GPL(rpc_shutdown_client);
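/*
 * Illustrative sketch only: the usual teardown pairing for a client
 * obtained from rpc_create() or rpc_clone_client().
 */
#if 0
static void example_put_client(struct rpc_clnt **clntp)
{
	if (*clntp != NULL) {
		rpc_shutdown_client(*clntp);	/* kills and waits for outstanding tasks */
		*clntp = NULL;
	}
}
#endif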
559
560 /*
561  * Free an RPC client
562  */
563 static void
564 rpc_free_client(struct rpc_clnt *clnt)
565 {
566         dprintk("RPC:       destroying %s client for %s\n",
567                         clnt->cl_protname, clnt->cl_server);
568         if (clnt->cl_parent != clnt)
569                 rpc_release_client(clnt->cl_parent);
570         kfree(clnt->cl_server);
571         rpc_unregister_client(clnt);
572         rpc_clnt_remove_pipedir(clnt);
573         rpc_free_iostats(clnt->cl_metrics);
574         kfree(clnt->cl_principal);
575         clnt->cl_metrics = NULL;
576         xprt_put(clnt->cl_xprt);
577         rpciod_down();
578         kfree(clnt);
579 }
580
581 /*
582  * Release the client's RPC auth handle, then free the client once the last reference is dropped
583  */
584 static void
585 rpc_free_auth(struct rpc_clnt *clnt)
586 {
587         if (clnt->cl_auth == NULL) {
588                 rpc_free_client(clnt);
589                 return;
590         }
591
592         /*
593          * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
594          *       release remaining GSS contexts. This mechanism ensures
595          *       that it can do so safely.
596          */
597         atomic_inc(&clnt->cl_count);
598         rpcauth_release(clnt->cl_auth);
599         clnt->cl_auth = NULL;
600         if (atomic_dec_and_test(&clnt->cl_count))
601                 rpc_free_client(clnt);
602 }
603
604 /*
605  * Release reference to the RPC client
606  */
607 void
608 rpc_release_client(struct rpc_clnt *clnt)
609 {
610         dprintk("RPC:       rpc_release_client(%p)\n", clnt);
611
612         if (list_empty(&clnt->cl_tasks))
613                 wake_up(&destroy_wait);
614         if (atomic_dec_and_test(&clnt->cl_count))
615                 rpc_free_auth(clnt);
616 }
617
618 /**
619  * rpc_bind_new_program - bind a new RPC program to an existing client
620  * @old: old rpc_client
621  * @program: rpc program to set
622  * @vers: rpc program version
623  *
624  * Clones the rpc client and sets up a new RPC program. This is mainly
625  * of use for enabling different RPC programs to share the same transport.
626  * The Sun NFSv2/v3 ACL protocol can do this.
627  */
628 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
629                                       const struct rpc_program *program,
630                                       u32 vers)
631 {
632         struct rpc_clnt *clnt;
633         const struct rpc_version *version;
634         int err;
635
636         BUG_ON(vers >= program->nrvers || !program->version[vers]);
637         version = program->version[vers];
638         clnt = rpc_clone_client(old);
639         if (IS_ERR(clnt))
640                 goto out;
641         clnt->cl_procinfo = version->procs;
642         clnt->cl_maxproc  = version->nrprocs;
643         clnt->cl_protname = program->name;
644         clnt->cl_prog     = program->number;
645         clnt->cl_vers     = version->number;
646         clnt->cl_stats    = program->stats;
647         err = rpc_ping(clnt);
648         if (err != 0) {
649                 rpc_shutdown_client(clnt);
650                 clnt = ERR_PTR(err);
651         }
652 out:
653         return clnt;
654 }
655 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
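/*
 * Illustrative sketch only: binding a second RPC program to a transport
 * that is already in use, in the spirit of the NFS ACL example mentioned
 * in the comment above.  "example_acl_program" and the version number 3
 * are hypothetical placeholders for a const struct rpc_program and one
 * of its registered versions.
 */
#if 0
static struct rpc_clnt *example_bind_acl(struct rpc_clnt *nfs_client)
{
	return rpc_bind_new_program(nfs_client, &example_acl_program, 3);
}
#endif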
656
657 void rpc_task_release_client(struct rpc_task *task)
658 {
659         struct rpc_clnt *clnt = task->tk_client;
660
661         if (clnt != NULL) {
662                 /* Remove from client task list */
663                 spin_lock(&clnt->cl_lock);
664                 list_del(&task->tk_task);
665                 spin_unlock(&clnt->cl_lock);
666                 task->tk_client = NULL;
667
668                 rpc_release_client(clnt);
669         }
670 }
671
672 static
673 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
674 {
675         if (clnt != NULL) {
676                 rpc_task_release_client(task);
677                 task->tk_client = clnt;
678                 atomic_inc(&clnt->cl_count);
679                 if (clnt->cl_softrtry)
680                         task->tk_flags |= RPC_TASK_SOFT;
681                 /* Add to the client's list of all tasks */
682                 spin_lock(&clnt->cl_lock);
683                 list_add_tail(&task->tk_task, &clnt->cl_tasks);
684                 spin_unlock(&clnt->cl_lock);
685         }
686 }
687
688 void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
689 {
690         rpc_task_release_client(task);
691         rpc_task_set_client(task, clnt);
692 }
693 EXPORT_SYMBOL_GPL(rpc_task_reset_client);
694
695
696 static void
697 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
698 {
699         if (msg != NULL) {
700                 task->tk_msg.rpc_proc = msg->rpc_proc;
701                 task->tk_msg.rpc_argp = msg->rpc_argp;
702                 task->tk_msg.rpc_resp = msg->rpc_resp;
703                 if (msg->rpc_cred != NULL)
704                         task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
705         }
706 }
707
708 /*
709  * Default callback for async RPC calls
710  */
711 static void
712 rpc_default_callback(struct rpc_task *task, void *data)
713 {
714 }
715
716 static const struct rpc_call_ops rpc_default_ops = {
717         .rpc_call_done = rpc_default_callback,
718 };
719
720 /**
721  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
722  * @task_setup_data: pointer to task initialisation data
723  */
724 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
725 {
726         struct rpc_task *task;
727
728         task = rpc_new_task(task_setup_data);
729         if (IS_ERR(task))
730                 goto out;
731
732         rpc_task_set_client(task, task_setup_data->rpc_client);
733         rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
734
735         if (task->tk_action == NULL)
736                 rpc_call_start(task);
737
738         atomic_inc(&task->tk_count);
739         rpc_execute(task);
740 out:
741         return task;
742 }
743 EXPORT_SYMBOL_GPL(rpc_run_task);
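/*
 * Illustrative sketch only: running a synchronous task via rpc_run_task()
 * directly, which lets the caller inspect the struct rpc_task before
 * dropping its reference.  "example_call_ops" is a hypothetical
 * rpc_call_ops instance supplied by the caller.
 */
#if 0
static int example_run(struct rpc_clnt *clnt, struct rpc_message *msg)
{
	struct rpc_task_setup task_setup_data = {
		.rpc_client	= clnt,
		.rpc_message	= msg,
		.callback_ops	= &example_call_ops,
		.flags		= 0,	/* synchronous: rpc_execute() returns when done */
	};
	struct rpc_task *task;
	int status;

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);	/* drop the reference taken for the caller */
	return status;
}
#endif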
744
745 /**
746  * rpc_call_sync - Perform a synchronous RPC call
747  * @clnt: pointer to RPC client
748  * @msg: RPC call parameters
749  * @flags: RPC call flags
750  */
751 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
752 {
753         struct rpc_task *task;
754         struct rpc_task_setup task_setup_data = {
755                 .rpc_client = clnt,
756                 .rpc_message = msg,
757                 .callback_ops = &rpc_default_ops,
758                 .flags = flags,
759         };
760         int status;
761
762         BUG_ON(flags & RPC_TASK_ASYNC);
763
764         task = rpc_run_task(&task_setup_data);
765         if (IS_ERR(task))
766                 return PTR_ERR(task);
767         status = task->tk_status;
768         rpc_put_task(task);
769         return status;
770 }
771 EXPORT_SYMBOL_GPL(rpc_call_sync);
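/*
 * Illustrative sketch only: a minimal synchronous call.  The procedure
 * table "example_procedures" and index EXAMPLE_PROC_NULL are hypothetical;
 * a NULL procedure takes no arguments and returns no result, similar in
 * spirit to the rpc_ping() helper declared near the top of this file.
 */
#if 0
static int example_null_call(struct rpc_clnt *clnt)
{
	struct rpc_message msg = {
		.rpc_proc = &example_procedures[EXAMPLE_PROC_NULL],
	};

	return rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
}
#endif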
772
773 /**
774  * rpc_call_async - Perform an asynchronous RPC call
775  * @clnt: pointer to RPC client
776  * @msg: RPC call parameters
777  * @flags: RPC call flags
778  * @tk_ops: RPC call ops
779  * @data: user call data
780  */
781 int
782 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
783                const struct rpc_call_ops *tk_ops, void *data)
784 {
785         struct rpc_task *task;
786         struct rpc_task_setup task_setup_data = {
787                 .rpc_client = clnt,
788                 .rpc_message = msg,
789                 .callback_ops = tk_ops,
790                 .callback_data = data,
791                 .flags = flags|RPC_TASK_ASYNC,
792         };
793
794         task = rpc_run_task(&task_setup_data);
795         if (IS_ERR(task))
796                 return PTR_ERR(task);
797         rpc_put_task(task);
798         return 0;
799 }
800 EXPORT_SYMBOL_GPL(rpc_call_async);
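/*
 * Illustrative sketch only: an asynchronous call with completion and
 * release callbacks.  All "example_*" names are hypothetical; the
 * rpc_release callback is responsible for freeing calldata once the
 * task is done with it.
 */
#if 0
static void example_call_done(struct rpc_task *task, void *calldata)
{
	/* runs in rpciod context; inspect task->tk_status here */
}

static void example_call_release(void *calldata)
{
	kfree(calldata);
}

static const struct rpc_call_ops example_async_ops = {
	.rpc_call_done	= example_call_done,
	.rpc_release	= example_call_release,
};

static int example_call_async(struct rpc_clnt *clnt,
			      const struct rpc_message *msg, void *calldata)
{
	return rpc_call_async(clnt, msg, RPC_TASK_SOFT, &example_async_ops,
			      calldata);
}
#endif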
801
802 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
803 /**
804  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
805  * rpc_execute against it
806  * @req: RPC request
807  * @tk_ops: RPC call ops
808  */
809 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
810                                 const struct rpc_call_ops *tk_ops)
811 {
812         struct rpc_task *task;
813         struct xdr_buf *xbufp = &req->rq_snd_buf;
814         struct rpc_task_setup task_setup_data = {
815                 .callback_ops = tk_ops,
816         };
817
818         dprintk("RPC: rpc_run_bc_task req= %p\n", req);
819         /*
820          * Create an rpc_task to send the data
821          */
822         task = rpc_new_task(&task_setup_data);
823         if (IS_ERR(task)) {
824                 xprt_free_bc_request(req);
825                 goto out;
826         }
827         task->tk_rqstp = req;
828
829         /*
830          * Set up the xdr_buf length.
831          * This also indicates that the buffer is XDR encoded already.
832          */
833         xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
834                         xbufp->tail[0].iov_len;
835
836         task->tk_action = call_bc_transmit;
837         atomic_inc(&task->tk_count);
838         BUG_ON(atomic_read(&task->tk_count) != 2);
839         rpc_execute(task);
840
841 out:
842         dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
843         return task;
844 }
845 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
846
847 void
848 rpc_call_start(struct rpc_task *task)
849 {
850         task->tk_action = call_start;
851 }
852 EXPORT_SYMBOL_GPL(rpc_call_start);
853
854 /**
855  * rpc_peeraddr - extract remote peer address from clnt's xprt
856  * @clnt: RPC client structure
857  * @buf: target buffer
858  * @bufsize: length of target buffer
859  *
860  * Returns the number of bytes that are actually in the stored address.
861  */
862 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
863 {
864         size_t bytes;
865         struct rpc_xprt *xprt = clnt->cl_xprt;
866
867         bytes = sizeof(xprt->addr);
868         if (bytes > bufsize)
869                 bytes = bufsize;
870         memcpy(buf, &clnt->cl_xprt->addr, bytes);
871         return xprt->addrlen;
872 }
873 EXPORT_SYMBOL_GPL(rpc_peeraddr);
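/*
 * Illustrative sketch only: copying out the peer address.  Note that the
 * return value is the length of the stored address, which may exceed the
 * number of bytes actually copied when the buffer is small.
 */
#if 0
static void example_copy_peer(struct rpc_clnt *clnt,
			      struct sockaddr_storage *ss)
{
	size_t addrlen;

	addrlen = rpc_peeraddr(clnt, (struct sockaddr *)ss, sizeof(*ss));
	/* at most sizeof(*ss) bytes were written into *ss */
}
#endif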
874
875 /**
876  * rpc_peeraddr2str - return remote peer address in printable format
877  * @clnt: RPC client structure
878  * @format: address format
879  *
880  */
881 const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
882                              enum rpc_display_format_t format)
883 {
884         struct rpc_xprt *xprt = clnt->cl_xprt;
885
886         if (xprt->address_strings[format] != NULL)
887                 return xprt->address_strings[format];
888         else
889                 return "unprintable";
890 }
891 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
892
893 void
894 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
895 {
896         struct rpc_xprt *xprt = clnt->cl_xprt;
897         if (xprt->ops->set_buffer_size)
898                 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
899 }
900 EXPORT_SYMBOL_GPL(rpc_setbufsize);
901
902 /*
903  * Return size of largest payload RPC client can support, in bytes
904  *
905  * For stream transports, this is one RPC record fragment (see RFC
906  * 1831), as we don't support multi-record requests yet.  For datagram
907  * transports, this is the size of an IP packet minus the IP, UDP, and
908  * RPC header sizes.
909  */
910 size_t rpc_max_payload(struct rpc_clnt *clnt)
911 {
912         return clnt->cl_xprt->max_payload;
913 }
914 EXPORT_SYMBOL_GPL(rpc_max_payload);
915
916 /**
917  * rpc_force_rebind - force transport to check that remote port is unchanged
918  * @clnt: client to rebind
919  *
920  */
921 void rpc_force_rebind(struct rpc_clnt *clnt)
922 {
923         if (clnt->cl_autobind)
924                 xprt_clear_bound(clnt->cl_xprt);
925 }
926 EXPORT_SYMBOL_GPL(rpc_force_rebind);
927
928 /*
929  * Restart an (async) RPC call from the call_prepare state.
930  * Usually called from within the exit handler.
931  */
932 int
933 rpc_restart_call_prepare(struct rpc_task *task)
934 {
935         if (RPC_ASSASSINATED(task))
936                 return 0;
937         task->tk_action = call_start;
938         if (task->tk_ops->rpc_call_prepare != NULL)
939                 task->tk_action = rpc_prepare_task;
940         return 1;
941 }
942 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
943
944 /*
945  * Restart an (async) RPC call. Usually called from within the
946  * exit handler.
947  */
948 int
949 rpc_restart_call(struct rpc_task *task)
950 {
951         if (RPC_ASSASSINATED(task))
952                 return 0;
953         task->tk_action = call_start;
954         return 1;
955 }
956 EXPORT_SYMBOL_GPL(rpc_restart_call);
957
958 #ifdef RPC_DEBUG
959 static const char *rpc_proc_name(const struct rpc_task *task)
960 {
961         const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
962
963         if (proc) {
964                 if (proc->p_name)
965                         return proc->p_name;
966                 else
967                         return "NULL";
968         } else
969                 return "no proc";
970 }
971 #endif
972
973 /*
974  * 0.  Initial state
975  *
976  *     Other FSM states can be visited zero or more times, but
977  *     this state is visited exactly once for each RPC.
978  */
979 static void
980 call_start(struct rpc_task *task)
981 {
982         struct rpc_clnt *clnt = task->tk_client;
983
984         dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
985                         clnt->cl_protname, clnt->cl_vers,
986                         rpc_proc_name(task),
987                         (RPC_IS_ASYNC(task) ? "async" : "sync"));
988
989         /* Increment call count */
990         task->tk_msg.rpc_proc->p_count++;
991         clnt->cl_stats->rpccnt++;
992         task->tk_action = call_reserve;
993 }
994
995 /*
996  * 1.   Reserve an RPC call slot
997  */
998 static void
999 call_reserve(struct rpc_task *task)
1000 {
1001         dprint_status(task);
1002
1003         task->tk_status  = 0;
1004         task->tk_action  = call_reserveresult;
1005         xprt_reserve(task);
1006 }
1007
1008 /*
1009  * 1b.  Grok the result of xprt_reserve()
1010  */
1011 static void
1012 call_reserveresult(struct rpc_task *task)
1013 {
1014         int status = task->tk_status;
1015
1016         dprint_status(task);
1017
1018         /*
1019          * After a call to xprt_reserve(), we must have either
1020          * a request slot or else an error status.
1021          */
1022         task->tk_status = 0;
1023         if (status >= 0) {
1024                 if (task->tk_rqstp) {
1025                         task->tk_action = call_refresh;
1026                         return;
1027                 }
1028
1029                 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
1030                                 __func__, status);
1031                 rpc_exit(task, -EIO);
1032                 return;
1033         }
1034
1035         /*
1036          * Even though there was an error, we may have acquired
1037          * a request slot somehow.  Make sure not to leak it.
1038          */
1039         if (task->tk_rqstp) {
1040                 printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
1041                                 __func__, status);
1042                 xprt_release(task);
1043         }
1044
1045         switch (status) {
1046         case -EAGAIN:   /* woken up; retry */
1047                 task->tk_action = call_reserve;
1048                 return;
1049         case -EIO:      /* probably a shutdown */
1050                 break;
1051         default:
1052                 printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
1053                                 __func__, status);
1054                 break;
1055         }
1056         rpc_exit(task, status);
1057 }
1058
1059 /*
1060  * 2.   Bind and/or refresh the credentials
1061  */
1062 static void
1063 call_refresh(struct rpc_task *task)
1064 {
1065         dprint_status(task);
1066
1067         task->tk_action = call_refreshresult;
1068         task->tk_status = 0;
1069         task->tk_client->cl_stats->rpcauthrefresh++;
1070         rpcauth_refreshcred(task);
1071 }
1072
1073 /*
1074  * 2a.  Process the results of a credential refresh
1075  */
1076 static void
1077 call_refreshresult(struct rpc_task *task)
1078 {
1079         int status = task->tk_status;
1080
1081         dprint_status(task);
1082
1083         task->tk_status = 0;
1084         task->tk_action = call_refresh;
1085         switch (status) {
1086         case 0:
1087                 if (rpcauth_uptodatecred(task))
1088                         task->tk_action = call_allocate;
1089                 return;
1090         case -ETIMEDOUT:
1091                 rpc_delay(task, 3*HZ);
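                /* fall through */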
1092         case -EAGAIN:
1093                 status = -EACCES;
1094                 if (!task->tk_cred_retry)
1095                         break;
1096                 task->tk_cred_retry--;
1097                 dprintk("RPC: %5u %s: retry refresh creds\n",
1098                                 task->tk_pid, __func__);
1099                 return;
1100         }
1101         dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1102                                 task->tk_pid, __func__, status);
1103         rpc_exit(task, status);
1104 }
1105
1106 /*
1107  * 2b.  Allocate the buffer. For details, see sched.c:rpc_malloc.
1108  *      (Note: buffer memory is freed in xprt_release).
1109  */
1110 static void
1111 call_allocate(struct rpc_task *task)
1112 {
1113         unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1114         struct rpc_rqst *req = task->tk_rqstp;
1115         struct rpc_xprt *xprt = task->tk_xprt;
1116         struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1117
1118         dprint_status(task);
1119
1120         task->tk_status = 0;
1121         task->tk_action = call_bind;
1122
1123         if (req->rq_buffer)
1124                 return;
1125
1126         if (proc->p_proc != 0) {
1127                 BUG_ON(proc->p_arglen == 0);
1128                 if (proc->p_decode != NULL)
1129                         BUG_ON(proc->p_replen == 0);
1130         }
1131
1132         /*
1133          * Calculate the size (in quads) of the RPC call
1134          * and reply headers, and convert both values
1135          * to byte sizes.
1136          */
1137         req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1138         req->rq_callsize <<= 2;
1139         req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1140         req->rq_rcvsize <<= 2;
1141
1142         req->rq_buffer = xprt->ops->buf_alloc(task,
1143                                         req->rq_callsize + req->rq_rcvsize);
1144         if (req->rq_buffer != NULL)
1145                 return;
1146
1147         dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1148
1149         if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1150                 task->tk_action = call_allocate;
1151                 rpc_delay(task, HZ>>4);
1152                 return;
1153         }
1154
1155         rpc_exit(task, -ERESTARTSYS);
1156 }
1157
1158 static inline int
1159 rpc_task_need_encode(struct rpc_task *task)
1160 {
1161         return task->tk_rqstp->rq_snd_buf.len == 0;
1162 }
1163
1164 static inline void
1165 rpc_task_force_reencode(struct rpc_task *task)
1166 {
1167         task->tk_rqstp->rq_snd_buf.len = 0;
1168         task->tk_rqstp->rq_bytes_sent = 0;
1169 }
1170
1171 static inline void
1172 rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1173 {
1174         buf->head[0].iov_base = start;
1175         buf->head[0].iov_len = len;
1176         buf->tail[0].iov_len = 0;
1177         buf->page_len = 0;
1178         buf->flags = 0;
1179         buf->len = 0;
1180         buf->buflen = len;
1181 }
1182
1183 /*
1184  * 3.   Encode arguments of an RPC call
1185  */
1186 static void
1187 rpc_xdr_encode(struct rpc_task *task)
1188 {
1189         struct rpc_rqst *req = task->tk_rqstp;
1190         kxdreproc_t     encode;
1191         __be32          *p;
1192
1193         dprint_status(task);
1194
1195         rpc_xdr_buf_init(&req->rq_snd_buf,
1196                          req->rq_buffer,
1197                          req->rq_callsize);
1198         rpc_xdr_buf_init(&req->rq_rcv_buf,
1199                          (char *)req->rq_buffer + req->rq_callsize,
1200                          req->rq_rcvsize);
1201
1202         p = rpc_encode_header(task);
1203         if (p == NULL) {
1204                 printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1205                 rpc_exit(task, -EIO);
1206                 return;
1207         }
1208
1209         encode = task->tk_msg.rpc_proc->p_encode;
1210         if (encode == NULL)
1211                 return;
1212
1213         task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1214                         task->tk_msg.rpc_argp);
1215 }
1216
1217 /*
1218  * 4.   Get the server port number if not yet set
1219  */
1220 static void
1221 call_bind(struct rpc_task *task)
1222 {
1223         struct rpc_xprt *xprt = task->tk_xprt;
1224
1225         dprint_status(task);
1226
1227         task->tk_action = call_connect;
1228         if (!xprt_bound(xprt)) {
1229                 task->tk_action = call_bind_status;
1230                 task->tk_timeout = xprt->bind_timeout;
1231                 xprt->ops->rpcbind(task);
1232         }
1233 }
1234
1235 /*
1236  * 4a.  Sort out bind result
1237  */
1238 static void
1239 call_bind_status(struct rpc_task *task)
1240 {
1241         int status = -EIO;
1242
1243         if (task->tk_status >= 0) {
1244                 dprint_status(task);
1245                 task->tk_status = 0;
1246                 task->tk_action = call_connect;
1247                 return;
1248         }
1249
1250         switch (task->tk_status) {
1251         case -ENOMEM:
1252                 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1253                 rpc_delay(task, HZ >> 2);
1254                 goto retry_timeout;
1255         case -EACCES:
1256                 dprintk("RPC: %5u remote rpcbind: RPC program/version "
1257                                 "unavailable\n", task->tk_pid);
1258                 /* fail immediately if this is an RPC ping */
1259                 if (task->tk_msg.rpc_proc->p_proc == 0) {
1260                         status = -EOPNOTSUPP;
1261                         break;
1262                 }
1263                 if (task->tk_rebind_retry == 0)
1264                         break;
1265                 task->tk_rebind_retry--;
1266                 rpc_delay(task, 3*HZ);
1267                 goto retry_timeout;
1268         case -ETIMEDOUT:
1269                 dprintk("RPC: %5u rpcbind request timed out\n",
1270                                 task->tk_pid);
1271                 goto retry_timeout;
1272         case -EPFNOSUPPORT:
1273                 /* server doesn't support any rpcbind version we know of */
1274                 dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1275                                 task->tk_pid);
1276                 break;
1277         case -EPROTONOSUPPORT:
1278                 dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1279                                 task->tk_pid);
1280                 task->tk_status = 0;
1281                 task->tk_action = call_bind;
1282                 return;
1283         case -ECONNREFUSED:             /* connection problems */
1284         case -ECONNRESET:
1285         case -ENOTCONN:
1286         case -EHOSTDOWN:
1287         case -EHOSTUNREACH:
1288         case -ENETUNREACH:
1289         case -EPIPE:
1290                 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1291                                 task->tk_pid, task->tk_status);
1292                 if (!RPC_IS_SOFTCONN(task)) {
1293                         rpc_delay(task, 5*HZ);
1294                         goto retry_timeout;
1295                 }
1296                 status = task->tk_status;
1297                 break;
1298         default:
1299                 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1300                                 task->tk_pid, -task->tk_status);
1301         }
1302
1303         rpc_exit(task, status);
1304         return;
1305
1306 retry_timeout:
1307         task->tk_action = call_timeout;
1308 }
1309
1310 /*
1311  * 4b.  Connect to the RPC server
1312  */
1313 static void
1314 call_connect(struct rpc_task *task)
1315 {
1316         struct rpc_xprt *xprt = task->tk_xprt;
1317
1318         dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1319                         task->tk_pid, xprt,
1320                         (xprt_connected(xprt) ? "is" : "is not"));
1321
1322         task->tk_action = call_transmit;
1323         if (!xprt_connected(xprt)) {
1324                 task->tk_action = call_connect_status;
1325                 if (task->tk_status < 0)
1326                         return;
1327                 xprt_connect(task);
1328         }
1329 }
1330
1331 /*
1332  * 4c.  Sort out connect result
1333  */
1334 static void
1335 call_connect_status(struct rpc_task *task)
1336 {
1337         struct rpc_clnt *clnt = task->tk_client;
1338         int status = task->tk_status;
1339
1340         dprint_status(task);
1341
1342         task->tk_status = 0;
1343         if (status >= 0 || status == -EAGAIN) {
1344                 clnt->cl_stats->netreconn++;
1345                 task->tk_action = call_transmit;
1346                 return;
1347         }
1348
1349         switch (status) {
1350                 /* if soft mounted, test if we've timed out */
1351         case -ETIMEDOUT:
1352                 task->tk_action = call_timeout;
1353                 break;
1354         default:
1355                 rpc_exit(task, -EIO);
1356         }
1357 }
1358
1359 /*
1360  * 5.   Transmit the RPC request, and wait for reply
1361  */
1362 static void
1363 call_transmit(struct rpc_task *task)
1364 {
1365         dprint_status(task);
1366
1367         task->tk_action = call_status;
1368         if (task->tk_status < 0)
1369                 return;
1370         task->tk_status = xprt_prepare_transmit(task);
1371         if (task->tk_status != 0)
1372                 return;
1373         task->tk_action = call_transmit_status;
1374         /* Encode here so that rpcsec_gss can use correct sequence number. */
1375         if (rpc_task_need_encode(task)) {
1376                 BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
1377                 rpc_xdr_encode(task);
1378                 /* Did the encode result in an error condition? */
1379                 if (task->tk_status != 0) {
1380                         /* Was the error nonfatal? */
1381                         if (task->tk_status == -EAGAIN)
1382                                 rpc_delay(task, HZ >> 4);
1383                         else
1384                                 rpc_exit(task, task->tk_status);
1385                         return;
1386                 }
1387         }
1388         xprt_transmit(task);
1389         if (task->tk_status < 0)
1390                 return;
1391         /*
1392          * On success, ensure that we call xprt_end_transmit() before sleeping
1393          * in order to allow access to the socket to other RPC requests.
1394          */
1395         call_transmit_status(task);
1396         if (rpc_reply_expected(task))
1397                 return;
1398         task->tk_action = rpc_exit_task;
1399         rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
1400 }
1401
1402 /*
1403  * 5a.  Handle cleanup after a transmission
1404  */
1405 static void
1406 call_transmit_status(struct rpc_task *task)
1407 {
1408         task->tk_action = call_status;
1409
1410         /*
1411          * Common case: success.  Force the compiler to put this
1412          * test first.
1413          */
1414         if (task->tk_status == 0) {
1415                 xprt_end_transmit(task);
1416                 rpc_task_force_reencode(task);
1417                 return;
1418         }
1419
1420         switch (task->tk_status) {
1421         case -EAGAIN:
1422                 break;
1423         default:
1424                 dprint_status(task);
1425                 xprt_end_transmit(task);
1426                 rpc_task_force_reencode(task);
1427                 break;
1428                 /*
1429                  * Special cases: if we've been waiting on the
1430                  * socket's write_space() callback, or if the
1431                  * socket just returned a connection error,
1432                  * then hold onto the transport lock.
1433                  */
1434         case -ECONNREFUSED:
1435         case -EHOSTDOWN:
1436         case -EHOSTUNREACH:
1437         case -ENETUNREACH:
1438                 if (RPC_IS_SOFTCONN(task)) {
1439                         xprt_end_transmit(task);
1440                         rpc_exit(task, task->tk_status);
1441                         break;
1442                 }
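        /* fall through */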
1443         case -ECONNRESET:
1444         case -ENOTCONN:
1445         case -EPIPE:
1446                 rpc_task_force_reencode(task);
1447         }
1448 }
1449
1450 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1451 /*
1452  * 5b.  Send the backchannel RPC reply.  On error, drop the reply.  In
1453  * addition, disconnect on connectivity errors.
1454  */
1455 static void
1456 call_bc_transmit(struct rpc_task *task)
1457 {
1458         struct rpc_rqst *req = task->tk_rqstp;
1459
1460         BUG_ON(task->tk_status != 0);
1461         task->tk_status = xprt_prepare_transmit(task);
1462         if (task->tk_status == -EAGAIN) {
1463                 /*
1464                  * Could not reserve the transport. Try again after the
1465                  * transport is released.
1466                  */
1467                 task->tk_status = 0;
1468                 task->tk_action = call_bc_transmit;
1469                 return;
1470         }
1471
1472         task->tk_action = rpc_exit_task;
1473         if (task->tk_status < 0) {
1474                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1475                         "error: %d\n", task->tk_status);
1476                 return;
1477         }
1478
1479         xprt_transmit(task);
1480         xprt_end_transmit(task);
1481         dprint_status(task);
1482         switch (task->tk_status) {
1483         case 0:
1484                 /* Success */
1485                 break;
1486         case -EHOSTDOWN:
1487         case -EHOSTUNREACH:
1488         case -ENETUNREACH:
1489         case -ETIMEDOUT:
1490                 /*
1491                  * Problem reaching the server.  Disconnect and let the
1492                  * forechannel reestablish the connection.  The server will
1493                  * have to retransmit the backchannel request and we'll
1494                  * reprocess it.  Since these ops are idempotent, there's no
1495                  * need to cache our reply at this time.
1496                  */
1497                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1498                         "error: %d\n", task->tk_status);
1499                 xprt_conditional_disconnect(task->tk_xprt,
1500                         req->rq_connect_cookie);
1501                 break;
1502         default:
1503                 /*
1504                  * We were unable to reply and will have to drop the
1505                  * request.  The server should reconnect and retransmit.
1506                  */
1507                 BUG_ON(task->tk_status == -EAGAIN);
1508                 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1509                         "error: %d\n", task->tk_status);
1510                 break;
1511         }
1512         rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1513 }
1514 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1515
1516 /*
1517  * 6.   Sort out the RPC call status
1518  */
1519 static void
1520 call_status(struct rpc_task *task)
1521 {
1522         struct rpc_clnt *clnt = task->tk_client;
1523         struct rpc_rqst *req = task->tk_rqstp;
1524         int             status;
1525
1526         if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1527                 task->tk_status = req->rq_reply_bytes_recvd;
1528
1529         dprint_status(task);
1530
1531         status = task->tk_status;
1532         if (status >= 0) {
1533                 task->tk_action = call_decode;
1534                 return;
1535         }
1536
1537         task->tk_status = 0;
1538         switch(status) {
1539         case -EHOSTDOWN:
1540         case -EHOSTUNREACH:
1541         case -ENETUNREACH:
1542                 /*
1543                  * Delay any retries for 3 seconds, then handle as if it
1544                  * were a timeout.
1545                  */
1546                 rpc_delay(task, 3*HZ);
1547         case -ETIMEDOUT:
1548                 task->tk_action = call_timeout;
1549                 if (task->tk_client->cl_discrtry)
1550                         xprt_conditional_disconnect(task->tk_xprt,
1551                                         req->rq_connect_cookie);
1552                 break;
1553         case -ECONNRESET:
1554         case -ECONNREFUSED:
1555                 rpc_force_rebind(clnt);
1556                 rpc_delay(task, 3*HZ);
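        /* fall through */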
1557         case -EPIPE:
1558         case -ENOTCONN:
1559                 task->tk_action = call_bind;
1560                 break;
1561         case -EAGAIN:
1562                 task->tk_action = call_transmit;
1563                 break;
1564         case -EIO:
1565                 /* shutdown or soft timeout */
1566                 rpc_exit(task, status);
1567                 break;
1568         default:
1569                 if (clnt->cl_chatty)
1570                         printk("%s: RPC call returned error %d\n",
1571                                clnt->cl_protname, -status);
1572                 rpc_exit(task, status);
1573         }
1574 }
1575
1576 /*
1577  * 6a.  Handle RPC timeout
1578  *      We do not release the request slot, so we keep using the
1579  *      same XID for all retransmits.
1580  */
1581 static void
1582 call_timeout(struct rpc_task *task)
1583 {
1584         struct rpc_clnt *clnt = task->tk_client;
1585
1586         if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
1587                 dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
1588                 goto retry;
1589         }
1590
1591         dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1592         task->tk_timeouts++;
1593
1594         if (RPC_IS_SOFTCONN(task)) {
1595                 rpc_exit(task, -ETIMEDOUT);
1596                 return;
1597         }
1598         if (RPC_IS_SOFT(task)) {
1599                 if (clnt->cl_chatty)
1600                         printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1601                                 clnt->cl_protname, clnt->cl_server);
1602                 if (task->tk_flags & RPC_TASK_TIMEOUT)
1603                         rpc_exit(task, -ETIMEDOUT);
1604                 else
1605                         rpc_exit(task, -EIO);
1606                 return;
1607         }
1608
1609         if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1610                 task->tk_flags |= RPC_CALL_MAJORSEEN;
1611                 if (clnt->cl_chatty)
1612                         printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1613                         clnt->cl_protname, clnt->cl_server);
1614         }
1615         rpc_force_rebind(clnt);
1616         /*
1617          * Did our request time out due to an RPCSEC_GSS out-of-sequence
1618          * event? RFC2203 requires the server to drop all such requests.
1619          */
1620         rpcauth_invalcred(task);
1621
1622 retry:
1623         clnt->cl_stats->rpcretrans++;
1624         task->tk_action = call_bind;
1625         task->tk_status = 0;
1626 }
1627
1628 /*
1629  * 7.   Decode the RPC reply
1630  */
1631 static void
1632 call_decode(struct rpc_task *task)
1633 {
1634         struct rpc_clnt *clnt = task->tk_client;
1635         struct rpc_rqst *req = task->tk_rqstp;
1636         kxdrdproc_t     decode = task->tk_msg.rpc_proc->p_decode;
1637         __be32          *p;
1638
1639         dprint_status(task);
1640
1641         if (task->tk_flags & RPC_CALL_MAJORSEEN) {
1642                 if (clnt->cl_chatty)
1643                         printk(KERN_NOTICE "%s: server %s OK\n",
1644                                 clnt->cl_protname, clnt->cl_server);
1645                 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
1646         }
1647
1648         /*
1649          * Ensure that we see all writes made by xprt_complete_rqst()
1650          * before it changed req->rq_reply_bytes_recvd.
1651          */
1652         smp_rmb();
1653         req->rq_rcv_buf.len = req->rq_private_buf.len;
1654
1655         /* Check that the softirq receive buffer is valid */
1656         WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
1657                                 sizeof(req->rq_rcv_buf)) != 0);
1658
1659         if (req->rq_rcv_buf.len < 12) {
1660                 if (!RPC_IS_SOFT(task)) {
1661                         task->tk_action = call_bind;
1662                         clnt->cl_stats->rpcretrans++;
1663                         goto out_retry;
1664                 }
1665                 dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
1666                                 clnt->cl_protname, task->tk_status);
1667                 task->tk_action = call_timeout;
1668                 goto out_retry;
1669         }
1670
1671         p = rpc_verify_header(task);
1672         if (IS_ERR(p)) {
1673                 if (p == ERR_PTR(-EAGAIN))
1674                         goto out_retry;
1675                 return;
1676         }
1677
1678         task->tk_action = rpc_exit_task;
1679
1680         if (decode) {
1681                 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
1682                                                       task->tk_msg.rpc_resp);
1683         }
1684         dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
1685                         task->tk_status);
1686         return;
1687 out_retry:
1688         task->tk_status = 0;
1689         /* Note: rpc_verify_header() may have freed the RPC slot */
1690         if (task->tk_rqstp == req) {
1691                 req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
1692                 if (task->tk_client->cl_discrtry)
1693                         xprt_conditional_disconnect(task->tk_xprt,
1694                                         req->rq_connect_cookie);
1695         }
1696 }
1697
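     /*
      * Marshal the RPC call header: the transport header placeholder,
      * XID, call direction, RPC version, program, version and procedure
      * numbers, followed by the credential and verifier, then record the
      * resulting header length in rq_slen.
      */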
1698 static __be32 *
1699 rpc_encode_header(struct rpc_task *task)
1700 {
1701         struct rpc_clnt *clnt = task->tk_client;
1702         struct rpc_rqst *req = task->tk_rqstp;
1703         __be32          *p = req->rq_svec[0].iov_base;
1704
1705         /* FIXME: check buffer size? */
1706
1707         p = xprt_skip_transport_header(task->tk_xprt, p);
1708         *p++ = req->rq_xid;             /* XID */
1709         *p++ = htonl(RPC_CALL);         /* CALL */
1710         *p++ = htonl(RPC_VERSION);      /* RPC version */
1711         *p++ = htonl(clnt->cl_prog);    /* program number */
1712         *p++ = htonl(clnt->cl_vers);    /* program version */
1713         *p++ = htonl(task->tk_msg.rpc_proc->p_proc);    /* procedure */
1714         p = rpcauth_marshcred(task, p);
1715         req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
1716         return p;
1717 }
1718
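     /*
      * Sanity check and parse the RPC reply header: verify that this is
      * a REPLY, handle MSG_DENIED (authentication errors and RPC version
      * mismatch), check the server's verifier, and map the accept status
      * to an error or a retry.  Returns a pointer just past the verified
      * header on success; otherwise an ERR_PTR(), where ERR_PTR(-EAGAIN)
      * tells the caller to retry.  Note that on stale credential errors
      * the request slot may be released to force a new XID.
      */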
1719 static __be32 *
1720 rpc_verify_header(struct rpc_task *task)
1721 {
1722         struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
1723         int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
1724         __be32  *p = iov->iov_base;
1725         u32 n;
1726         int error = -EACCES;
1727
1728         if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
1729                 /* RFC-1014 says that the representation of XDR data must be a
1730                  * multiple of four bytes
1731                  * - if it isn't pointer subtraction in the NFS client may give
1732                  *   undefined results
1733                  */
1734                 dprintk("RPC: %5u %s: XDR representation not a multiple of"
1735                        " 4 bytes: 0x%x\n", task->tk_pid, __func__,
1736                        task->tk_rqstp->rq_rcv_buf.len);
1737                 goto out_eio;
1738         }
1739         if ((len -= 3) < 0)
1740                 goto out_overflow;
1741
1742         p += 1; /* skip XID */
1743         if ((n = ntohl(*p++)) != RPC_REPLY) {
1744                 dprintk("RPC: %5u %s: not an RPC reply: %x\n",
1745                         task->tk_pid, __func__, n);
1746                 goto out_garbage;
1747         }
1748
1749         if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
1750                 if (--len < 0)
1751                         goto out_overflow;
1752                 switch ((n = ntohl(*p++))) {
1753                 case RPC_AUTH_ERROR:
1754                         break;
1755                 case RPC_MISMATCH:
1756                         dprintk("RPC: %5u %s: RPC call version mismatch!\n",
1757                                 task->tk_pid, __func__);
1758                         error = -EPROTONOSUPPORT;
1759                         goto out_err;
1760                 default:
1761                         dprintk("RPC: %5u %s: RPC call rejected, "
1762                                 "unknown error: %x\n",
1763                                 task->tk_pid, __func__, n);
1764                         goto out_eio;
1765                 }
1766                 if (--len < 0)
1767                         goto out_overflow;
1768                 switch ((n = ntohl(*p++))) {
1769                 case RPC_AUTH_REJECTEDCRED:
1770                 case RPC_AUTH_REJECTEDVERF:
1771                 case RPCSEC_GSS_CREDPROBLEM:
1772                 case RPCSEC_GSS_CTXPROBLEM:
1773                         if (!task->tk_cred_retry)
1774                                 break;
1775                         task->tk_cred_retry--;
1776                         dprintk("RPC: %5u %s: retry stale creds\n",
1777                                         task->tk_pid, __func__);
1778                         rpcauth_invalcred(task);
1779                         /* Ensure we obtain a new XID! */
1780                         xprt_release(task);
1781                         task->tk_action = call_reserve;
1782                         goto out_retry;
1783                 case RPC_AUTH_BADCRED:
1784                 case RPC_AUTH_BADVERF:
1785                         /* possibly garbled cred/verf? */
1786                         if (!task->tk_garb_retry)
1787                                 break;
1788                         task->tk_garb_retry--;
1789                         dprintk("RPC: %5u %s: retry garbled creds\n",
1790                                         task->tk_pid, __func__);
1791                         task->tk_action = call_bind;
1792                         goto out_retry;
1793                 case RPC_AUTH_TOOWEAK:
1794                         printk(KERN_NOTICE "RPC: server %s requires stronger "
1795                                "authentication.\n", task->tk_client->cl_server);
1796                         break;
1797                 default:
1798                         dprintk("RPC: %5u %s: unknown auth error: %x\n",
1799                                         task->tk_pid, __func__, n);
1800                         error = -EIO;
1801                 }
1802                 dprintk("RPC: %5u %s: call rejected %d\n",
1803                                 task->tk_pid, __func__, n);
1804                 goto out_err;
1805         }
1806         if (!(p = rpcauth_checkverf(task, p))) {
1807                 dprintk("RPC: %5u %s: auth check failed\n",
1808                                 task->tk_pid, __func__);
1809                 goto out_garbage;               /* bad verifier, retry */
1810         }
1811         len = p - (__be32 *)iov->iov_base - 1;
1812         if (len < 0)
1813                 goto out_overflow;
1814         switch ((n = ntohl(*p++))) {
1815         case RPC_SUCCESS:
1816                 return p;
1817         case RPC_PROG_UNAVAIL:
1818                 dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
1819                                 task->tk_pid, __func__,
1820                                 (unsigned int)task->tk_client->cl_prog,
1821                                 task->tk_client->cl_server);
1822                 error = -EPFNOSUPPORT;
1823                 goto out_err;
1824         case RPC_PROG_MISMATCH:
1825                 dprintk("RPC: %5u %s: program %u, version %u unsupported by "
1826                                 "server %s\n", task->tk_pid, __func__,
1827                                 (unsigned int)task->tk_client->cl_prog,
1828                                 (unsigned int)task->tk_client->cl_vers,
1829                                 task->tk_client->cl_server);
1830                 error = -EPROTONOSUPPORT;
1831                 goto out_err;
1832         case RPC_PROC_UNAVAIL:
1833                 dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
1834                                 "version %u on server %s\n",
1835                                 task->tk_pid, __func__,
1836                                 rpc_proc_name(task),
1837                                 task->tk_client->cl_prog,
1838                                 task->tk_client->cl_vers,
1839                                 task->tk_client->cl_server);
1840                 error = -EOPNOTSUPP;
1841                 goto out_err;
1842         case RPC_GARBAGE_ARGS:
1843                 dprintk("RPC: %5u %s: server saw garbage\n",
1844                                 task->tk_pid, __func__);
1845                 break;                  /* retry */
1846         default:
1847                 dprintk("RPC: %5u %s: server accept status: %x\n",
1848                                 task->tk_pid, __func__, n);
1849                 /* Also retry */
1850         }
1851
1852 out_garbage:
1853         task->tk_client->cl_stats->rpcgarbage++;
1854         if (task->tk_garb_retry) {
1855                 task->tk_garb_retry--;
1856                 dprintk("RPC: %5u %s: retrying\n",
1857                                 task->tk_pid, __func__);
1858                 task->tk_action = call_bind;
1859 out_retry:
1860                 return ERR_PTR(-EAGAIN);
1861         }
1862 out_eio:
1863         error = -EIO;
1864 out_err:
1865         rpc_exit(task, error);
1866         dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
1867                         __func__, error);
1868         return ERR_PTR(error);
1869 out_overflow:
1870         dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
1871                         __func__);
1872         goto out_garbage;
1873 }
1874
1875 static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
1876 {
1877 }
1878
1879 static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
1880 {
1881         return 0;
1882 }
1883
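     /*
      * The NULL procedure (procedure number 0): it takes no arguments
      * and returns no results, which makes it useful as a ping.
      */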
1884 static struct rpc_procinfo rpcproc_null = {
1885         .p_encode = rpcproc_encode_null,
1886         .p_decode = rpcproc_decode_null,
1887 };
1888
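     /*
      * Send a NULL call with AUTH_NULL credentials.  The task is marked
      * soft and soft-connect, so failures are reported to the caller
      * instead of being retried indefinitely.
      */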
1889 static int rpc_ping(struct rpc_clnt *clnt)
1890 {
1891         struct rpc_message msg = {
1892                 .rpc_proc = &rpcproc_null,
1893         };
1894         int err;
1895         msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
1896         err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
1897         put_rpccred(msg.rpc_cred);
1898         return err;
1899 }
1900
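     /**
      * rpc_call_null - start a NULL procedure call to an RPC server
      * @clnt: RPC client handle
      * @cred: credential to use for the call (NULL lets the RPC layer bind one)
      * @flags: RPC task flags, e.g. RPC_TASK_ASYNC or RPC_TASK_SOFT
      *
      * Returns the running rpc_task, or an ERR_PTR() if the task could
      * not be set up.  The caller holds a reference to the returned task
      * and should release it with rpc_put_task(), for example:
      *
      *     task = rpc_call_null(clnt, NULL, RPC_TASK_SOFT);
      *     if (!IS_ERR(task))
      *             rpc_put_task(task);
      */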
1901 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
1902 {
1903         struct rpc_message msg = {
1904                 .rpc_proc = &rpcproc_null,
1905                 .rpc_cred = cred,
1906         };
1907         struct rpc_task_setup task_setup_data = {
1908                 .rpc_client = clnt,
1909                 .rpc_message = &msg,
1910                 .callback_ops = &rpc_default_ops,
1911                 .flags = flags,
1912         };
1913         return rpc_run_task(&task_setup_data);
1914 }
1915 EXPORT_SYMBOL_GPL(rpc_call_null);
1916
1917 #ifdef RPC_DEBUG
1918 static void rpc_show_header(void)
1919 {
1920         printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
1921                 "-timeout ---ops--\n");
1922 }
1923
1924 static void rpc_show_task(const struct rpc_clnt *clnt,
1925                           const struct rpc_task *task)
1926 {
1927         const char *rpc_waitq = "none";
1928
1929         if (RPC_IS_QUEUED(task))
1930                 rpc_waitq = rpc_qname(task->tk_waitqueue);
1931
1932         printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
1933                 task->tk_pid, task->tk_flags, task->tk_status,
1934                 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
1935                 clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
1936                 task->tk_action, rpc_waitq);
1937 }
1938
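     /*
      * Dump a one-line summary of every task owned by every RPC client
      * in the given network namespace to the kernel log.
      */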
1939 void rpc_show_tasks(struct net *net)
1940 {
1941         struct rpc_clnt *clnt;
1942         struct rpc_task *task;
1943         int header = 0;
1944         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1945
1946         spin_lock(&sn->rpc_client_lock);
1947         list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
1948                 spin_lock(&clnt->cl_lock);
1949                 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
1950                         if (!header) {
1951                                 rpc_show_header();
1952                                 header++;
1953                         }
1954                         rpc_show_task(clnt, task);
1955                 }
1956                 spin_unlock(&clnt->cl_lock);
1957         }
1958         spin_unlock(&sn->rpc_client_lock);
1959 }
1960 #endif