/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>

#define NLMDBG_FACILITY         NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT      (5*HZ)
#define NLMCLNT_POLL_TIMEOUT    (30*HZ)

static int      nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int      nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int      nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int      nlm_stat_to_errno(u32 stat);
static void     nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static u32      nlm_cookie = 0x1234;

static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
{
        memcpy(c->data, &nlm_cookie, 4);
        memset(c->data+4, 0, 4);
        c->len = 4;
        nlm_cookie++;
}

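/*
 * A lockowner maps a local fl_owner_t to the 32-bit pid we put on
 * the wire. Lockowners are reference counted; the final put unhooks
 * the entry from the host's h_lockowners list and drops its host
 * reference.
 */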
static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
        atomic_inc(&lockowner->count);
        return lockowner;
}

static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
        if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
                return;
        list_del(&lockowner->list);
        spin_unlock(&lockowner->host->h_lock);
        nlm_release_host(lockowner->host);
        kfree(lockowner);
}

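/*
 * Check whether an NLM pid is already in use by some lockowner on
 * this host. Caller must hold host->h_lock.
 */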
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
        struct nlm_lockowner *lockowner;
        list_for_each_entry(lockowner, &host->h_lockowners, list) {
                if (lockowner->pid == pid)
                        return -EBUSY;
        }
        return 0;
}

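/*
 * Allocate the next per-host pid that is not already in use.
 * Caller must hold host->h_lock.
 */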
static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
        uint32_t res;
        do {
                res = host->h_pidcount++;
        } while (nlm_pidbusy(host, res) < 0);
        return res;
}

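/*
 * Look up the lockowner for an fl_owner_t and grab a reference to
 * it, or return NULL if none exists. Caller must hold host->h_lock.
 */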
static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
        struct nlm_lockowner *lockowner;
        list_for_each_entry(lockowner, &host->h_lockowners, list) {
                if (lockowner->owner != owner)
                        continue;
                return nlm_get_lockowner(lockowner);
        }
        return NULL;
}

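/*
 * Find or create the lockowner for this fl_owner_t. The allocation
 * is done with h_lock dropped, so we must recheck for a racing
 * insertion before committing the new entry.
 */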
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
        struct nlm_lockowner *res, *new = NULL;

        spin_lock(&host->h_lock);
        res = __nlm_find_lockowner(host, owner);
        if (res == NULL) {
                spin_unlock(&host->h_lock);
                new = kmalloc(sizeof(*new), GFP_KERNEL);
                spin_lock(&host->h_lock);
                res = __nlm_find_lockowner(host, owner);
                if (res == NULL && new != NULL) {
                        res = new;
                        atomic_set(&new->count, 1);
                        new->owner = owner;
                        new->pid = __nlm_alloc_pid(host);
                        new->host = nlm_get_host(host);
                        list_add(&new->list, &host->h_lockowners);
                        new = NULL;
                }
        }
        spin_unlock(&host->h_lock);
        kfree(new);
        return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_args *argp = &req->a_args;
        struct nlm_lock *lock = &argp->lock;

        nlmclnt_next_cookie(&argp->cookie);
        argp->state   = nsm_local_state;
        memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
        lock->caller  = system_utsname.nodename;
        lock->oh.data = req->a_owner;
        lock->oh.len  = sprintf(req->a_owner, "%d@%s",
                                current->pid, system_utsname.nodename);
        locks_copy_lock(&lock->fl, fl);
}

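/*
 * Release the per-lock private data (the lockowner reference)
 * attached to the file_lock embedded in the request.
 */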
static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
        struct file_lock *fl = &req->a_args.lock.fl;

        if (fl->fl_ops && fl->fl_ops->fl_release_private)
                fl->fl_ops->fl_release_private(fl);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
int
nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
        locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
        memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
        call->a_args.lock.caller = system_utsname.nodename;
        call->a_args.lock.oh.len = lock->oh.len;

        /* set default data area */
        call->a_args.lock.oh.data = call->a_owner;

        if (lock->oh.len > NLMCLNT_OHSIZE) {
                void *data = kmalloc(lock->oh.len, GFP_KERNEL);
                if (!data) {
                        nlmclnt_freegrantargs(call);
                        return 0;
                }
                call->a_args.lock.oh.data = (u8 *) data;
        }

        memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
        return 1;
}

void
nlmclnt_freegrantargs(struct nlm_rqst *call)
{
        struct file_lock *fl = &call->a_args.lock.fl;
        /*
         * Check whether we allocated memory for the owner.
         */
        if (call->a_args.lock.oh.data != (u8 *) call->a_owner) {
                kfree(call->a_args.lock.oh.data);
        }
        if (fl->fl_ops && fl->fl_ops->fl_release_private)
                fl->fl_ops->fl_release_private(fl);
}

/*
 * This is the main entry point for the NLM client.
 */
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
        struct nfs_server       *nfssrv = NFS_SERVER(inode);
        struct nlm_host         *host;
        struct nlm_rqst         reqst, *call = &reqst;
        sigset_t                oldset;
        unsigned long           flags;
        int                     status, proto, vers;

        vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
        if (NFS_PROTO(inode)->version > 3) {
                printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
                return -ENOLCK;
        }

        /* Retrieve transport protocol from NFS client */
        proto = NFS_CLIENT(inode)->cl_xprt->prot;

        if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
                return -ENOLCK;

        /* Create RPC client handle if not there, and copy soft
         * and intr flags from NFS client. */
        if (host->h_rpcclnt == NULL) {
                struct rpc_clnt *clnt;

                /* Bind an rpc client to this host handle (does not
                 * perform a portmapper lookup) */
                if (!(clnt = nlm_bind_host(host))) {
                        status = -ENOLCK;
                        goto done;
                }
                clnt->cl_softrtry = nfssrv->client->cl_softrtry;
                clnt->cl_intr = nfssrv->client->cl_intr;
        }

        /* Keep the old signal mask */
        spin_lock_irqsave(&current->sighand->siglock, flags);
        oldset = current->blocked;

        /* If we're cleaning up locks because the process is exiting,
         * perform the RPC call asynchronously. */
        if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
            && fl->fl_type == F_UNLCK
            && (current->flags & PF_EXITING)) {
                sigfillset(&current->blocked);  /* Mask all signals */
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);

                call = nlmclnt_alloc_call();
                if (!call) {
                        status = -ENOMEM;
                        goto out_restore;
                }
                call->a_flags = RPC_TASK_ASYNC;
        } else {
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
                memset(call, 0, sizeof(*call));
                locks_init_lock(&call->a_args.lock.fl);
                locks_init_lock(&call->a_res.lock.fl);
        }
        call->a_host = host;

        nlmclnt_locks_init_private(fl, host);

        /* Set up the argument struct */
        nlmclnt_setlockargs(call, fl);

        if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
                if (fl->fl_type != F_UNLCK) {
                        call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
                        status = nlmclnt_lock(call, fl);
                } else
                        status = nlmclnt_unlock(call, fl);
        } else if (IS_GETLK(cmd))
                status = nlmclnt_test(call, fl);
        else
                status = -EINVAL;

 out_restore:
        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->blocked = oldset;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);

done:
        dprintk("lockd: clnt proc returns %d\n", status);
        nlm_release_host(host);
        return status;
}
EXPORT_SYMBOL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *
nlmclnt_alloc_call(void)
{
        struct nlm_rqst *call;

        while (!signalled()) {
                call = kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL);
                if (call) {
                        memset(call, 0, sizeof(*call));
                        locks_init_lock(&call->a_args.lock.fl);
                        locks_init_lock(&call->a_res.lock.fl);
                        return call;
                }
                printk(KERN_WARNING "nlmclnt_alloc_call: failed, waiting for memory\n");
                schedule_timeout_interruptible(5*HZ);
        }
        return NULL;
}

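/*
 * Back off for up to NLMCLNT_GRACE_WAIT before retrying a call that
 * was rejected because the server is in its grace period. Returns 0
 * if the wait completed, or -EINTR if a signal was received.
 */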
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
        DEFINE_WAIT(wait);
        int status = -EINTR;

        prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
        if (!signalled()) {
                schedule_timeout(NLMCLNT_GRACE_WAIT);
                try_to_freeze();
                if (!signalled())
                        status = 0;
        }
        finish_wait(queue, &wait);
        return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct nlm_args *argp = &req->a_args;
        struct nlm_res  *resp = &req->a_res;
        struct rpc_message msg = {
                .rpc_argp       = argp,
                .rpc_resp       = resp,
        };
        int             status;

        dprintk("lockd: call procedure %d on %s\n",
                        (int)proc, host->h_name);

        do {
                if (host->h_reclaiming && !argp->reclaim)
                        goto in_grace_period;

                /* If we have no RPC client yet, create one. */
                if ((clnt = nlm_bind_host(host)) == NULL)
                        return -ENOLCK;
                msg.rpc_proc = &clnt->cl_procinfo[proc];

                /* Perform the RPC call. If an error occurs, try again */
                if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
                        dprintk("lockd: rpc_call returned error %d\n", -status);
                        switch (status) {
                        case -EPROTONOSUPPORT:
                                status = -EINVAL;
                                break;
                        case -ECONNREFUSED:
                        case -ETIMEDOUT:
                        case -ENOTCONN:
                                nlm_rebind_host(host);
                                status = -EAGAIN;
                                break;
                        case -ERESTARTSYS:
                                return signalled() ? -EINTR : status;
                        default:
                                break;
                        }
                        break;
                } else if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
                        dprintk("lockd: server in grace period\n");
                        if (argp->reclaim) {
                                printk(KERN_WARNING
                                     "lockd: spurious grace period reject?!\n");
                                return -ENOLCK;
                        }
                } else {
                        if (!argp->reclaim) {
                                /* We appear to be out of the grace period */
                                wake_up_all(&host->h_gracewait);
                        }
                        dprintk("lockd: server returns status %d\n", resp->status);
                        return 0;       /* Okay, call complete */
                }

in_grace_period:
                /*
                 * The server has rebooted and appears to be in the grace
                 * period during which locks are only allowed to be
                 * reclaimed.
                 * We can only back off and try again later.
                 */
                status = nlm_wait_on_grace(&host->h_gracewait);
        } while (status == 0);

        return status;
}

/*
 * Generic NLM call, async version.
 */
int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct rpc_message msg = {
                .rpc_argp       = &req->a_args,
                .rpc_resp       = &req->a_res,
        };
        int             status;

        dprintk("lockd: call procedure %d on %s (async)\n",
                        (int)proc, host->h_name);

        /* If we have no RPC client yet, create one. */
        if ((clnt = nlm_bind_host(host)) == NULL)
                return -ENOLCK;
        msg.rpc_proc = &clnt->cl_procinfo[proc];

        /* bootstrap and kick off the async RPC call */
        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);

        return status;
}

static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct nlm_args *argp = &req->a_args;
        struct nlm_res  *resp = &req->a_res;
        struct rpc_message msg = {
                .rpc_argp       = argp,
                .rpc_resp       = resp,
        };
        int             status;

        dprintk("lockd: call procedure %d on %s (async)\n",
                        (int)proc, host->h_name);

        /* If we have no RPC client yet, create one. */
        if ((clnt = nlm_bind_host(host)) == NULL)
                return -ENOLCK;
        msg.rpc_proc = &clnt->cl_procinfo[proc];

        /* Increment host refcount */
        nlm_get_host(host);
        /* bootstrap and kick off the async RPC call */
        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
        if (status < 0)
                nlm_release_host(host);
        return status;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
        int     status;

        status = nlmclnt_call(req, NLMPROC_TEST);
        nlmclnt_release_lockargs(req);
        if (status < 0)
                return status;

        status = req->a_res.status;
        if (status == NLM_LCK_GRANTED) {
                fl->fl_type = F_UNLCK;
        } else if (status == NLM_LCK_DENIED) {
                /*
                 * Report the conflicting lock back to the application.
                 */
                locks_copy_lock(fl, &req->a_res.lock.fl);
                fl->fl_pid = 0;
        } else {
                return nlm_stat_to_errno(req->a_res.status);
        }

        return 0;
}

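/*
 * file_lock operations for client-side NLM locks: copying a lock
 * takes an extra reference on the lockowner, and releasing the
 * lock's private data drops it again.
 */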
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
        memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
        nlm_get_lockowner(new->fl_u.nfs_fl.owner);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
        nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
        fl->fl_ops = NULL;
}

static struct file_lock_operations nlmclnt_lock_ops = {
        .fl_copy_lock = nlmclnt_locks_copy_lock,
        .fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
        BUG_ON(fl->fl_ops != NULL);
        fl->fl_u.nfs_fl.state = 0;
        fl->fl_u.nfs_fl.flags = 0;
        fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
        fl->fl_ops = &nlmclnt_lock_ops;
}

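/*
 * Apply the lock change locally through the VFS so that the
 * client's own lock state stays in sync with what the server did.
 */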
static void do_vfs_lock(struct file_lock *fl)
{
        int res = 0;
        switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
                case FL_POSIX:
                        res = posix_lock_file_wait(fl->fl_file, fl);
                        break;
                case FL_FLOCK:
                        res = flock_lock_file_wait(fl->fl_file, fl);
                        break;
                default:
                        BUG();
        }
        if (res < 0)
                printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
                                __FUNCTION__);
}

/*
 * LOCK: Try to create a lock
 *
 *                      Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A: Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_host *host = req->a_host;
        struct nlm_res  *resp = &req->a_res;
        long timeout;
        int status;

        if (!host->h_monitored && nsm_monitor(host) < 0) {
                printk(KERN_NOTICE "lockd: failed to monitor %s\n",
                                        host->h_name);
                status = -ENOLCK;
                goto out;
        }

        if (req->a_args.block) {
                status = nlmclnt_prepare_block(req, host, fl);
                if (status < 0)
                        goto out;
        }
        for (;;) {
                status = nlmclnt_call(req, NLMPROC_LOCK);
                if (status < 0)
                        goto out_unblock;
                if (resp->status != NLM_LCK_BLOCKED)
                        break;
                /* Wait on an NLM blocking lock */
                timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT);
                /* Did a reclaimer thread notify us of a server reboot? */
                if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD)
                        continue;
                if (resp->status != NLM_LCK_BLOCKED)
                        break;
                if (timeout >= 0)
                        continue;
                /* We were interrupted. Send a CANCEL request to the server
                 * and exit
                 */
                status = (int)timeout;
                goto out_unblock;
        }

        if (resp->status == NLM_LCK_GRANTED) {
                fl->fl_u.nfs_fl.state = host->h_state;
                fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
                fl->fl_flags |= FL_SLEEP;
                do_vfs_lock(fl);
        }
        status = nlm_stat_to_errno(resp->status);
out_unblock:
        nlmclnt_finish_block(req);
        /* Cancel the blocked request if it is still pending */
        if (resp->status == NLM_LCK_BLOCKED)
                nlmclnt_cancel(host, fl);
out:
        nlmclnt_release_lockargs(req);
        return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
        struct nlm_rqst reqst, *req;
        int             status;

        req = &reqst;
        memset(req, 0, sizeof(*req));
        locks_init_lock(&req->a_args.lock.fl);
        locks_init_lock(&req->a_res.lock.fl);
        req->a_host  = host;
        req->a_flags = 0;

        /* Set up the argument struct */
        nlmclnt_setlockargs(req, fl);
        req->a_args.reclaim = 1;

        if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
         && req->a_res.status == NLM_LCK_GRANTED)
                return 0;

        printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
                                "(errno %d, status %d)\n", fl->fl_pid,
                                status, req->a_res.status);

        /*
         * FIXME: This is a serious failure. We can
         *
         *  a.  Ignore the problem
         *  b.  Send the owning process some signal (Linux doesn't have
         *      SIGLOST, though...)
         *  c.  Retry the operation
         *
         * Until someone comes up with a simple implementation
         * for b or c, I'll choose option a.
         */

        return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_res  *resp = &req->a_res;
        int             status;

        /* Clear the GRANTED flag now so the lock doesn't get
         * reclaimed while we're stuck in the unlock call. */
        fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;

        if (req->a_flags & RPC_TASK_ASYNC) {
                status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
                                        &nlmclnt_unlock_ops);
                /* Hrmf... Do the unlock early since locks_remove_posix()
                 * really expects us to free the lock synchronously */
                do_vfs_lock(fl);
                if (status < 0) {
                        nlmclnt_release_lockargs(req);
                        kfree(req);
                }
                return status;
        }

        status = nlmclnt_call(req, NLMPROC_UNLOCK);
        nlmclnt_release_lockargs(req);
        if (status < 0)
                return status;

        do_vfs_lock(fl);
        if (resp->status == NLM_LCK_GRANTED)
                return 0;

        if (resp->status != NLM_LCK_DENIED_NOLOCKS)
                printk(KERN_WARNING "lockd: unexpected unlock status: %d\n",
                                resp->status);

        /* What to do now? I'm out of my depth... */

        return -ENOLCK;
}

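/*
 * Completion callback for the async UNLOCK call: retry after
 * rebinding on RPC errors, back off and retry while the server is
 * in its grace period, and otherwise free the request.
 */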
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
        struct nlm_rqst *req = data;
        int             status = req->a_res.status;

        if (RPC_ASSASSINATED(task))
                goto die;

        if (task->tk_status < 0) {
                dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
                goto retry_rebind;
        }
        if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
                rpc_delay(task, NLMCLNT_GRACE_WAIT);
                goto retry_unlock;
        }
        if (status != NLM_LCK_GRANTED)
                printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
        nlm_release_host(req->a_host);
        nlmclnt_release_lockargs(req);
        kfree(req);
        return;
 retry_rebind:
        nlm_rebind_host(req->a_host);
 retry_unlock:
        rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
        .rpc_call_done = nlmclnt_unlock_callback,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
int
nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
{
        struct nlm_rqst *req;
        unsigned long   flags;
        sigset_t        oldset;
        int             status;

        /* Block all signals while setting up call */
        spin_lock_irqsave(&current->sighand->siglock, flags);
        oldset = current->blocked;
        sigfillset(&current->blocked);
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);

        req = nlmclnt_alloc_call();
        if (!req) {
                /* Don't leave signals blocked on the error path */
                status = -ENOMEM;
                goto out_restore;
        }
        req->a_host  = host;
        req->a_flags = RPC_TASK_ASYNC;

        nlmclnt_setlockargs(req, fl);

        status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
        if (status < 0) {
                nlmclnt_release_lockargs(req);
                kfree(req);
        }

out_restore:
        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->blocked = oldset;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);

        return status;
}

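/*
 * Completion callback for the async CANCEL call: retry after
 * rebinding on RPC errors or if the server claims to hold no locks,
 * otherwise just release the request.
 */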
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
        struct nlm_rqst *req = data;

        if (RPC_ASSASSINATED(task))
                goto die;

        if (task->tk_status < 0) {
                dprintk("lockd: CANCEL call error %d, retrying.\n",
                                        task->tk_status);
                goto retry_cancel;
        }

        dprintk("lockd: cancel status %d (task %d)\n",
                        req->a_res.status, task->tk_pid);

        switch (req->a_res.status) {
        case NLM_LCK_GRANTED:
        case NLM_LCK_DENIED_GRACE_PERIOD:
                /* Everything's good */
                break;
        case NLM_LCK_DENIED_NOLOCKS:
                dprintk("lockd: CANCEL failed (server has no locks)\n");
                goto retry_cancel;
        default:
                printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
                        req->a_res.status);
        }

die:
        nlm_release_host(req->a_host);
        nlmclnt_release_lockargs(req);
        kfree(req);
        return;

retry_cancel:
        nlm_rebind_host(req->a_host);
        rpc_restart_call(task);
        rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
        .rpc_call_done = nlmclnt_cancel_callback,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(u32 status)
{
        switch (status) {
        case NLM_LCK_GRANTED:
                return 0;
        case NLM_LCK_DENIED:
                return -EAGAIN;
        case NLM_LCK_DENIED_NOLOCKS:
        case NLM_LCK_DENIED_GRACE_PERIOD:
                return -ENOLCK;
        case NLM_LCK_BLOCKED:
                printk(KERN_NOTICE "lockd: unexpected status NLM_LCK_BLOCKED\n");
                return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
        case NLM_DEADLCK:
                return -EDEADLK;
        case NLM_ROFS:
                return -EROFS;
        case NLM_STALE_FH:
                return -ESTALE;
        case NLM_FBIG:
                return -EOVERFLOW;
        case NLM_FAILED:
                return -ENOLCK;
#endif
        }
        printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
        return -ENOLCK;
}