nfsd4: CREATE_SESSION should update backchannel immediately
[pandora-kernel.git] / fs / nfsd / nfs4state.c
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/hash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49
50 #include "netns.h"
51
52 #define NFSDDBG_FACILITY                NFSDDBG_PROC
53
54 #define all_ones {{~0,~0},~0}
55 static const stateid_t one_stateid = {
56         .si_generation = ~0,
57         .si_opaque = all_ones,
58 };
59 static const stateid_t zero_stateid = {
60         /* all fields zero */
61 };
62 static const stateid_t currentstateid = {
63         .si_generation = 1,
64 };
65
66 static u64 current_sessionid = 1;
67
68 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
69 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
70 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
71
72 /* forward declarations */
73 static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
74
75 /* Locking: */
76
77 /* Currently used for almost all code touching nfsv4 state: */
78 static DEFINE_MUTEX(client_mutex);
79
80 /*
81  * Currently used for the del_recall_lru and file hash table.  In an
82  * effort to decrease the scope of the client_mutex, this spinlock may
83  * eventually cover more:
84  */
85 static DEFINE_SPINLOCK(state_lock);
86
87 static struct kmem_cache *openowner_slab;
88 static struct kmem_cache *lockowner_slab;
89 static struct kmem_cache *file_slab;
90 static struct kmem_cache *stateid_slab;
91 static struct kmem_cache *deleg_slab;
92
/*
 * Take the global client_mutex, which serializes almost all code touching
 * nfsv4 state (see the "Locking:" note above).
 */
void
nfs4_lock_state(void)
{
	mutex_lock(&client_mutex);
}
98
99 static void free_session(struct nfsd4_session *);
100
101 static bool is_session_dead(struct nfsd4_session *ses)
102 {
103         return ses->se_flags & NFS4_SESSION_DEAD;
104 }
105
/*
 * Mark a session dead, but only if nobody besides the caller holds a
 * reference to it.
 * @ref_held_by_me: number of references the caller itself holds.
 * Returns nfserr_jukebox (try again later) if other references remain.
 */
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}
113
/* Release the global client_mutex taken by nfs4_lock_state(). */
void
nfs4_unlock_state(void)
{
	mutex_unlock(&client_mutex);
}
119
120 static bool is_client_expired(struct nfs4_client *clp)
121 {
122         return clp->cl_time == 0;
123 }
124
/*
 * Expire a client by zeroing its cl_time, but only when nothing holds a
 * reference to it; otherwise return nfserr_jukebox so the caller retries.
 * Caller must hold nn->client_lock.
 */
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	clp->cl_time = 0;
	return nfs_ok;
}
132
/* Wrapper taking nn->client_lock around mark_client_expired_locked(). */
static __be32 mark_client_expired(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	__be32 ret;

	spin_lock(&nn->client_lock);
	ret = mark_client_expired_locked(clp);
	spin_unlock(&nn->client_lock);
	return ret;
}
143
/*
 * Take a reference on a client, refusing if it has already expired.
 * Caller must hold nn->client_lock.
 */
static __be32 get_client_locked(struct nfs4_client *clp)
{
	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}
151
152 /* must be called under the client_lock */
153 static inline void
154 renew_client_locked(struct nfs4_client *clp)
155 {
156         struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
157
158         if (is_client_expired(clp)) {
159                 WARN_ON(1);
160                 printk("%s: client (clientid %08x/%08x) already expired\n",
161                         __func__,
162                         clp->cl_clientid.cl_boot,
163                         clp->cl_clientid.cl_id);
164                 return;
165         }
166
167         dprintk("renewing client (clientid %08x/%08x)\n",
168                         clp->cl_clientid.cl_boot,
169                         clp->cl_clientid.cl_id);
170         list_move_tail(&clp->cl_lru, &nn->client_lru);
171         clp->cl_time = get_seconds();
172 }
173
/* Wrapper taking nn->client_lock around renew_client_locked(). */
static inline void
renew_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}
183
/*
 * Drop a reference taken by get_client_locked(); when the last reference
 * goes away, renew the (still live) client so it is not reaped right away.
 * Caller must hold nn->client_lock.
 */
static void put_client_renew_locked(struct nfs4_client *clp)
{
	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}
191
/*
 * Lock-taking counterpart of put_client_renew_locked().
 * atomic_dec_and_lock() only takes nn->client_lock (and returns true) when
 * the refcount drops to zero, so the unlock below pairs with that case only.
 */
static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}
202
/*
 * Take a reference on a session (and on its owning client).  Fails with
 * nfserr_badsession if the session is already dead, or propagates the
 * error from get_client_locked() if the client has expired.
 */
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}
215
/*
 * Drop a session reference; free the session once it is both unreferenced
 * and marked dead, then drop the accompanying client reference.
 * Caller must hold nn->client_lock.
 */
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}
224
/* Wrapper taking nn->client_lock around nfsd4_put_session_locked(). */
static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}
234
235
236 static inline u32
237 opaque_hashval(const void *ptr, int nbytes)
238 {
239         unsigned char *cptr = (unsigned char *) ptr;
240
241         u32 x = 0;
242         while (nbytes--) {
243                 x *= 37;
244                 x += *cptr++;
245         }
246         return x;
247 }
248
/* Return an nfs4_file to its slab cache. */
static void nfsd4_free_file(struct nfs4_file *f)
{
	kmem_cache_free(file_slab, f);
}
253
/*
 * Drop a reference on an nfs4_file; on the last put, unhash it (under
 * state_lock), release the inode reference, and free it.
 */
static inline void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	/* lock is only taken (and held on return) when the count hits zero */
	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del(&fi->fi_hash);
		spin_unlock(&state_lock);
		iput(fi->fi_inode);
		nfsd4_free_file(fi);
	}
}
266
/* Take an additional reference on an nfs4_file. */
static inline void
get_nfs4_file(struct nfs4_file *fi)
{
	atomic_inc(&fi->fi_ref);
}
272
273 static struct file *
274 __nfs4_get_fd(struct nfs4_file *f, int oflag)
275 {
276         if (f->fi_fds[oflag])
277                 return get_file(f->fi_fds[oflag]);
278         return NULL;
279 }
280
/*
 * Find a file suitable for writing: prefer the O_WRONLY fd, fall back to
 * O_RDWR.  Returns a referenced file or NULL.  Caller holds f->fi_lock.
 */
static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}
293
/* Lock-taking wrapper around find_writeable_file_locked(). */
static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}
305
/*
 * Find a file suitable for reading: prefer the O_RDONLY fd, fall back to
 * O_RDWR.  Returns a referenced file or NULL.  Caller holds f->fi_lock.
 */
static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}
317
/* Lock-taking wrapper around find_readable_file_locked(). */
static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}
329
330 static struct file *
331 find_any_file(struct nfs4_file *f)
332 {
333         struct file *ret;
334
335         spin_lock(&f->fi_lock);
336         ret = __nfs4_get_fd(f, O_RDWR);
337         if (!ret) {
338                 ret = __nfs4_get_fd(f, O_WRONLY);
339                 if (!ret)
340                         ret = __nfs4_get_fd(f, O_RDONLY);
341         }
342         spin_unlock(&f->fi_lock);
343         return ret;
344 }
345
346 static int num_delegations;
347 unsigned long max_delegations;
348
349 /*
350  * Open owner state (share locks)
351  */
352
353 /* hash tables for lock and open owners */
354 #define OWNER_HASH_BITS              8
355 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
356 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
357
358 static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
359 {
360         unsigned int ret;
361
362         ret = opaque_hashval(ownername->data, ownername->len);
363         ret += clientid;
364         return ret & OWNER_HASH_MASK;
365 }
366
367 /* hash table for nfs4_file */
368 #define FILE_HASH_BITS                   8
369 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
370
/* Hash an nfs4_file into file_hashtbl by the inode it refers to. */
static unsigned int file_hashval(struct inode *ino)
{
	/* XXX: why are we hashing on inode pointer, anyway? */
	return hash_ptr(ino, FILE_HASH_BITS);
}
376
377 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
378
/*
 * Bump the per-mode access counters for the given NFS4_SHARE_ACCESS_* bits.
 * No validation is done here; callers go through nfs4_file_get_access().
 */
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}
389
/*
 * Validate and account an access request against a file: reject bits
 * outside NFS4_SHARE_ACCESS_BOTH and any access denied by a previously
 * established deny mode, otherwise bump the access counters.
 * Caller must hold fp->fi_lock.
 */
static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}
406
407 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
408 {
409         /* Common case is that there is no deny mode. */
410         if (deny) {
411                 /* Does this deny mode make sense? */
412                 if (deny & ~NFS4_SHARE_DENY_BOTH)
413                         return nfserr_inval;
414
415                 if ((deny & NFS4_SHARE_DENY_READ) &&
416                     atomic_read(&fp->fi_access[O_RDONLY]))
417                         return nfserr_share_denied;
418
419                 if ((deny & NFS4_SHARE_DENY_WRITE) &&
420                     atomic_read(&fp->fi_access[O_WRONLY]))
421                         return nfserr_share_denied;
422         }
423         return nfs_ok;
424 }
425
/*
 * Drop one access count for @oflag (O_RDONLY or O_WRONLY).  When it hits
 * zero, detach the cached fd for that mode and — if the opposite mode's
 * count is also zero — the shared O_RDWR fd as well.  The fput()s happen
 * after dropping fi_lock, since fput may sleep.
 */
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	/* fi_lock is only taken (and held) when the count reaches zero */
	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		/* 1 - oflag maps O_RDONLY <-> O_WRONLY */
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}
444
/*
 * Release the access counts corresponding to the given
 * NFS4_SHARE_ACCESS_* bits (the inverse of __nfs4_file_get_access).
 */
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
454
/*
 * Allocate a generic stateid from @slab and register it in the client's
 * stateid idr.  Returns NULL on allocation or id-allocation failure.
 */
static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct
kmem_cache *slab)
{
	struct idr *stateids = &cl->cl_stateids;
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX) — which is what idr_alloc_cyclic()
	 * provides:
	 */
	new_id = idr_alloc_cyclic(stateids, stid, 0, 0, GFP_KERNEL);
	if (new_id < 0)
		goto out_free;
	stid->sc_client = cl;
	stid->sc_type = 0;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	stid->sc_stateid.si_generation = 0;

	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
490
/* Allocate an open/lock stateid for @clp from the stateid slab. */
static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
{
	return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
}
495
496 /*
497  * When we recall a delegation, we should be careful not to hand it
498  * out again straight away.
499  * To ensure this we keep a pair of bloom filters ('new' and 'old')
500  * in which the filehandles of recalled delegations are "stored".
501  * If a filehandle appear in either filter, a delegation is blocked.
502  * When a delegation is recalled, the filehandle is stored in the "new"
503  * filter.
504  * Every 30 seconds we swap the filters and clear the "new" one,
505  * unless both are empty of course.
506  *
507  * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
508  * low 3 bytes as hash-table indices.
509  *
510  * 'state_lock', which is always held when block_delegations() is called,
511  * is used to manage concurrent access.  Testing does not need the lock
512  * except when swapping the two filters.
513  */
static struct bloom_pair {
	int	entries, old_entries;	/* total entries / entries in the older filter */
	time_t	swap_time;		/* when the filters were last swapped */
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);	/* the two 256-bit bloom filters */
} blocked_delegations;
520
521 static int delegation_blocked(struct knfsd_fh *fh)
522 {
523         u32 hash;
524         struct bloom_pair *bd = &blocked_delegations;
525
526         if (bd->entries == 0)
527                 return 0;
528         if (seconds_since_boot() - bd->swap_time > 30) {
529                 spin_lock(&state_lock);
530                 if (seconds_since_boot() - bd->swap_time > 30) {
531                         bd->entries -= bd->old_entries;
532                         bd->old_entries = bd->entries;
533                         memset(bd->set[bd->new], 0,
534                                sizeof(bd->set[0]));
535                         bd->new = 1-bd->new;
536                         bd->swap_time = seconds_since_boot();
537                 }
538                 spin_unlock(&state_lock);
539         }
540         hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
541         if (test_bit(hash&255, bd->set[0]) &&
542             test_bit((hash>>8)&255, bd->set[0]) &&
543             test_bit((hash>>16)&255, bd->set[0]))
544                 return 1;
545
546         if (test_bit(hash&255, bd->set[1]) &&
547             test_bit((hash>>8)&255, bd->set[1]) &&
548             test_bit((hash>>16)&255, bd->set[1]))
549                 return 1;
550
551         return 0;
552 }
553
/*
 * Record a filehandle in the "new" bloom filter so that delegations on it
 * are blocked for a while.  Sets three bits derived from a 32-bit hash of
 * the handle.  If the filters were empty, restart the swap clock.
 */
static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	lockdep_assert_held(&state_lock);

	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);

	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
}
570
/*
 * Allocate and initialize a read delegation stateid for @clp on the file
 * named by @current_fh.  Returns NULL when the global delegation limit is
 * reached, when delegations on this filehandle are blocked, or on
 * allocation failure.
 * NOTE(review): the "num_delegations > max_delegations" test lets the
 * count reach max_delegations + 1 before refusing — confirm intended.
 */
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
{
	struct nfs4_delegation *dp;

	dprintk("NFSD alloc_init_deleg\n");
	if (num_delegations > max_delegations)
		return NULL;
	if (delegation_blocked(&current_fh->fh_handle))
		return NULL;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
	if (dp == NULL)
		return dp;
	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	num_delegations++;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_file = NULL;
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
	dp->dl_time = 0;
	atomic_set(&dp->dl_count, 1);
	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_run_cb_recall);
	return dp;
}
602
/* Remove a stateid from its client's idr (does not free the stid itself). */
static void remove_stid(struct nfs4_stid *s)
{
	struct idr *stateids = &s->sc_client->cl_stateids;

	idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
}
609
/* Return a stateid to the slab cache it was allocated from. */
static void nfs4_free_stid(struct kmem_cache *slab, struct nfs4_stid *s)
{
	kmem_cache_free(slab, s);
}
614
/*
 * Drop a reference on a delegation; on the last put, remove its stateid
 * from the client's idr, free it, and decrement the global count.
 */
void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
	if (atomic_dec_and_test(&dp->dl_count)) {
		remove_stid(&dp->dl_stid);
		nfs4_free_stid(deleg_slab, &dp->dl_stid);
		num_delegations--;
	}
}
624
/*
 * Drop one "delegee" reference on the file's lease; when the last one goes
 * away, remove the lease from the underlying file and drop the file ref.
 */
static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	if (!fp->fi_lease)
		return;
	if (atomic_dec_and_test(&fp->fi_delegees)) {
		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
		fp->fi_lease = NULL;
		fput(fp->fi_deleg_file);
		fp->fi_deleg_file = NULL;
	}
}
636
/* Invalidate a stateid by zeroing its type; lookups will no longer match. */
static void unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}
641
/*
 * Activate a delegation: mark its stateid as a delegation stateid and link
 * it onto the file's and the client's delegation lists.
 * Caller must hold state_lock.
 */
static void
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	spin_lock(&fp->fi_lock);
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	spin_unlock(&fp->fi_lock);
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
}
653
/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_file;

	spin_lock(&state_lock);
	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	/*
	 * NOTE(review): fp is dereferenced here (fp->fi_lock) before the
	 * "if (fp)" check below — either fp can never be NULL and the check
	 * is dead, or this would oops first.  Confirm which is intended.
	 */
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (fp) {
		nfs4_put_deleg_lease(fp);
		put_nfs4_file(fp);
		dp->dl_file = NULL;
	}
}
674
/* Take a revoked delegation off cl_revoked and drop its reference. */
static void destroy_revoked_delegation(struct nfs4_delegation *dp)
{
	list_del_init(&dp->dl_recall_lru);
	nfs4_put_delegation(dp);
}
680
/* Fully tear down a delegation: unhash it, then drop its reference. */
static void destroy_delegation(struct nfs4_delegation *dp)
{
	unhash_delegation(dp);
	nfs4_put_delegation(dp);
}
686
687 static void revoke_delegation(struct nfs4_delegation *dp)
688 {
689         struct nfs4_client *clp = dp->dl_stid.sc_client;
690
691         if (clp->cl_minorversion == 0)
692                 destroy_delegation(dp);
693         else {
694                 unhash_delegation(dp);
695                 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
696                 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
697         }
698 }
699
700 /* 
701  * SETCLIENTID state 
702  */
703
/* Hash a clientid into the client hash table. */
static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}
708
/* Hash a client string; only the first 8 bytes of @name are used. */
static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
713
714 /*
715  * We store the NONE, READ, WRITE, and BOTH bits separately in the
716  * st_{access,deny}_bmap field of the stateid, in order to track not
717  * only what share bits are currently in force, but also what
718  * combinations of share bits previous opens have used.  This allows us
719  * to enforce the recommendation of rfc 3530 14.2.19 that the server
720  * return an error if the client attempt to downgrade to a combination
721  * of share bits not explicable by closing some of its previous opens.
722  *
723  * XXX: This enforcement is actually incomplete, since we don't keep
724  * track of access/deny bit combinations; so, e.g., we allow:
725  *
726  *      OPEN allow read, deny write
727  *      OPEN allow both, deny none
728  *      DOWNGRADE allow read, deny none
729  *
730  * which we should reject.
731  */
/*
 * Collapse an st_{access,deny}_bmap into a single share mode: OR together
 * every bit combination (1..3) that has been recorded in the bitmap.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}
743
/* set share access for a given stateid: record this access combination
 * (1..NFS4_SHARE_ACCESS_BOTH) as a bit in st_access_bmap */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}
753
/* clear share access for a given stateid: drop this access combination's
 * bit from st_access_bmap */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}
763
764 /* test whether a given stateid has access */
765 static inline bool
766 test_access(u32 access, struct nfs4_ol_stateid *stp)
767 {
768         unsigned char mask = 1 << access;
769
770         return (bool)(stp->st_access_bmap & mask);
771 }
772
/* set share deny for a given stateid: record this deny combination
 * (1..NFS4_SHARE_DENY_BOTH) as a bit in st_deny_bmap */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}
782
/* clear share deny for a given stateid: drop this deny combination's bit
 * from st_deny_bmap */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}
792
793 /* test whether a given stateid is denying specific access */
794 static inline bool
795 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
796 {
797         unsigned char mask = 1 << deny;
798
799         return (bool)(stp->st_deny_bmap & mask);
800 }
801
/*
 * Translate NFS4_SHARE_ACCESS_* bits into an open(2) mode.  Any other
 * value is a caller bug; warn once and fall back to O_RDONLY.
 */
static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}
815
/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file by OR-ing the
 * deny bitmaps of all stateids still attached to it.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}
831
/*
 * Downgrade a stateid's deny bitmap to @deny: clear every recorded deny
 * combination not implied by the new mode, then refresh the file's
 * aggregate deny mode if anything changed.
 */
static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		/* clear combinations not fully covered by the new deny mode */
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_file);
}
849
/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_file;

	/* the file's deny mode must be refreshed now that this stateid's
	 * deny bits no longer count */
	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	/* drop one file access reference per recorded access combination */
	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_file, i);
		clear_access(i, stp);
	}
}
866
/* Unlink an open/lock stateid from its file's and its owner's lists. */
static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_file;

	spin_lock(&fp->fi_lock);
	list_del(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
}
876
/* Release a stateid's file accesses and its reference on the nfs4_file. */
static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
	release_all_access(stp);
	put_nfs4_file(stp->st_file);
	stp->st_file = NULL;
}
883
/* Remove a stateid from the client's idr and free it. */
static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
	remove_stid(&stp->st_stid);
	nfs4_free_stid(stateid_slab, &stp->st_stid);
}
889
/*
 * Tear down a lock stateid: unlink it everywhere, release the posix locks
 * held by its lockowner (via filp_close), and free it.
 */
static void __release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct file *file;

	list_del(&stp->st_locks);
	unhash_generic_stateid(stp);
	unhash_stid(&stp->st_stid);
	file = find_any_file(stp->st_file);
	if (file)
		filp_close(file, (fl_owner_t)lockowner(stp->st_stateowner));
	close_generic_stateid(stp);
	free_generic_stateid(stp);
}
903
/*
 * Unlink a lockowner from the owner hash and release every lock stateid
 * it still owns.
 */
static void unhash_lockowner(struct nfs4_lockowner *lo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&lo->lo_owner.so_strhash);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		__release_lock_stateid(stp);
	}
}
915
/* Free a lockowner's owner-name buffer and the lockowner itself. */
static void nfs4_free_lockowner(struct nfs4_lockowner *lo)
{
	kfree(lo->lo_owner.so_owner.data);
	kmem_cache_free(lockowner_slab, lo);
}
921
/* Unhash (releasing all its stateids) and free a lockowner. */
static void release_lockowner(struct nfs4_lockowner *lo)
{
	unhash_lockowner(lo);
	nfs4_free_lockowner(lo);
}
927
928 static void release_lockowner_if_empty(struct nfs4_lockowner *lo)
929 {
930         if (list_empty(&lo->lo_owner.so_stateids))
931                 release_lockowner(lo);
932 }
933
/*
 * Release a lock stateid, and its lockowner too if that was its last
 * stateid.
 */
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_lockowner *lo;

	lo = lockowner(stp->st_stateowner);
	__release_lock_stateid(stp);
	release_lockowner_if_empty(lo);
}
942
943 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp)
944 {
945         struct nfs4_ol_stateid *stp;
946
947         while (!list_empty(&open_stp->st_locks)) {
948                 stp = list_entry(open_stp->st_locks.next,
949                                 struct nfs4_ol_stateid, st_locks);
950                 release_lock_stateid(stp);
951         }
952 }
953
/*
 * Detach an open stateid: unlink it, release its lock stateids, and drop
 * its file accesses (but do not free the stateid itself).
 */
static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_generic_stateid(stp);
	release_open_stateid_locks(stp);
	close_generic_stateid(stp);
}
960
/* Fully release an open stateid: unhash it, then free it. */
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_open_stateid(stp);
	free_generic_stateid(stp);
}
966
/*
 * Unlink an openowner from the owner hash and its client, and release
 * every open stateid it still owns.
 */
static void unhash_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&oo->oo_owner.so_strhash);
	list_del(&oo->oo_perclient);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_open_stateid(stp);
	}
}
979
980 static void release_last_closed_stateid(struct nfs4_openowner *oo)
981 {
982         struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
983
984         if (s) {
985                 free_generic_stateid(s);
986                 oo->oo_last_closed_stid = NULL;
987         }
988 }
989
/* Free an openowner's name buffer and return it to its slab cache. */
static void nfs4_free_openowner(struct nfs4_openowner *oo)
{
        kfree(oo->oo_owner.so_owner.data);
        kmem_cache_free(openowner_slab, oo);
}
995
/*
 * Fully tear down an openowner: unhash it (releasing its stateids),
 * remove it from the close LRU, drop any cached last-closed stateid,
 * and free it.
 */
static void release_openowner(struct nfs4_openowner *oo)
{
        unhash_openowner(oo);
        list_del(&oo->oo_close_lru);
        release_last_closed_stateid(oo);
        nfs4_free_openowner(oo);
}
1003
1004 static inline int
1005 hash_sessionid(struct nfs4_sessionid *sessionid)
1006 {
1007         struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1008
1009         return sid->sequence % SESSION_HASH_SIZE;
1010 }
1011
#ifdef NFSD_DEBUG
/* Debug helper: dump a sessionid as four 32-bit words. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
        u32 *ptr = (u32 *)(&sessionid->data[0]);
        dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
/* No-op when NFSD_DEBUG is disabled. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
1025
1026 /*
1027  * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1028  * won't be used for replay.
1029  */
1030 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1031 {
1032         struct nfs4_stateowner *so = cstate->replay_owner;
1033
1034         if (nfserr == nfserr_replay_me)
1035                 return;
1036
1037         if (!seqid_mutating_err(ntohl(nfserr))) {
1038                 cstate->replay_owner = NULL;
1039                 return;
1040         }
1041         if (!so)
1042                 return;
1043         if (so->so_is_open_owner)
1044                 release_last_closed_stateid(openowner(so));
1045         so->so_seqid++;
1046         return;
1047 }
1048
/*
 * Generate a session id: the owning clientid, a global monotonically
 * increasing sequence number, and a zeroed reserved field.
 */
static void
gen_sessionid(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd4_sessionid *sid;

        sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
        sid->clientid = clp->cl_clientid;
        sid->sequence = current_sessionid++;
        sid->reserved = 0;
}
1060
1061 /*
1062  * The protocol defines ca_maxresponssize_cached to include the size of
1063  * the rpc header, but all we need to cache is the data starting after
1064  * the end of the initial SEQUENCE operation--the rest we regenerate
1065  * each time.  Therefore we can advertise a ca_maxresponssize_cached
1066  * value that is the number of bytes in our cache plus a few additional
1067  * bytes.  In order to stay on the safe side, and not promise more than
1068  * we can cache, those additional bytes must be the minimum possible: 24
1069  * bytes of rpc header (xid through accept state, with AUTH_NULL
1070  * verifier), 12 for the compound header (with zero-length tag), and 44
1071  * for the SEQUENCE op response:
1072  */
1073 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
1074
1075 static void
1076 free_session_slots(struct nfsd4_session *ses)
1077 {
1078         int i;
1079
1080         for (i = 0; i < ses->se_fchannel.maxreqs; i++)
1081                 kfree(ses->se_slots[i]);
1082 }
1083
1084 /*
1085  * We don't actually need to cache the rpc and session headers, so we
1086  * can allocate a little less for each slot:
1087  */
1088 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1089 {
1090         u32 size;
1091
1092         if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1093                 size = 0;
1094         else
1095                 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1096         return size + sizeof(struct nfsd4_slot);
1097 }
1098
1099 /*
1100  * XXX: If we run out of reserved DRC memory we could (up to a point)
1101  * re-negotiate active sessions and reduce their slot usage to make
1102  * room for new connections. For now we just fail the create session.
1103  */
1104 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1105 {
1106         u32 slotsize = slot_bytes(ca);
1107         u32 num = ca->maxreqs;
1108         int avail;
1109
1110         spin_lock(&nfsd_drc_lock);
1111         avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
1112                     nfsd_drc_max_mem - nfsd_drc_mem_used);
1113         num = min_t(int, num, avail / slotsize);
1114         nfsd_drc_mem_used += num * slotsize;
1115         spin_unlock(&nfsd_drc_lock);
1116
1117         return num;
1118 }
1119
1120 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1121 {
1122         int slotsize = slot_bytes(ca);
1123
1124         spin_lock(&nfsd_drc_lock);
1125         nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1126         spin_unlock(&nfsd_drc_lock);
1127 }
1128
/*
 * Allocate a session along with one slot per forechannel request,
 * each slot sized by slot_bytes(fattrs).  The slot pointer array is
 * allocated in one piece with the session itself; on any slot
 * allocation failure, everything allocated so far is unwound.
 * Returns NULL on allocation failure.
 */
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
                                           struct nfsd4_channel_attrs *battrs)
{
        int numslots = fattrs->maxreqs;
        int slotsize = slot_bytes(fattrs);
        struct nfsd4_session *new;
        int mem, i;

        BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
                        + sizeof(struct nfsd4_session) > PAGE_SIZE);
        mem = numslots * sizeof(struct nfsd4_slot *);

        new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
        if (!new)
                return NULL;
        /* allocate each struct nfsd4_slot and data cache in one piece */
        for (i = 0; i < numslots; i++) {
                new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
                if (!new->se_slots[i])
                        goto out_free;
        }

        memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
        memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

        return new;
out_free:
        /* Free only the slots allocated before the failure. */
        while (i--)
                kfree(new->se_slots[i]);
        kfree(new);
        return NULL;
}
1161
/* Drop the connection's transport reference and free it. */
static void free_conn(struct nfsd4_conn *c)
{
        svc_xprt_put(c->cn_xprt);
        kfree(c);
}
1167
/*
 * Transport-down callback: detach the connection from its session
 * (if it's still hashed there) and re-probe the callback channel,
 * which may have just been lost.  Runs under the client's cl_lock.
 */
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
        struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
        struct nfs4_client *clp = c->cn_session->se_client;

        spin_lock(&clp->cl_lock);
        if (!list_empty(&c->cn_persession)) {
                list_del(&c->cn_persession);
                free_conn(c);
        }
        nfsd4_probe_callback(clp);
        spin_unlock(&clp->cl_lock);
}
1181
1182 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1183 {
1184         struct nfsd4_conn *conn;
1185
1186         conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1187         if (!conn)
1188                 return NULL;
1189         svc_xprt_get(rqstp->rq_xprt);
1190         conn->cn_xprt = rqstp->rq_xprt;
1191         conn->cn_flags = flags;
1192         INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1193         return conn;
1194 }
1195
/* Attach a connection to a session; caller holds the client's cl_lock. */
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        conn->cn_session = ses;
        list_add(&conn->cn_persession, &ses->se_conns);
}
1201
/* Locked wrapper around __nfsd4_hash_conn(). */
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;

        spin_lock(&clp->cl_lock);
        __nfsd4_hash_conn(conn, ses);
        spin_unlock(&clp->cl_lock);
}
1210
/*
 * Register for transport-down notification on the connection's xprt.
 * Returns nonzero if the transport is already being shut down.
 */
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
        conn->cn_xpt_user.callback = nfsd4_conn_lost;
        return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
1216
/*
 * Hash a new connection into the session and hook up transport-down
 * notification, then synchronously re-probe the callback channel so
 * the backchannel state reflects the new connection immediately.
 */
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        int ret;

        nfsd4_hash_conn(conn, ses);
        ret = nfsd4_register_conn(conn);
        if (ret)
                /* oops; xprt is already down: */
                nfsd4_conn_lost(&conn->cn_xpt_user);
        /* We may have gained or lost a callback channel: */
        nfsd4_probe_callback_sync(ses->se_client);
}
1229
1230 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1231 {
1232         u32 dir = NFS4_CDFC4_FORE;
1233
1234         if (cses->flags & SESSION4_BACK_CHAN)
1235                 dir |= NFS4_CDFC4_BACK;
1236         return alloc_conn(rqstp, dir);
1237 }
1238
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
        struct nfs4_client *clp = s->se_client;
        struct nfsd4_conn *c;

        /*
         * cl_lock is dropped around unregister/free because those may
         * sleep; list_del_init() first so a concurrent nfsd4_conn_lost()
         * sees the connection as already removed and won't double-free.
         */
        spin_lock(&clp->cl_lock);
        while (!list_empty(&s->se_conns)) {
                c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
                list_del_init(&c->cn_persession);
                spin_unlock(&clp->cl_lock);

                unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
                free_conn(c);

                spin_lock(&clp->cl_lock);
        }
        spin_unlock(&clp->cl_lock);
}
1258
/* Free a session's slot caches and the session itself. */
static void __free_session(struct nfsd4_session *ses)
{
        free_session_slots(ses);
        kfree(ses);
}
1264
/*
 * Fully tear down a session: drop its connections, return its DRC
 * memory reservation, and free it.  Caller must hold the per-net
 * client_lock (asserted below).
 */
static void free_session(struct nfsd4_session *ses)
{
        struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);
        nfsd4_del_conns(ses);
        nfsd4_put_drc_mem(&ses->se_fchannel);
        __free_session(ses);
}
1274
/*
 * Initialize a freshly allocated session from the CREATE_SESSION
 * arguments, hash it into the per-net sessionid table and the client's
 * session list, and (if a back channel was requested) record the peer
 * address as the callback address.
 */
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
        int idx;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        new->se_client = clp;
        gen_sessionid(new);

        INIT_LIST_HEAD(&new->se_conns);

        /* Callback sequencing starts at 1 per the sessions protocol. */
        new->se_cb_seq_nr = 1;
        new->se_flags = cses->flags;
        new->se_cb_prog = cses->callback_prog;
        new->se_cb_sec = cses->cb_sec;
        atomic_set(&new->se_ref, 0);
        idx = hash_sessionid(&new->se_sessionid);
        spin_lock(&nn->client_lock);
        list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
        spin_lock(&clp->cl_lock);
        list_add(&new->se_perclnt, &clp->cl_sessions);
        spin_unlock(&clp->cl_lock);
        spin_unlock(&nn->client_lock);

        if (cses->flags & SESSION4_BACK_CHAN) {
                struct sockaddr *sa = svc_addr(rqstp);
                /*
                 * This is a little silly; with sessions there's no real
                 * use for the callback address.  Use the peer address
                 * as a reasonable default for now, but consider fixing
                 * the rpc client not to require an address in the
                 * future:
                 */
                rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
                clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
        }
}
1311
1312 /* caller must hold client_lock */
1313 static struct nfsd4_session *
1314 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1315 {
1316         struct nfsd4_session *elem;
1317         int idx;
1318         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1319
1320         dump_sessionid(__func__, sessionid);
1321         idx = hash_sessionid(sessionid);
1322         /* Search in the appropriate list */
1323         list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1324                 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1325                             NFS4_MAX_SESSIONID_LEN)) {
1326                         return elem;
1327                 }
1328         }
1329
1330         dprintk("%s: session not found\n", __func__);
1331         return NULL;
1332 }
1333
1334 static struct nfsd4_session *
1335 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1336                 __be32 *ret)
1337 {
1338         struct nfsd4_session *session;
1339         __be32 status = nfserr_badsession;
1340
1341         session = __find_in_sessionid_hashtbl(sessionid, net);
1342         if (!session)
1343                 goto out;
1344         status = nfsd4_get_session_locked(session);
1345         if (status)
1346                 session = NULL;
1347 out:
1348         *ret = status;
1349         return session;
1350 }
1351
/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
        /* Remove from the sessionid hash, then from the client's list. */
        list_del(&ses->se_hash);
        spin_lock(&ses->se_client->cl_lock);
        list_del(&ses->se_perclnt);
        spin_unlock(&ses->se_client->cl_lock);
}
1361
1362 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1363 static int
1364 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1365 {
1366         if (clid->cl_boot == nn->boot_time)
1367                 return 0;
1368         dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1369                 clid->cl_boot, clid->cl_id, nn->boot_time);
1370         return 1;
1371 }
1372
1373 /* 
1374  * XXX Should we use a slab cache ?
1375  * This type of memory management is somewhat inefficient, but we use it
1376  * anyway since SETCLIENTID is not a common operation.
1377  */
1378 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1379 {
1380         struct nfs4_client *clp;
1381
1382         clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1383         if (clp == NULL)
1384                 return NULL;
1385         clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1386         if (clp->cl_name.data == NULL) {
1387                 kfree(clp);
1388                 return NULL;
1389         }
1390         clp->cl_name.len = name.len;
1391         INIT_LIST_HEAD(&clp->cl_sessions);
1392         idr_init(&clp->cl_stateids);
1393         atomic_set(&clp->cl_refcount, 0);
1394         clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1395         INIT_LIST_HEAD(&clp->cl_idhash);
1396         INIT_LIST_HEAD(&clp->cl_openowners);
1397         INIT_LIST_HEAD(&clp->cl_delegations);
1398         INIT_LIST_HEAD(&clp->cl_lru);
1399         INIT_LIST_HEAD(&clp->cl_callbacks);
1400         INIT_LIST_HEAD(&clp->cl_revoked);
1401         spin_lock_init(&clp->cl_lock);
1402         rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1403         return clp;
1404 }
1405
/*
 * Free a client and everything still attached to it: remaining
 * sessions (which must be unreferenced), the callback wait queue,
 * credentials, name, and the stateid idr.  Caller must hold the
 * per-net client_lock (asserted below).
 */
static void
free_client(struct nfs4_client *clp)
{
        struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);
        while (!list_empty(&clp->cl_sessions)) {
                struct nfsd4_session *ses;
                ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
                                se_perclnt);
                list_del(&ses->se_perclnt);
                /* Sessions should have no outstanding references by now. */
                WARN_ON_ONCE(atomic_read(&ses->se_ref));
                free_session(ses);
        }
        rpc_destroy_wait_queue(&clp->cl_cb_waitq);
        free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_name.data);
        idr_destroy(&clp->cl_stateids);
        kfree(clp);
}
1426
/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
        struct nfsd4_session *ses;

        /* Drop from the LRU and unhash every session's sessionid entry. */
        list_del(&clp->cl_lru);
        spin_lock(&clp->cl_lock);
        list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
                list_del_init(&ses->se_hash);
        spin_unlock(&clp->cl_lock);
}
1439
/*
 * Tear down all state belonging to a client: delegations (revoked and
 * outstanding), openowners (and transitively their stateids and lock
 * state), the callback channel, and the client's hash/tree entries,
 * then free the client itself.
 */
static void
destroy_client(struct nfs4_client *clp)
{
        struct nfs4_openowner *oo;
        struct nfs4_delegation *dp;
        struct list_head reaplist;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        /*
         * Move delegations onto a private reaplist under state_lock,
         * then destroy them after the lock is dropped.
         */
        INIT_LIST_HEAD(&reaplist);
        spin_lock(&state_lock);
        while (!list_empty(&clp->cl_delegations)) {
                dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
                list_del_init(&dp->dl_perclnt);
                /* Ensure that deleg break won't try to requeue it */
                ++dp->dl_time;
                list_move(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&state_lock);
        while (!list_empty(&reaplist)) {
                dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
                destroy_delegation(dp);
        }
        /* Now do the same for delegations that were already revoked. */
        list_splice_init(&clp->cl_revoked, &reaplist);
        while (!list_empty(&reaplist)) {
                dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
                destroy_revoked_delegation(dp);
        }
        while (!list_empty(&clp->cl_openowners)) {
                oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
                release_openowner(oo);
        }
        nfsd4_shutdown_callback(clp);
        if (clp->cl_cb_conn.cb_xprt)
                svc_xprt_put(clp->cl_cb_conn.cb_xprt);
        list_del(&clp->cl_idhash);
        /* Erase from whichever name tree (confirmed/unconfirmed) holds it. */
        if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
                rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
        else
                rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
        spin_lock(&nn->client_lock);
        unhash_client_locked(clp);
        WARN_ON_ONCE(atomic_read(&clp->cl_refcount));
        free_client(clp);
        spin_unlock(&nn->client_lock);
}
1485
/* Remove the client's stable-storage record, then destroy it. */
static void expire_client(struct nfs4_client *clp)
{
        nfsd4_client_record_remove(clp);
        destroy_client(clp);
}
1491
/* Copy a client verifier into the target client. */
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
        memcpy(target->cl_verifier.data, source->data,
                        sizeof(target->cl_verifier.data));
}
1497
/* Copy the clientid (boot time + id) from one client to another. */
static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
        target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
        target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}
1503
/*
 * Deep-copy an svc_cred: duplicates the principal string and takes
 * references on the group info and gss mechanism.  Returns 0 on
 * success or -ENOMEM if the principal could not be duplicated.
 */
static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
        if (source->cr_principal) {
                target->cr_principal =
                                kstrdup(source->cr_principal, GFP_KERNEL);
                if (target->cr_principal == NULL)
                        return -ENOMEM;
        } else
                target->cr_principal = NULL;
        target->cr_flavor = source->cr_flavor;
        target->cr_uid = source->cr_uid;
        target->cr_gid = source->cr_gid;
        target->cr_group_info = source->cr_group_info;
        get_group_info(target->cr_group_info);
        target->cr_gss_mech = source->cr_gss_mech;
        if (source->cr_gss_mech)
                gss_mech_get(source->cr_gss_mech);
        return 0;
}
1523
1524 static long long
1525 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1526 {
1527         long long res;
1528
1529         res = o1->len - o2->len;
1530         if (res)
1531                 return res;
1532         return (long long)memcmp(o1->data, o2->data, o1->len);
1533 }
1534
/* Compare two recovery-directory names; both must be HEXDIR_LEN bytes. */
static int same_name(const char *n1, const char *n2)
{
        return 0 == memcmp(n1, n2, HEXDIR_LEN);
}
1539
/* Byte-compare two client verifiers. */
static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
        return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}
1545
/* Two clientids match iff both their boot time and id fields match. */
static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
        return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}
1551
/*
 * Element-wise comparison of two group lists.  Note this is
 * order-sensitive: the same gids in a different order compare unequal.
 */
static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
        int i;

        if (g1->ngroups != g2->ngroups)
                return false;
        for (i=0; i<g1->ngroups; i++)
                if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
                        return false;
        return true;
}
1563
1564 /*
1565  * RFC 3530 language requires clid_inuse be returned when the
1566  * "principal" associated with a requests differs from that previously
1567  * used.  We use uid, gid's, and gss principal string as our best
1568  * approximation.  We also don't want to allow non-gss use of a client
1569  * established using gss: in theory cr_principal should catch that
1570  * change, but in practice cr_principal can be null even in the gss case
1571  * since gssd doesn't always pass down a principal string.
1572  */
static bool is_gss_cred(struct svc_cred *cr)
{
        /* Is cr_flavor one of the gss "pseudoflavors"?: */
        return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}
1578
1579
/*
 * Compare two credentials per the policy described above: gss-ness,
 * uid, gid, and group list must all match; principals must match when
 * both are present, and a single missing principal is a mismatch.
 */
static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
        if ((is_gss_cred(cr1) != is_gss_cred(cr2))
                || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
                || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
                || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
                return false;
        /* Both NULL (or the same pointer) counts as a match. */
        if (cr1->cr_principal == cr2->cr_principal)
                return true;
        if (!cr1->cr_principal || !cr2->cr_principal)
                return false;
        return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
1594
/*
 * True iff the request arrived over a gss service that provides at
 * least integrity protection (integrity or privacy).
 */
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
        struct svc_cred *cr = &rqstp->rq_cred;
        u32 service;

        if (!cr->cr_gss_mech)
                return false;
        service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
        return service == RPC_GSS_SVC_INTEGRITY ||
               service == RPC_GSS_SVC_PRIVACY;
}
1606
/*
 * Check the request's credential against the client's machine
 * credential.  Trivially true when the client didn't register a
 * machine credential; otherwise requires the same gss mechanism,
 * integrity-protected transport, and a matching principal.
 */
static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
        struct svc_cred *cr = &rqstp->rq_cred;

        if (!cl->cl_mach_cred)
                return true;
        if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
                return false;
        if (!svc_rqst_integrity_protected(rqstp))
                return false;
        if (!cr->cr_principal)
                return false;
        return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
1621
/*
 * Generate a clientid: the server's boot time plus a monotonically
 * increasing counter (shared across nets, guarded by the caller's
 * locking — NOTE(review): no local locking here).
 */
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
        static u32 current_clientid = 1;

        clp->cl_clientid.cl_boot = nn->boot_time;
        clp->cl_clientid.cl_id = current_clientid++;
}
1629
/*
 * Generate a confirm verifier from the current time and a static
 * counter.  Not cryptographically random — just needs to differ
 * between successive calls.
 */
static void gen_confirm(struct nfs4_client *clp)
{
        __be32 verf[2];
        static u32 i;

        /*
         * This is opaque to client, so no need to byte-swap. Use
         * __force to keep sparse happy
         */
        verf[0] = (__force __be32)get_seconds();
        verf[1] = (__force __be32)i++;
        memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
1643
1644 static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
1645 {
1646         struct nfs4_stid *ret;
1647
1648         ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1649         if (!ret || !ret->sc_type)
1650                 return NULL;
1651         return ret;
1652 }
1653
1654 static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1655 {
1656         struct nfs4_stid *s;
1657
1658         s = find_stateid(cl, t);
1659         if (!s)
1660                 return NULL;
1661         if (typemask & s->sc_type)
1662                 return s;
1663         return NULL;
1664 }
1665
/*
 * Allocate and initialize a client record for an incoming request:
 * copies the credential, verifier, and peer address, and stamps the
 * current time.  Returns NULL on allocation failure (the partially
 * built client is freed under client_lock, as free_client requires).
 */
static struct nfs4_client *create_client(struct xdr_netobj name,
                struct svc_rqst *rqstp, nfs4_verifier *verf)
{
        struct nfs4_client *clp;
        struct sockaddr *sa = svc_addr(rqstp);
        int ret;
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);

        clp = alloc_client(name);
        if (clp == NULL)
                return NULL;

        ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
        if (ret) {
                spin_lock(&nn->client_lock);
                free_client(clp);
                spin_unlock(&nn->client_lock);
                return NULL;
        }
        INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_run_cb_null);
        clp->cl_time = get_seconds();
        clear_bit(0, &clp->cl_cb_slot_busy);
        copy_verf(clp, verf);
        rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
        gen_confirm(clp);
        clp->cl_cb_session = NULL;
        clp->net = net;
        return clp;
}
1696
/*
 * Insert a client into a name-keyed rbtree, ordered by compare_blob()
 * on cl_name.  Note that only strictly-greater goes left, so a
 * duplicate name is inserted to the right of the existing entry.
 */
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        struct nfs4_client *clp;

        while (*new) {
                clp = rb_entry(*new, struct nfs4_client, cl_namenode);
                parent = *new;

                if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        rb_link_node(&new_clp->cl_namenode, parent, new);
        rb_insert_color(&new_clp->cl_namenode, root);
}
1716
1717 static struct nfs4_client *
1718 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
1719 {
1720         long long cmp;
1721         struct rb_node *node = root->rb_node;
1722         struct nfs4_client *clp;
1723
1724         while (node) {
1725                 clp = rb_entry(node, struct nfs4_client, cl_namenode);
1726                 cmp = compare_blob(&clp->cl_name, name);
1727                 if (cmp > 0)
1728                         node = node->rb_left;
1729                 else if (cmp < 0)
1730                         node = node->rb_right;
1731                 else
1732                         return clp;
1733         }
1734         return NULL;
1735 }
1736
/*
 * Hash a client into the unconfirmed name tree and id table, clearing
 * its confirmed flag and renewing its lease.
 */
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
        unsigned int idhashval;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
        add_clp_to_name_tree(clp, &nn->unconf_name_tree);
        idhashval = clientid_hashval(clp->cl_clientid.cl_id);
        list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
        renew_client(clp);
}
1749
/*
 * Move a client from the unconfirmed tables to the confirmed ones,
 * set its confirmed flag, and renew its lease.
 */
static void
move_to_confirmed(struct nfs4_client *clp)
{
        unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
        list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
        rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
        add_clp_to_name_tree(clp, &nn->conf_name_tree);
        set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
        renew_client(clp);
}
1763
/*
 * Look a client up by clientid in the given id hash table.  The
 * sessions flag must match the client's minorversion-implied use of
 * sessions, otherwise the lookup fails.  Renews the client's lease on
 * a successful match.
 */
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
        struct nfs4_client *clp;
        unsigned int idhashval = clientid_hashval(clid->cl_id);

        list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
                if (same_clid(&clp->cl_clientid, clid)) {
                        if ((bool)clp->cl_minorversion != sessions)
                                return NULL;
                        renew_client(clp);
                        return clp;
                }
        }
        return NULL;
}
1780
1781 static struct nfs4_client *
1782 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1783 {
1784         struct list_head *tbl = nn->conf_id_hashtbl;
1785
1786         return find_client_in_id_table(tbl, clid, sessions);
1787 }
1788
1789 static struct nfs4_client *
1790 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1791 {
1792         struct list_head *tbl = nn->unconf_id_hashtbl;
1793
1794         return find_client_in_id_table(tbl, clid, sessions);
1795 }
1796
1797 static bool clp_used_exchangeid(struct nfs4_client *clp)
1798 {
1799         return clp->cl_exchange_flags != 0;
1800
1801
/* Look a confirmed client up by name. */
static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
        return find_clp_in_name_tree(name, &nn->conf_name_tree);
}
1807
/* Look an unconfirmed client up by name. */
static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
        return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
1813
1814 static void
1815 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
1816 {
1817         struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
1818         struct sockaddr *sa = svc_addr(rqstp);
1819         u32 scopeid = rpc_get_scope_id(sa);
1820         unsigned short expected_family;
1821
1822         /* Currently, we only support tcp and tcp6 for the callback channel */
1823         if (se->se_callback_netid_len == 3 &&
1824             !memcmp(se->se_callback_netid_val, "tcp", 3))
1825                 expected_family = AF_INET;
1826         else if (se->se_callback_netid_len == 4 &&
1827                  !memcmp(se->se_callback_netid_val, "tcp6", 4))
1828                 expected_family = AF_INET6;
1829         else
1830                 goto out_err;
1831
1832         conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
1833                                             se->se_callback_addr_len,
1834                                             (struct sockaddr *)&conn->cb_addr,
1835                                             sizeof(conn->cb_addr));
1836
1837         if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
1838                 goto out_err;
1839
1840         if (conn->cb_addr.ss_family == AF_INET6)
1841                 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1842
1843         conn->cb_prog = se->se_callback_prog;
1844         conn->cb_ident = se->se_callback_ident;
1845         memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
1846         return;
1847 out_err:
1848         conn->cb_addr.ss_family = AF_UNSPEC;
1849         conn->cb_addrlen = 0;
1850         dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
1851                 "will not receive delegations\n",
1852                 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1853
1854         return;
1855 }
1856
1857 /*
1858  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
1859  */
1860 static void
1861 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1862 {
1863         struct xdr_buf *buf = resp->xdr.buf;
1864         struct nfsd4_slot *slot = resp->cstate.slot;
1865         unsigned int base;
1866
1867         dprintk("--> %s slot %p\n", __func__, slot);
1868
1869         slot->sl_opcnt = resp->opcnt;
1870         slot->sl_status = resp->cstate.status;
1871
1872         slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
1873         if (nfsd4_not_cached(resp)) {
1874                 slot->sl_datalen = 0;
1875                 return;
1876         }
1877         base = resp->cstate.data_offset;
1878         slot->sl_datalen = buf->len - base;
1879         if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
1880                 WARN("%s: sessions DRC could not cache compound\n", __func__);
1881         return;
1882 }
1883
1884 /*
1885  * Encode the replay sequence operation from the slot values.
1886  * If cachethis is FALSE encode the uncached rep error on the next
1887  * operation which sets resp->p and increments resp->opcnt for
1888  * nfs4svc_encode_compoundres.
1889  *
1890  */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	/*
	 * Either the replayed SEQUENCE op's own status, or — in the
	 * uncached case above — the nfserr_retry_uncached_rep just
	 * stored into the following op.
	 */
	return op->status;
}
1910
1911 /*
1912  * The sequence operation is not cached because we can use the slot and
1913  * session values.
1914  */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Re-encode the SEQUENCE op itself; only the rest was cached. */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	/* Splice the cached reply body straight into the reply stream. */
	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		/* should not happen: reply size was bounded when cached */
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	/* Report the op count and status of the original reply. */
	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
1941
1942 /*
1943  * Set the exchange_id flags returned by the server.
1944  */
1945 static void
1946 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1947 {
1948         /* pNFS is not supported */
1949         new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
1950
1951         /* Referrals are supported, Migration is not. */
1952         new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1953
1954         /* set the wire flags to return to client. */
1955         clid->flags = new->cl_exchange_flags;
1956 }
1957
1958 static bool client_has_state(struct nfs4_client *clp)
1959 {
1960         /*
1961          * Note clp->cl_openowners check isn't quite right: there's no
1962          * need to count owners without stateid's.
1963          *
1964          * Also note we should probably be using this in 4.0 case too.
1965          */
1966         return !list_empty(&clp->cl_openowners)
1967                 || !list_empty(&clp->cl_delegations)
1968                 || !list_empty(&clp->cl_sessions);
1969 }
1970
/*
 * EXCHANGE_ID (RFC 5661 18.35): establish or update a client record.
 * Case numbers in comments below refer to RFC 5661 section 18.35.4.
 */
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	__be32 status;
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);
	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		/* machine creds require an integrity-protected transport */
		if (!svc_rqst_integrity_protected(rqstp))
			return nfserr_inval;
		/* fall through */
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
		/* fall through */
	case SP4_SSV:
		return nfserr_encr_alg_unsupp;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			/* stateless record under other creds: replace it */
			expire_client(conf);
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		/* case 5, client reboot */
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		expire_client(unconf);

	/* case 1 (normal case) */
out_new:
	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);

	gen_clid(new, nn);
	add_to_unconfirmed(new);
out_copy:
	/* fill in the reply from the (new or reused) client record */
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
	return status;
}
2087
2088 static __be32
2089 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2090 {
2091         dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2092                 slot_seqid);
2093
2094         /* The slot is in use, and no response has been sent. */
2095         if (slot_inuse) {
2096                 if (seqid == slot_seqid)
2097                         return nfserr_jukebox;
2098                 else
2099                         return nfserr_seq_misordered;
2100         }
2101         /* Note unsigned 32-bit arithmetic handles wraparound: */
2102         if (likely(seqid == slot_seqid + 1))
2103                 return nfs_ok;
2104         if (seqid == slot_seqid)
2105                 return nfserr_replay_cache;
2106         return nfserr_seq_misordered;
2107 }
2108
2109 /*
2110  * Cache the create session result into the create session single DRC
2111  * slot cache by saving the xdr structure. sl_seqid has been set.
2112  * Do this for solo or embedded create session operations.
2113  */
2114 static void
2115 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2116                            struct nfsd4_clid_slot *slot, __be32 nfserr)
2117 {
2118         slot->sl_status = nfserr;
2119         memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2120 }
2121
2122 static __be32
2123 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2124                             struct nfsd4_clid_slot *slot)
2125 {
2126         memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2127         return slot->sl_status;
2128 }
2129
/*
 * Smallest possible SEQUENCE-only compound request and reply, in
 * bytes: the lower bounds enforced on a fore channel's maxreq_sz /
 * maxresp_sz in check_forechannel_attrs().
 */
#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
2146
2147 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2148 {
2149         u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2150
2151         if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2152                 return nfserr_toosmall;
2153         if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2154                 return nfserr_toosmall;
2155         ca->headerpadsz = 0;
2156         ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2157         ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2158         ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2159         ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2160                         NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2161         ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2162         /*
2163          * Note decreasing slot size below client's request may make it
2164          * difficult for client to function correctly, whereas
2165          * decreasing the number of slots will (just?) affect
2166          * performance.  When short on memory we therefore prefer to
2167          * decrease number of slots instead of their size.  Clients that
2168          * request larger slots than they need will get poor results:
2169          */
2170         ca->maxreqs = nfsd4_get_drc_mem(ca);
2171         if (!ca->maxreqs)
2172                 return nfserr_jukebox;
2173
2174         return nfs_ok;
2175 }
2176
2177 #define NFSD_CB_MAX_REQ_SZ      ((NFS4_enc_cb_recall_sz + \
2178                                  RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
2179 #define NFSD_CB_MAX_RESP_SZ     ((NFS4_dec_cb_recall_sz + \
2180                                  RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
2181
2182 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2183 {
2184         ca->headerpadsz = 0;
2185
2186         /*
2187          * These RPC_MAX_HEADER macros are overkill, especially since we
2188          * don't even do gss on the backchannel yet.  But this is still
2189          * less than 1k.  Tighten up this estimate in the unlikely event
2190          * it turns out to be a problem for some client:
2191          */
2192         if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2193                 return nfserr_toosmall;
2194         if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2195                 return nfserr_toosmall;
2196         ca->maxresp_cached = 0;
2197         if (ca->maxops < 2)
2198                 return nfserr_toosmall;
2199
2200         return nfs_ok;
2201 }
2202
2203 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2204 {
2205         switch (cbs->flavor) {
2206         case RPC_AUTH_NULL:
2207         case RPC_AUTH_UNIX:
2208                 return nfs_ok;
2209         default:
2210                 /*
2211                  * GSS case: the spec doesn't allow us to return this
2212                  * error.  But it also doesn't allow us not to support
2213                  * GSS.
2214                  * I'd rather this fail hard than return some error the
2215                  * client might think it can already handle:
2216                  */
2217                 return nfserr_encr_alg_unsupp;
2218         }
2219 }
2220
/*
 * CREATE_SESSION (RFC 5661 18.36): create a session for a confirmed
 * or still-unconfirmed client, confirming the latter as a side effect.
 * The reply is cached in the client's single create-session slot so a
 * retransmission with the same seqid can be replayed.
 */
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	/* this also reserves DRC memory; released on error below */
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	/* allocate session and connection before taking the state lock */
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			/* retransmission: return the cached reply */
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
	} else if (unconf) {
		struct nfs4_client *old;
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		/* confirming this client replaces any same-named one */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired(old);
			if (status)
				goto out_free_conn;
			expire_client(old);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_init_conn(rqstp, conn, new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	nfs4_unlock_state();
	return status;
out_free_conn:
	nfs4_unlock_state();
	free_conn(conn);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}
2329
2330 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2331 {
2332         switch (*dir) {
2333         case NFS4_CDFC4_FORE:
2334         case NFS4_CDFC4_BACK:
2335                 return nfs_ok;
2336         case NFS4_CDFC4_FORE_OR_BOTH:
2337         case NFS4_CDFC4_BACK_OR_BOTH:
2338                 *dir = NFS4_CDFC4_BOTH;
2339                 return nfs_ok;
2340         };
2341         return nfserr_inval;
2342 }
2343
2344 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
2345 {
2346         struct nfsd4_session *session = cstate->session;
2347         struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2348         __be32 status;
2349
2350         status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2351         if (status)
2352                 return status;
2353         spin_lock(&nn->client_lock);
2354         session->se_cb_prog = bc->bc_cb_program;
2355         session->se_cb_sec = bc->bc_cb_sec;
2356         spin_unlock(&nn->client_lock);
2357
2358         nfsd4_probe_callback(session->se_client);
2359
2360         return nfs_ok;
2361 }
2362
/*
 * BIND_CONN_TO_SESSION: attach this request's connection to the named
 * session in the requested direction(s).
 */
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* the spec requires this op to be the only one in its compound */
	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	nfs4_lock_state();
	spin_lock(&nn->client_lock);
	/* on success this takes a session reference, dropped at "out" */
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	/* conn ownership passes to the session here */
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	nfs4_unlock_state();
	return status;
}
2399
2400 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2401 {
2402         if (!session)
2403                 return 0;
2404         return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2405 }
2406
/*
 * DESTROY_SESSION: tear down a session.  If the target is the session
 * this compound arrived on, the op must be the only one in the
 * compound, and we account for the extra reference we already hold.
 */
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	nfs4_lock_state();
	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		/* count the compound's own reference to this session */
		ref_held_by_me++;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
		goto out_put_session;
	/* expected refs: the lookup above plus ours, if any */
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	/* drop the lock across the synchronous callback probe */
	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	nfs4_unlock_state();
	return status;
}
2451
2452 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2453 {
2454         struct nfsd4_conn *c;
2455
2456         list_for_each_entry(c, &s->se_conns, cn_persession) {
2457                 if (c->cn_xprt == xpt) {
2458                         return c;
2459                 }
2460         }
2461         return NULL;
2462 }
2463
/*
 * Make sure the connection this SEQUENCE arrived on is bound to its
 * session, implicitly binding it when allowed.  Consumes "new" either
 * way: it is hashed into the session or freed.
 */
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		/* already bound: drop the duplicate, report nfs_ok */
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		/* no implicit binding for machine-cred clients */
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}
2490
2491 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2492 {
2493         struct nfsd4_compoundargs *args = rqstp->rq_argp;
2494
2495         return args->opcnt > session->se_fchannel.maxops;
2496 }
2497
2498 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2499                                   struct nfsd4_session *session)
2500 {
2501         struct xdr_buf *xb = &rqstp->rq_arg;
2502
2503         return xb->len > session->se_fchannel.maxreq_sz;
2504 }
2505
/*
 * SEQUENCE: validate the session/slot/seqid, replay a cached reply
 * when the request is a retransmission, and otherwise claim the slot
 * for this compound.  On success cstate holds slot/session/clp for
 * the rest of the compound; nfsd4_sequence_done() releases them.
 */
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* SEQUENCE must be the first op of its compound */
	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		/* a replay of a slot that never completed is bogus */
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	/* conn is consumed (hashed or freed) by the check below */
	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	/* bound the reply so it will fit in the slot's cache if needed */
	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	/* session reference from the lookup is kept until sequence_done */
	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	/* tell the client about backchannel and revoked-state trouble */
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}
2626
2627 void
2628 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
2629 {
2630         struct nfsd4_compound_state *cs = &resp->cstate;
2631
2632         if (nfsd4_has_session(cs)) {
2633                 if (cs->status != nfserr_replay_cache) {
2634                         nfsd4_store_cache_entry(resp);
2635                         cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
2636                 }
2637                 /* Drop session reference that was taken in nfsd4_sequence() */
2638                 nfsd4_put_session(cs->session);
2639         } else if (cs->clp)
2640                 put_client_renew(cs->clp);
2641 }
2642
/*
 * DESTROY_CLIENTID: remove a client record that holds no state.
 * A confirmed client that still has opens/delegations/sessions is
 * busy; an unknown clientid is stale.
 */
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf, *clp;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	/* a clientid can only be confirmed or unconfirmed, never both */
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		clp = conf;

		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!mach_creds_match(clp, rqstp)) {
		status = nfserr_wrong_cred;
		goto out;
	}
	expire_client(clp);
out:
	nfs4_unlock_state();
	return status;
}
2677
/*
 * RECLAIM_COMPLETE: the client signals it has finished reclaiming state
 * after a server restart.  Sets NFSD4_CLIENT_RECLAIM_COMPLETE on the
 * client and creates its stable-storage record; a second call returns
 * nfserr_complete_already.
 */
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	__be32 status = 0;

	if (rc->rca_one_fs) {
		/* Per-filesystem reclaim needs a current filehandle */
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		 return nfs_ok;
	}

	nfs4_lock_state();
	status = nfserr_complete_already;
	/* test_and_set makes the operation idempotent-but-reported */
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	nfs4_unlock_state();
	return status;
}
2716
/*
 * SETCLIENTID (NFSv4.0): create a new unconfirmed client record for
 * (name, verifier) and return the clientid + confirm verifier the
 * client must echo back in SETCLIENTID_CONFIRM.
 */
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *unconf, *new;
	__be32			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* Cases below refer to rfc 3530 section 14.2.33: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf) {
		/* case 0: */
		status = nfserr_clid_inuse;
		/* A client that used EXCHANGE_ID must not mix in v4.0 ops */
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	/* Any pending unconfirmed record for this name is superseded */
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		expire_client(unconf);
	status = nfserr_jukebox;
	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		goto out;
	if (conf && same_verf(&conf->cl_verifier, &clverifier))
		/* case 1: probable callback update */
		copy_clid(new, conf);
	else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	/* Hand the (clientid, confirm verifier) pair back to the client */
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}
2767
2768
/*
 * SETCLIENTID_CONFIRM (NFSv4.0): confirm an unconfirmed client created
 * by SETCLIENTID.  On a callback update (case 1) the confirmed client's
 * callback channel is replaced and re-probed; on the normal path (case
 * 3) the unconfirmed client is promoted, displacing any old confirmed
 * client with the same name.
 */
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;
	nfs4_lock_state();

	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * there's a bug somewhere.  Let's charitably assume it's our
	 * bug.
	 */
	status = nfserr_serverfault;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
		nfsd4_probe_callback(conf);
		expire_client(unconf);
	} else { /* case 3: normal case; new or rebooted client */
		/* An old confirmed client with the same name must go */
		conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (conf) {
			status = mark_client_expired(conf);
			if (status)
				goto out;
			expire_client(conf);
		}
		move_to_confirmed(unconf);
		nfsd4_probe_callback(unconf);
	}
out:
	nfs4_unlock_state();
	return status;
}
2825
/* Allocate an uninitialized nfs4_file; returns NULL on failure.
 * The caller is expected to run nfsd4_init_file() before use. */
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}
2830
2831 /* OPEN Share state helper functions */
2832 static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
2833 {
2834         unsigned int hashval = file_hashval(ino);
2835
2836         lockdep_assert_held(&state_lock);
2837
2838         atomic_set(&fp->fi_ref, 1);
2839         spin_lock_init(&fp->fi_lock);
2840         INIT_LIST_HEAD(&fp->fi_stateids);
2841         INIT_LIST_HEAD(&fp->fi_delegations);
2842         ihold(ino);
2843         fp->fi_inode = ino;
2844         fp->fi_had_conflict = false;
2845         fp->fi_lease = NULL;
2846         fp->fi_share_deny = 0;
2847         memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2848         memset(fp->fi_access, 0, sizeof(fp->fi_access));
2849         hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
2850 }
2851
/*
 * Destroy all nfsd4 slab caches.  kmem_cache_destroy() tolerates NULL,
 * so this is safe even if nfsd4_init_slabs() failed partway through.
 */
void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
}
2861
/*
 * Create the slab caches backing nfsd4 state objects.  On failure the
 * goto chain unwinds only the caches created so far and returns
 * -ENOMEM; on success returns 0.
 */
int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	return 0;

out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
2899
/*
 * Reset a v4.0 seqid-replay cache: no cached reply yet, buffer pointing
 * at the embedded storage.
 */
static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
}
2906
2907 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2908 {
2909         struct nfs4_stateowner *sop;
2910
2911         sop = kmem_cache_alloc(slab, GFP_KERNEL);
2912         if (!sop)
2913                 return NULL;
2914
2915         sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2916         if (!sop->so_owner.data) {
2917                 kmem_cache_free(slab, sop);
2918                 return NULL;
2919         }
2920         sop->so_owner.len = owner->len;
2921
2922         INIT_LIST_HEAD(&sop->so_stateids);
2923         sop->so_client = clp;
2924         init_nfs4_replay(&sop->so_replay);
2925         return sop;
2926 }
2927
/*
 * Make a new openowner findable: add it to the per-net owner-string
 * hash bucket and to its client's list of openowners.
 */
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}
2935
/*
 * Allocate and hash a new openowner for the client in @cstate, seeded
 * with the owner string and seqid from @open.  Sessions (v4.1+) clients
 * never do OPEN_CONFIRM, so their openowners start out confirmed.
 * Returns NULL on allocation failure.
 */
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = NFS4_OO_NEW;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	hash_openowner(oo, clp, strhashval);
	return oo;
}
2957
/*
 * Initialize an open stateid for (@fp, openowner-of-@open): record the
 * requested access/deny bits, take a file reference, and link the
 * stateid into the owner's and (under fi_lock) the file's lists.
 */
static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
	struct nfs4_openowner *oo = open->op_openowner;

	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	stp->st_stateowner = &oo->oo_owner;
	get_nfs4_file(fp);
	stp->st_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	set_access(open->op_share_access, stp);
	set_deny(open->op_share_deny, stp);
	stp->st_openstp = NULL;
	/* fi_stateids is walked under fi_lock (see nfsd4_find_existing_open) */
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
}
2976
/*
 * Park an openowner on the per-net close LRU, timestamped with the
 * current time, so the laundromat can reap it after a grace period
 * (kept around briefly to answer v4.0 CLOSE replays).
 */
static void
move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
}
2987
2988 static int
2989 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2990                                                         clientid_t *clid)
2991 {
2992         return (sop->so_owner.len == owner->len) &&
2993                 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2994                 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2995 }
2996
/*
 * Look up an openowner in the owner-string hash bucket @hashval that
 * matches @open's (owner, clientid).  @sessions says whether the caller
 * is a v4.1+ (sessions) request; if the matching owner belongs to a
 * client of the other flavor the search is abandoned and NULL is
 * returned.  A hit renews the owning client.
 */
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			bool sessions, struct nfsd_net *nn)
{
	struct nfs4_stateowner *so;
	struct nfs4_openowner *oo;
	struct nfs4_client *clp;

	list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
		/* Bucket is shared with lockowners; skip those */
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
			oo = openowner(so);
			clp = oo->oo_owner.so_client;
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client(oo->oo_owner.so_client);
			return oo;
		}
	}
	return NULL;
}
3019
/* search file_hashtbl[] for file */
/*
 * Look up the nfs4_file for @ino with state_lock held.  On a hit, a
 * reference is taken for the caller (drop with put_nfs4_file()).
 */
static struct nfs4_file *
find_file_locked(struct inode *ino)
{
	unsigned int hashval = file_hashval(ino);
	struct nfs4_file *fp;

	lockdep_assert_held(&state_lock);

	hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
		if (fp->fi_inode == ino) {
			get_nfs4_file(fp);
			return fp;
		}
	}
	return NULL;
}
3037
/*
 * Locking wrapper around find_file_locked(): look up the nfs4_file for
 * @ino, returning a referenced file or NULL.
 */
static struct nfs4_file *
find_file(struct inode *ino)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(ino);
	spin_unlock(&state_lock);
	return fp;
}
3048
/*
 * Return the nfs4_file for @ino, installing caller-provided @new if
 * none exists yet.  Lookup and insertion happen atomically under
 * state_lock so two racing opens can't both install a file.  The result
 * is referenced either way; if @new was not used the caller still owns
 * it and must free it.
 */
static struct nfs4_file *
find_or_add_file(struct inode *ino, struct nfs4_file *new)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(ino);
	if (fp == NULL) {
		nfsd4_init_file(new, ino);
		fp = new;
	}
	spin_unlock(&state_lock);

	return fp;
}
3064
3065 /*
3066  * Called to check deny when READ with all zero stateid or
3067  * WRITE with all zero or all one stateid
3068  */
3069 static __be32
3070 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3071 {
3072         struct inode *ino = current_fh->fh_dentry->d_inode;
3073         struct nfs4_file *fp;
3074         __be32 ret = nfs_ok;
3075
3076         fp = find_file(ino);
3077         if (!fp)
3078                 return ret;
3079         /* Check for conflicting share reservations */
3080         spin_lock(&fp->fi_lock);
3081         if (fp->fi_share_deny & deny_type)
3082                 ret = nfserr_locked;
3083         spin_unlock(&fp->fi_lock);
3084         put_nfs4_file(fp);
3085         return ret;
3086 }
3087
/*
 * Prepare a delegation for recall: block new delegations on the same
 * filehandle and, if not already queued, timestamp the delegation and
 * put it on the per-net recall LRU for the laundromat to time out.
 */
void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock
	 */
	spin_lock(&state_lock);
	block_delegations(&dp->dl_fh);
	/*
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	if (dp->dl_time == 0) {
		dp->dl_time = get_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}
3109
/*
 * Kick off an asynchronous CB_RECALL for one delegation.  Takes an
 * extra reference on the delegation for the callback's lifetime.
 */
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference.
	 */
	atomic_inc(&dp->dl_count);
	nfsd4_cb_recall(dp);
}
3122
/* Called from break_lease() with i_lock held. */
/*
 * Lease-break callback: a local operation conflicts with our
 * delegation(s) on this file.  Marks the file conflicted and issues
 * CB_RECALLs for every delegation on it.  We disable the lease-layer
 * timeout; the nfsd laundromat enforces the recall deadline instead.
 */
static void nfsd_break_deleg_cb(struct file_lock *fl)
{
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	fp->fi_had_conflict = true;
	spin_lock(&fp->fi_lock);
	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
		nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
}
3150
3151 static
3152 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
3153 {
3154         if (arg & F_UNLCK)
3155                 return lease_modify(onlist, arg);
3156         else
3157                 return -EAGAIN;
3158 }
3159
/* Lease callbacks installed on every delegation file_lock we hand to
 * the VFS (see nfs4_alloc_init_lease()). */
static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
3164
3165 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3166 {
3167         if (nfsd4_has_session(cstate))
3168                 return nfs_ok;
3169         if (seqid == so->so_seqid - 1)
3170                 return nfserr_replay_me;
3171         if (seqid == so->so_seqid)
3172                 return nfs_ok;
3173         return nfserr_bad_seqid;
3174 }
3175
/*
 * Resolve @clid to a confirmed client and cache it in @cstate.  If a
 * client is already cached (v4.1+ set it in SEQUENCE, or an earlier op
 * in this compound looked it up), it only needs to match @clid.  On a
 * fresh v4.0 lookup a reference is taken; nfsd4_sequence_done() drops
 * it at end of compound.
 */
static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	found = find_confirmed_client(clid, false, nn);
	if (!found)
		return nfserr_expired;

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	atomic_inc(&found->cl_refcount);
	return nfs_ok;
}
3207
/*
 * First phase of OPEN processing: validate the clientid, locate or
 * create the openowner, and pre-allocate the nfs4_file and stateid so
 * the later phase can't fail on allocation after the file exists.
 * Allocations stashed in @open are cleaned up by the caller on error.
 */
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
	oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
	open->op_openowner = oo;
	if (!oo) {
		goto new_owner;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	/* Confirmed owner: the v4.0 seqid must be sane */
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	/* Pre-allocate the open stateid as well */
	open->op_stp = nfs4_alloc_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;
	return nfs_ok;
}
3260
3261 static inline __be32
3262 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
3263 {
3264         if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
3265                 return nfserr_openmode;
3266         else
3267                 return nfs_ok;
3268 }
3269
3270 static int share_access_to_flags(u32 share_access)
3271 {
3272         return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
3273 }
3274
3275 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
3276 {
3277         struct nfs4_stid *ret;
3278
3279         ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
3280         if (!ret)
3281                 return NULL;
3282         return delegstateid(ret);
3283 }
3284
3285 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
3286 {
3287         return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
3288                open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
3289 }
3290
/*
 * Validate the delegation stateid presented with an OPEN.  *@dp is set
 * to the matching delegation when it exists and permits the requested
 * access, NULL otherwise.  For non-CLAIM_DELEGATE_CUR opens a missing
 * or mismatched delegation is not an error; for CLAIM_DELEGATE_CUR the
 * lookup failure is returned and a success marks the openowner
 * confirmed (the claim proves the client holds valid state).
 */
static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;

	*dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (*dp == NULL)
		goto out;
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(*dp, flags);
	if (status)
		*dp = NULL;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
3313
/*
 * Find an existing open stateid on @fp belonging to @open's openowner
 * (an open upgrade/downgrade of a previous open).  Lock stateids on the
 * same file are skipped.  Returns the stateid or NULL; no reference is
 * taken.
 */
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner == &oo->oo_owner) {
			ret = local;
			break;
		}
	}
	spin_unlock(&fp->fi_lock);
	return ret;
}
3333
3334 static inline int nfs4_access_to_access(u32 nfs4_access)
3335 {
3336         int flags = 0;
3337
3338         if (nfs4_access & NFS4_SHARE_ACCESS_READ)
3339                 flags |= NFSD_MAY_READ;
3340         if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
3341                 flags |= NFSD_MAY_WRITE;
3342         return flags;
3343 }
3344
3345 static inline __be32
3346 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3347                 struct nfsd4_open *open)
3348 {
3349         struct iattr iattr = {
3350                 .ia_valid = ATTR_SIZE,
3351                 .ia_size = 0,
3352         };
3353         if (!open->op_truncate)
3354                 return 0;
3355         if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
3356                 return nfserr_inval;
3357         return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
3358 }
3359
/*
 * Claim the access/deny modes an OPEN requests against @fp and record
 * them in stateid @stp, opening a struct file of the right open mode if
 * the nfs4_file doesn't have one cached yet.  The share bookkeeping is
 * done under fi_lock *before* the (sleeping) nfsd_open call so racing
 * opens see a consistent deny state; on any later failure the bitmap
 * and access counts are rolled back via out_put_access.
 */
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct file *filp = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		/* must drop the spinlock: nfsd_open can sleep */
		spin_unlock(&fp->fi_lock);
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		/* re-check: another open may have installed a file meanwhile */
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = filp;
			filp = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	/* we lost the install race above; drop our extra open */
	if (filp)
		fput(filp);

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	/* undo the share-mode bookkeeping done above */
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}
3424
/*
 * Handle an OPEN against an existing stateid of the same owner.  If the
 * requested access isn't held yet this degenerates to a full
 * nfs4_get_vfs_file(); otherwise only the deny mode needs checking and
 * (under fi_lock) recording.  A failed truncate rolls the deny bitmap
 * back to its previous value.
 */
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		old_deny_bmap = stp->st_deny_bmap;
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}
3453
/*
 * Handle a CLAIM_PREVIOUS open (state reclaimed from before a server
 * restart): the openowner is marked confirmed so no OPEN_CONFIRM is
 * required.  @has_session is currently unused.
 */
static void
nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
{
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
}
3459
3460 /* Should we give out recallable state?: */
3461 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
3462 {
3463         if (clp->cl_cb_state == NFSD4_CB_UP)
3464                 return true;
3465         /*
3466          * In the sessions case, since we don't have to establish a
3467          * separate connection for callbacks, we assume it's OK
3468          * until we hear otherwise:
3469          */
3470         return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
3471 }
3472
/*
 * Allocate and initialize the file_lock representing a delegation
 * lease on @fp: read or write type from @flag, covering the whole file,
 * owned by the nfs4_file so nfsd_break_deleg_cb() can find it.  Returns
 * NULL on allocation failure; fl_file is left for the caller to set.
 */
static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)fp;
	fl->fl_pid = current->tgid;
	return fl;
}
3489
/*
 * Install the first (read) delegation lease on dp->dl_file via the VFS
 * and hash the delegation.  On success the nfs4_file owns the lease and
 * the opened file, with fi_delegees starting at 1; on failure
 * everything allocated here is released and a -errno is returned.
 */
static int nfs4_setlease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_file;
	struct file_lock *fl;
	int status;

	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		return -ENOMEM;
	/* NOTE(review): find_readable_file() looks like it could return
	 * NULL here with no check before vfs_setlease() — confirm a read
	 * open is guaranteed to exist on the delegation path. */
	fl->fl_file = find_readable_file(fp);
	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
	if (status)
		goto out_free;
	fp->fi_lease = fl;
	fp->fi_deleg_file = fl->fl_file;
	atomic_set(&fp->fi_delegees, 1);
	spin_lock(&state_lock);
	hash_delegation_locked(dp, fp);
	spin_unlock(&state_lock);
	return 0;
out_free:
	if (fl->fl_file)
		fput(fl->fl_file);
	locks_free_lock(fl);
	return status;
}
3516
/*
 * Attach the delegation to @fp: if the file has no lease yet, take one
 * via nfs4_setlease(); otherwise share the existing lease by bumping
 * fi_delegees and hashing the delegation.
 *
 * Returns 0 on success, -EAGAIN if a conflicting open raced with us,
 * or an errno from nfs4_setlease().
 */
static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	/* Cheap unlocked early-out; rechecked under state_lock below. */
	if (fp->fi_had_conflict)
		return -EAGAIN;
	get_nfs4_file(fp);
	dp->dl_file = fp;
	if (!fp->fi_lease)
		return nfs4_setlease(dp);
	spin_lock(&state_lock);
	atomic_inc(&fp->fi_delegees);
	/* A conflict may have arrived between the unlocked check and
	 * taking the lock; bail out rather than hash a doomed delegation. */
	if (fp->fi_had_conflict) {
		spin_unlock(&state_lock);
		return -EAGAIN;
	}
	hash_delegation_locked(dp, fp);
	spin_unlock(&state_lock);
	return 0;
}
3535
3536 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
3537 {
3538         open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3539         if (status == -EAGAIN)
3540                 open->op_why_no_deleg = WND4_CONTENTION;
3541         else {
3542                 open->op_why_no_deleg = WND4_RESOURCE;
3543                 switch (open->op_deleg_want) {
3544                 case NFS4_SHARE_WANT_READ_DELEG:
3545                 case NFS4_SHARE_WANT_WRITE_DELEG:
3546                 case NFS4_SHARE_WANT_ANY_DELEG:
3547                         break;
3548                 case NFS4_SHARE_WANT_CANCEL:
3549                         open->op_why_no_deleg = WND4_CANCELLED;
3550                         break;
3551                 case NFS4_SHARE_WANT_NO_DELEG:
3552                         WARN_ON_ONCE(1);
3553                 }
3554         }
3555 }
3556
3557 /*
3558  * Attempt to hand out a delegation.
3559  *
3560  * Note we don't support write delegations, and won't until the vfs has
3561  * proper support for them.
3562  */
3563 static void
3564 nfs4_open_delegation(struct net *net, struct svc_fh *fh,
3565                      struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
3566 {
3567         struct nfs4_delegation *dp;
3568         struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
3569         int cb_up;
3570         int status = 0;
3571
3572         cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
3573         open->op_recall = 0;
3574         switch (open->op_claim_type) {
3575                 case NFS4_OPEN_CLAIM_PREVIOUS:
3576                         if (!cb_up)
3577                                 open->op_recall = 1;
3578                         if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
3579                                 goto out_no_deleg;
3580                         break;
3581                 case NFS4_OPEN_CLAIM_NULL:
3582                 case NFS4_OPEN_CLAIM_FH:
3583                         /*
3584                          * Let's not give out any delegations till everyone's
3585                          * had the chance to reclaim theirs....
3586                          */
3587                         if (locks_in_grace(net))
3588                                 goto out_no_deleg;
3589                         if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
3590                                 goto out_no_deleg;
3591                         /*
3592                          * Also, if the file was opened for write or
3593                          * create, there's a good chance the client's
3594                          * about to write to it, resulting in an
3595                          * immediate recall (since we don't support
3596                          * write delegations):
3597                          */
3598                         if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
3599                                 goto out_no_deleg;
3600                         if (open->op_create == NFS4_OPEN_CREATE)
3601                                 goto out_no_deleg;
3602                         break;
3603                 default:
3604                         goto out_no_deleg;
3605         }
3606         dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh);
3607         if (dp == NULL)
3608                 goto out_no_deleg;
3609         status = nfs4_set_delegation(dp, stp->st_file);
3610         if (status)
3611                 goto out_free;
3612
3613         memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
3614
3615         dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
3616                 STATEID_VAL(&dp->dl_stid.sc_stateid));
3617         open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
3618         return;
3619 out_free:
3620         destroy_delegation(dp);
3621 out_no_deleg:
3622         open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
3623         if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
3624             open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
3625                 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
3626                 open->op_recall = 1;
3627         }
3628
3629         /* 4.1 client asking for a delegation? */
3630         if (open->op_deleg_want)
3631                 nfsd4_open_deleg_none_ext(open, status);
3632         return;
3633 }
3634
3635 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
3636                                         struct nfs4_delegation *dp)
3637 {
3638         if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
3639             dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3640                 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3641                 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
3642         } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
3643                    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3644                 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3645                 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
3646         }
3647         /* Otherwise the client must be confused wanting a delegation
3648          * it already has, therefore we don't return
3649          * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
3650          */
3651 }
3652
/*
 * called with nfs4_lock_state() held.
 *
 * Second half of OPEN processing: find or create the nfs4_file, open or
 * upgrade the open stateid, and then optionally hand out a delegation.
 * The stateid returned to the client is copied into open->op_stateid.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(ino, open->op_file);
	if (fp != open->op_file) {
		/* File was already hashed: validate any delegation-based
		 * claim and look for an existing open to upgrade. */
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_existing_open(fp, open);
	} else {
		/* Our preallocated nfs4_file was inserted; it now belongs
		 * to the hash table, not to the open args. */
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
		status = nfserr_jukebox;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status)
			goto out;
	} else {
		/* Fresh open: consume the preallocated stateid. */
		stp = open->op_stp;
		open->op_stp = NULL;
		init_open_stateid(stp, fp, open);
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			release_open_stateid(stp);
			goto out;
		}
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	if (nfsd4_has_session(&resp->cstate)) {
		/* 4.1 client explicitly declined delegations. */
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	* Attempt to hand out a delegation. No error return, because the
	* OPEN succeeds even if we fail.
	*/
	nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
	/*
	* To finish the open response, we just need to set the rflags.
	*/
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	return status;
}
3746
3747 void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
3748 {
3749         if (open->op_openowner) {
3750                 struct nfs4_openowner *oo = open->op_openowner;
3751
3752                 if (!list_empty(&oo->oo_owner.so_stateids))
3753                         list_del_init(&oo->oo_close_lru);
3754                 if (oo->oo_flags & NFS4_OO_NEW) {
3755                         if (status) {
3756                                 release_openowner(oo);
3757                                 open->op_openowner = NULL;
3758                         } else
3759                                 oo->oo_flags &= ~NFS4_OO_NEW;
3760                 }
3761         }
3762         if (open->op_file)
3763                 nfsd4_free_file(open->op_file);
3764         if (open->op_stp)
3765                 free_generic_stateid(open->op_stp);
3766 }
3767
3768 __be32
3769 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3770             clientid_t *clid)
3771 {
3772         struct nfs4_client *clp;
3773         __be32 status;
3774         struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3775
3776         nfs4_lock_state();
3777         dprintk("process_renew(%08x/%08x): starting\n", 
3778                         clid->cl_boot, clid->cl_id);
3779         status = lookup_clientid(clid, cstate, nn);
3780         if (status)
3781                 goto out;
3782         clp = cstate->clp;
3783         status = nfserr_cb_path_down;
3784         if (!list_empty(&clp->cl_delegations)
3785                         && clp->cl_cb_state != NFSD4_CB_UP)
3786                 goto out;
3787         status = nfs_ok;
3788 out:
3789         nfs4_unlock_state();
3790         return status;
3791 }
3792
/*
 * Leave the NFSv4 grace period: tell the recovery tracker and the lock
 * manager, and shrink the next grace window to the lease time.  Safe to
 * call repeatedly; only the first call does any work.
 */
static void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	nfsd4_record_grace_done(nn, nn->boot_time);
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * Now that every NFSv4 client has had the chance to recover and
	 * to see the (possibly new, possibly shorter) lease time, we
	 * can safely set the next grace time to the current lease time:
	 */
	nn->nfsd4_grace = nn->nfsd4_lease;
}
3811
/*
 * Periodic state reaper: expire clients whose lease ran out, revoke
 * delegations whose recall timed out, and release openowners lingering
 * on the close LRU.
 *
 * Returns the number of seconds until the laundromat should run again
 * (never less than NFSD_LAUNDROMAT_MINTIMEOUT).
 */
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, new_timeo = nn->nfsd4_lease;

	nfs4_lock_state();

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			/* LRU order: everything after this is newer. */
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_move(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	/* Destroy the expired clients outside client_lock. */
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		expire_client(clp);
	}
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		/* del_recall_lru is shared; skip other network namespaces. */
		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
			continue;
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	/* Revoke the timed-out delegations outside state_lock. */
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		revoke_delegation(dp);
	}
	list_for_each_safe(pos, next, &nn->close_lru) {
		oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
		if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		release_openowner(oo);
	}
	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	nfs4_unlock_state();
	return new_timeo;
}
3879
3880 static struct workqueue_struct *laundry_wq;
3881 static void laundromat_main(struct work_struct *);
3882
3883 static void
3884 laundromat_main(struct work_struct *laundry)
3885 {
3886         time_t t;
3887         struct delayed_work *dwork = container_of(laundry, struct delayed_work,
3888                                                   work);
3889         struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
3890                                            laundromat_work);
3891
3892         t = nfs4_laundromat(nn);
3893         dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3894         queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
3895 }
3896
3897 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3898 {
3899         if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3900                 return nfserr_bad_stateid;
3901         return nfs_ok;
3902 }
3903
3904 static inline int
3905 access_permit_read(struct nfs4_ol_stateid *stp)
3906 {
3907         return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
3908                 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
3909                 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
3910 }
3911
3912 static inline int
3913 access_permit_write(struct nfs4_ol_stateid *stp)
3914 {
3915         return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
3916                 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
3917 }
3918
3919 static
3920 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3921 {
3922         __be32 status = nfserr_openmode;
3923
3924         /* For lock stateid's, we test the parent open, not the lock: */
3925         if (stp->st_openstp)
3926                 stp = stp->st_openstp;
3927         if ((flags & WR_STATE) && !access_permit_write(stp))
3928                 goto out;
3929         if ((flags & RD_STATE) && !access_permit_read(stp))
3930                 goto out;
3931         status = nfs_ok;
3932 out:
3933         return status;
3934 }
3935
3936 static inline __be32
3937 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
3938 {
3939         if (ONE_STATEID(stateid) && (flags & RD_STATE))
3940                 return nfs_ok;
3941         else if (locks_in_grace(net)) {
3942                 /* Answer in remaining cases depends on existence of
3943                  * conflicting state; so we must wait out the grace period. */
3944                 return nfserr_grace;
3945         } else if (flags & WR_STATE)
3946                 return nfs4_share_conflict(current_fh,
3947                                 NFS4_SHARE_DENY_WRITE);
3948         else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3949                 return nfs4_share_conflict(current_fh,
3950                                 NFS4_SHARE_DENY_READ);
3951 }
3952
/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	if (!locks_in_grace(net))
		return 0;
	return mandatory_lock(inode) != 0;
}
3962
3963 /* Returns true iff a is later than b: */
3964 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3965 {
3966         return (s32)(a->si_generation - b->si_generation) > 0;
3967 }
3968
/*
 * Compare a client-supplied stateid generation (in) against our current
 * one (ref).  Returns nfs_ok on match, bad_stateid for a generation
 * from the future, old_stateid for one from the past.
 */
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.  For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight.  The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
3996
/*
 * Classify a stateid on behalf of TEST_STATEID: report its current
 * validity without changing any state.
 */
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *ols;
	__be32 status;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return nfserr_bad_stateid;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				 sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return nfserr_bad_stateid;
	}
	s = find_stateid(cl, stateid);
	if (!s)
		return nfserr_bad_stateid;
	/* TEST_STATEID is sessions-only, so generation 0 matches any. */
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		return status;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		return nfs_ok;
	case NFS4_REVOKED_DELEG_STID:
		return nfserr_deleg_revoked;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		/* An open that was never confirmed is not yet usable. */
		ols = openlockstateid(s);
		if (ols->st_stateowner->so_is_open_owner
				&& !(openowner(ols->st_stateowner)->oo_flags
						& NFS4_OO_CONFIRMED))
			return nfserr_bad_stateid;
		return nfs_ok;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* Fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		return nfserr_bad_stateid;
	}
}
4041
4042 static __be32
4043 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4044                      stateid_t *stateid, unsigned char typemask,
4045                      struct nfs4_stid **s, struct nfsd_net *nn)
4046 {
4047         __be32 status;
4048
4049         if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4050                 return nfserr_bad_stateid;
4051         status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
4052         if (status == nfserr_stale_clientid) {
4053                 if (cstate->session)
4054                         return nfserr_bad_stateid;
4055                 return nfserr_stale_stateid;
4056         }
4057         if (status)
4058                 return status;
4059         *s = find_stateid_by_type(cstate->clp, stateid, typemask);
4060         if (!*s)
4061                 return nfserr_bad_stateid;
4062         return nfs_ok;
4063 }
4064
/*
* Checks for stateid operations
*
* Validates the stateid supplied with a READ/WRITE-class operation and,
* if filpp is non-NULL, returns a referenced struct file to do the I/O
* through.  Handles special stateids, delegation stateids, and
* open/lock stateids.
*/
__be32
nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
			   stateid_t *stateid, int flags, struct file **filpp)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	struct svc_fh *current_fh = &cstate->current_fh;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct file *file = NULL;
	__be32 status;

	if (filpp)
		*filpp = NULL;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	/* All-zeros/all-ones stateids take a separate, lockless path. */
	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return check_special_stateids(net, current_fh, stateid, flags);

	nfs4_lock_state();

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status)
		goto out;
	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		dp = delegstateid(s);
		status = nfs4_check_delegmode(dp, flags);
		if (status)
			goto out;
		if (filpp) {
			/* A hashed delegation should always carry a file;
			 * anything else is a server bug. */
			file = dp->dl_file->fi_deleg_file;
			if (!file) {
				WARN_ON_ONCE(1);
				status = nfserr_serverfault;
				goto out;
			}
			get_file(file);
		}
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		stp = openlockstateid(s);
		status = nfs4_check_fh(current_fh, stp);
		if (status)
			goto out;
		/* NOTE(review): this bails out on an unconfirmed open while
		 * status is still nfs_ok from nfs4_check_fh() — it looks
		 * like an error code was intended here; confirm against
		 * callers before changing. */
		if (stp->st_stateowner->so_is_open_owner
		    && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
			goto out;
		status = nfs4_check_openmode(stp, flags);
		if (status)
			goto out;
		if (filpp) {
			if (flags & RD_STATE)
				file = find_readable_file(stp->st_file);
			else
				file = find_writeable_file(stp->st_file);
		}
		break;
	default:
		status = nfserr_bad_stateid;
		goto out;
	}
	status = nfs_ok;
	if (file)
		*filpp = file;
out:
	nfs4_unlock_state();
	return status;
}
4146
4147 static __be32
4148 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
4149 {
4150         struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
4151
4152         if (check_for_locks(stp->st_file, lo))
4153                 return nfserr_locks_held;
4154         release_lockowner_if_empty(lo);
4155         return nfs_ok;
4156 }
4157
4158 /*
4159  * Test if the stateid is valid
4160  */
4161 __be32
4162 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4163                    struct nfsd4_test_stateid *test_stateid)
4164 {
4165         struct nfsd4_test_stateid_id *stateid;
4166         struct nfs4_client *cl = cstate->session->se_client;
4167
4168         nfs4_lock_state();
4169         list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
4170                 stateid->ts_id_status =
4171                         nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
4172         nfs4_unlock_state();
4173
4174         return nfs_ok;
4175 }
4176
/*
 * FREE_STATEID: release a stateid the client no longer needs.  Only
 * lock stateids with no locks outstanding and revoked delegations can
 * actually be freed here.
 */
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	nfs4_lock_state();
	s = find_stateid(cl, stateid);
	if (!s)
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		/* Active delegations must be returned, not freed. */
		ret = nfserr_locks_held;
		goto out;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		/* FREE_STATEID is sessions-only: generation 0 matches any. */
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			goto out;
		if (s->sc_type == NFS4_LOCK_STID)
			ret = nfsd4_free_lock_stateid(openlockstateid(s));
		else
			/* Open stateids are released via CLOSE instead. */
			ret = nfserr_locks_held;
		break;
	case NFS4_REVOKED_DELEG_STID:
		dp = delegstateid(s);
		destroy_revoked_delegation(dp);
		ret = nfs_ok;
		break;
	default:
		ret = nfserr_bad_stateid;
	}
out:
	nfs4_unlock_state();
	return ret;
}
4217
4218 static inline int
4219 setlkflg (int type)
4220 {
4221         return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
4222                 RD_STATE : WR_STATE;
4223 }
4224
4225 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
4226 {
4227         struct svc_fh *current_fh = &cstate->current_fh;
4228         struct nfs4_stateowner *sop = stp->st_stateowner;
4229         __be32 status;
4230
4231         status = nfsd4_check_seqid(cstate, sop, seqid);
4232         if (status)
4233                 return status;
4234         if (stp->st_stid.sc_type == NFS4_CLOSED_STID
4235                 || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4236                 /*
4237                  * "Closed" stateid's exist *only* to return
4238                  * nfserr_replay_me from the previous step, and
4239                  * revoked delegations are kept only for free_stateid.
4240                  */
4241                 return nfserr_bad_stateid;
4242         status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4243         if (status)
4244                 return status;
4245         return nfs4_check_fh(current_fh, stp);
4246 }
4247
/* 
 * Checks for sequence id mutating operations. 
 *
 * Looks up the stateid, records the stateowner for v4.0 replay
 * handling, and runs the standard seqid checks.  On success, *stpp
 * receives the open/lock stateid.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	/*
	 * For v4.0, remember the owner so a replayed request can be
	 * answered from its cache; sessions have their own replay cache.
	 */
	if (!nfsd4_has_session(cstate))
		cstate->replay_owner = stp->st_stateowner;

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	return status;
}
4277
/*
 * As nfs4_preprocess_seqid_op(), restricted to open stateids, and
 * additionally requiring that the openowner has been confirmed.
 */
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
                                                 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
        __be32 status;
        struct nfs4_openowner *oo;

        status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
                                                NFS4_OPEN_STID, stpp, nn);
        if (status)
                return status;
        oo = openowner((*stpp)->st_stateowner);
        /* Unconfirmed owners may not perform this class of operation */
        if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
                return nfserr_bad_stateid;
        return nfs_ok;
}
4293
/*
 * OPEN_CONFIRM: confirm a new openowner after its first OPEN, bump the
 * stateid generation, and create the client's stable-storage record.
 * Only meaningful for NFSv4.0; 4.1+ sessions have no unconfirmed owners.
 */
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                   struct nfsd4_open_confirm *oc)
{
        __be32 status;
        struct nfs4_openowner *oo;
        struct nfs4_ol_stateid *stp;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
                        cstate->current_fh.fh_dentry);

        status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
        if (status)
                return status;

        nfs4_lock_state();

        status = nfs4_preprocess_seqid_op(cstate,
                                        oc->oc_seqid, &oc->oc_req_stateid,
                                        NFS4_OPEN_STID, &stp, nn);
        if (status)
                goto out;
        oo = openowner(stp->st_stateowner);
        /* Confirming an already-confirmed owner is a bad stateid */
        status = nfserr_bad_stateid;
        if (oo->oo_flags & NFS4_OO_CONFIRMED)
                goto out;
        oo->oo_flags |= NFS4_OO_CONFIRMED;
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
        dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
                __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

        nfsd4_client_record_create(oo->oo_owner.so_client);
        status = nfs_ok;
out:
        nfsd4_bump_seqid(cstate, status);
        /* on a pending replay the state lock is dropped after encoding */
        if (!cstate->replay_owner)
                nfs4_unlock_state();
        return status;
}
4335
4336 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
4337 {
4338         if (!test_access(access, stp))
4339                 return;
4340         nfs4_file_put_access(stp->st_file, access);
4341         clear_access(access, stp);
4342 }
4343
4344 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
4345 {
4346         switch (to_access) {
4347         case NFS4_SHARE_ACCESS_READ:
4348                 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
4349                 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4350                 break;
4351         case NFS4_SHARE_ACCESS_WRITE:
4352                 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
4353                 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4354                 break;
4355         case NFS4_SHARE_ACCESS_BOTH:
4356                 break;
4357         default:
4358                 WARN_ON_ONCE(1);
4359         }
4360 }
4361
/*
 * OPEN_DOWNGRADE: reduce an open stateid's access/deny bits.  The
 * requested bits must be a subset of those currently held, otherwise
 * the request is invalid.
 */
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
                     struct nfsd4_compound_state *cstate,
                     struct nfsd4_open_downgrade *od)
{
        __be32 status;
        struct nfs4_ol_stateid *stp;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 
                        cstate->current_fh.fh_dentry);

        /* We don't yet support WANT bits: */
        if (od->od_deleg_want)
                dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
                        od->od_deleg_want);

        nfs4_lock_state();
        status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
                                        &od->od_stateid, &stp, nn);
        if (status)
                goto out; 
        /* both requested access and deny must be subsets of what's held */
        status = nfserr_inval;
        if (!test_access(od->od_share_access, stp)) {
                dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
                        stp->st_access_bmap, od->od_share_access);
                goto out;
        }
        if (!test_deny(od->od_share_deny, stp)) {
                dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
                        stp->st_deny_bmap, od->od_share_deny);
                goto out;
        }
        nfs4_stateid_downgrade(stp, od->od_share_access);

        reset_union_bmap_deny(od->od_share_deny, stp);

        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
        status = nfs_ok;
out:
        nfsd4_bump_seqid(cstate, status);
        /* on a pending replay the state lock is dropped after encoding */
        if (!cstate->replay_owner)
                nfs4_unlock_state();
        return status;
}
4408
/*
 * Tear down an open stateid on CLOSE.  For 4.1+ clients the stateid
 * (and an empty openowner) can be freed immediately; for 4.0 the
 * stateid is parked on the openowner so a CLOSE replay can still be
 * answered from it.
 */
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
        struct nfs4_client *clp = s->st_stid.sc_client;
        struct nfs4_openowner *oo = openowner(s->st_stateowner);

        /* mark CLOSED before unhashing so lookups see a consistent type */
        s->st_stid.sc_type = NFS4_CLOSED_STID;
        unhash_open_stateid(s);

        if (clp->cl_minorversion) {
                free_generic_stateid(s);
                if (list_empty(&oo->oo_owner.so_stateids))
                        release_openowner(oo);
        } else {
                oo->oo_last_closed_stid = s;
                /*
                 * In the 4.0 case we need to keep the owners around a
                 * little while to handle CLOSE replay.
                 */
                if (list_empty(&oo->oo_owner.so_stateids))
                        move_to_close_lru(oo, clp->net);
        }
}
4431
4432 /*
4433  * nfs4_unlock_state() called after encode
4434  */
/*
 * CLOSE: validate the seqid/stateid, bump the stateid generation so the
 * client gets a fresh value in the reply, and retire the open stateid.
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
            struct nfsd4_close *close)
{
        __be32 status;
        struct nfs4_ol_stateid *stp;
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);

        dprintk("NFSD: nfsd4_close on file %pd\n", 
                        cstate->current_fh.fh_dentry);

        nfs4_lock_state();
        /* NFS4_CLOSED_STID is accepted here so CLOSE replays are caught */
        status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
                                        &close->cl_stateid,
                                        NFS4_OPEN_STID|NFS4_CLOSED_STID,
                                        &stp, nn);
        nfsd4_bump_seqid(cstate, status);
        if (status)
                goto out; 
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

        nfsd4_close_open_stateid(stp);
out:
        /* on a pending replay the state lock is dropped after encoding */
        if (!cstate->replay_owner)
                nfs4_unlock_state();
        return status;
}
4464
4465 __be32
4466 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4467                   struct nfsd4_delegreturn *dr)
4468 {
4469         struct nfs4_delegation *dp;
4470         stateid_t *stateid = &dr->dr_stateid;
4471         struct nfs4_stid *s;
4472         __be32 status;
4473         struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4474
4475         if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4476                 return status;
4477
4478         nfs4_lock_state();
4479         status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
4480         if (status)
4481                 goto out;
4482         dp = delegstateid(s);
4483         status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
4484         if (status)
4485                 goto out;
4486
4487         destroy_delegation(dp);
4488 out:
4489         nfs4_unlock_state();
4490
4491         return status;
4492 }
4493
4494
4495 #define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
4496
4497 static inline u64
4498 end_offset(u64 start, u64 len)
4499 {
4500         u64 end;
4501
4502         end = start + len;
4503         return end >= start ? end: NFS4_MAX_UINT64;
4504 }
4505
4506 /* last octet in a range */
4507 static inline u64
4508 last_byte_offset(u64 start, u64 len)
4509 {
4510         u64 end;
4511
4512         WARN_ON_ONCE(!len);
4513         end = start + len;
4514         return end > start ? end - 1: NFS4_MAX_UINT64;
4515 }
4516
4517 /*
4518  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
4519  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
4520  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
4521  * locking, this prevents us from being completely protocol-compliant.  The
4522  * real solution to this problem is to start using unsigned file offsets in
4523  * the VFS, but this is a very deep change!
4524  */
4525 static inline void
4526 nfs4_transform_lock_offset(struct file_lock *lock)
4527 {
4528         if (lock->fl_start < 0)
4529                 lock->fl_start = OFFSET_MAX;
4530         if (lock->fl_end < 0)
4531                 lock->fl_end = OFFSET_MAX;
4532 }
4533
/*
 * Hack!: For now, we're defining this just so we can use a pointer to it
 * as a unique cookie to identify our (NFSv4's) posix locks.
 */
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
};
4538
4539 static inline void
4540 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
4541 {
4542         struct nfs4_lockowner *lo;
4543
4544         if (fl->fl_lmops == &nfsd_posix_mng_ops) {
4545                 lo = (struct nfs4_lockowner *) fl->fl_owner;
4546                 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
4547                                         lo->lo_owner.so_owner.len, GFP_KERNEL);
4548                 if (!deny->ld_owner.data)
4549                         /* We just don't care that much */
4550                         goto nevermind;
4551                 deny->ld_owner.len = lo->lo_owner.so_owner.len;
4552                 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
4553         } else {
4554 nevermind:
4555                 deny->ld_owner.len = 0;
4556                 deny->ld_owner.data = NULL;
4557                 deny->ld_clientid.cl_boot = 0;
4558                 deny->ld_clientid.cl_id = 0;
4559         }
4560         deny->ld_start = fl->fl_start;
4561         deny->ld_length = NFS4_MAX_UINT64;
4562         if (fl->fl_end != NFS4_MAX_UINT64)
4563                 deny->ld_length = fl->fl_end - fl->fl_start + 1;        
4564         deny->ld_type = NFS4_READ_LT;
4565         if (fl->fl_type != F_RDLCK)
4566                 deny->ld_type = NFS4_WRITE_LT;
4567 }
4568
4569 static struct nfs4_lockowner *
4570 find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
4571                 struct nfsd_net *nn)
4572 {
4573         unsigned int strhashval = ownerstr_hashval(clid->cl_id, owner);
4574         struct nfs4_stateowner *so;
4575
4576         list_for_each_entry(so, &nn->ownerstr_hashtbl[strhashval], so_strhash) {
4577                 if (so->so_is_open_owner)
4578                         continue;
4579                 if (!same_owner_str(so, owner, clid))
4580                         continue;
4581                 return lockowner(so);
4582         }
4583         return NULL;
4584 }
4585
4586 /*
4587  * Alloc a lock owner structure.
4588  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 
4589  * occurred. 
4590  *
4591  * strhashval = ownerstr_hashval
4592  */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
        struct nfs4_lockowner *lo;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
        if (!lo)
                return NULL;
        INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
        lo->lo_owner.so_is_open_owner = 0;
        /* It is the openowner seqid that will be incremented in encode in the
         * case of new lockowners; so increment the lock seqid manually: */
        lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
        /* make the new owner findable by find_lockowner_str() */
        list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
        return lo;
}
4609
/*
 * Allocate and initialize a lock stateid for (lockowner, file), linking
 * it to the open stateid it derives from.  Returns NULL on allocation
 * failure.
 */
static struct nfs4_ol_stateid *
alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
{
        struct nfs4_ol_stateid *stp;
        struct nfs4_client *clp = lo->lo_owner.so_client;

        stp = nfs4_alloc_stateid(clp);
        if (stp == NULL)
                return NULL;
        stp->st_stid.sc_type = NFS4_LOCK_STID;
        list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
        stp->st_stateowner = &lo->lo_owner;
        /* the stateid holds its own reference on the file */
        get_nfs4_file(fp);
        stp->st_file = fp;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
        list_add(&stp->st_locks, &open_stp->st_locks);
        /* the per-file stateid list is protected by fi_lock */
        spin_lock(&fp->fi_lock);
        list_add(&stp->st_perfile, &fp->fi_stateids);
        spin_unlock(&fp->fi_lock);
        return stp;
}
4633
4634 static struct nfs4_ol_stateid *
4635 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
4636 {
4637         struct nfs4_ol_stateid *lst;
4638
4639         list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
4640                 if (lst->st_file == fp)
4641                         return lst;
4642         }
4643         return NULL;
4644 }
4645
4646
4647 static int
4648 check_lock_length(u64 offset, u64 length)
4649 {
4650         return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
4651              LOFF_OVERFLOW(offset, length)));
4652 }
4653
4654 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
4655 {
4656         struct nfs4_file *fp = lock_stp->st_file;
4657
4658         lockdep_assert_held(&fp->fi_lock);
4659
4660         if (test_access(access, lock_stp))
4661                 return;
4662         __nfs4_file_get_access(fp, access);
4663         set_access(access, lock_stp);
4664 }
4665
/*
 * Find the lockowner for this (client, owner) pair, creating one if
 * needed, then find or create its lock stateid for the open file.
 * *new is set only when a fresh stateid was allocated; on that path a
 * failed stateid allocation also releases a now-empty lockowner.
 */
static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
{
        struct nfs4_file *fi = ost->st_file;
        struct nfs4_openowner *oo = openowner(ost->st_stateowner);
        struct nfs4_client *cl = oo->oo_owner.so_client;
        struct nfs4_lockowner *lo;
        unsigned int strhashval;
        struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id);

        lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, nn);
        if (!lo) {
                strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
                                &lock->v.new.owner);
                lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
                if (lo == NULL)
                        return nfserr_jukebox;
        } else {
                /* with an existing lockowner, seqids must be the same */
                if (!cstate->minorversion &&
                    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
                        return nfserr_bad_seqid;
        }

        *lst = find_lock_stateid(lo, fi);
        if (*lst == NULL) {
                *lst = alloc_init_lock_stateid(lo, fi, ost);
                if (*lst == NULL) {
                        release_lockowner_if_empty(lo);
                        return nfserr_jukebox;
                }
                *new = true;
        }
        return nfs_ok;
}
4700
4701 /*
4702  *  LOCK operation 
4703  */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
           struct nfsd4_lock *lock)
{
        struct nfs4_openowner *open_sop = NULL;
        struct nfs4_lockowner *lock_sop = NULL;
        struct nfs4_ol_stateid *lock_stp;
        struct nfs4_file *fp;
        struct file *filp = NULL;
        struct file_lock *file_lock = NULL;
        struct file_lock *conflock = NULL;
        __be32 status = 0;
        bool new_state = false;
        int lkflg;
        int err;
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);

        dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
                (long long) lock->lk_offset,
                (long long) lock->lk_length);

        if (check_lock_length(lock->lk_offset, lock->lk_length))
                 return nfserr_inval;

        if ((status = fh_verify(rqstp, &cstate->current_fh,
                                S_IFREG, NFSD_MAY_LOCK))) {
                dprintk("NFSD: nfsd4_lock: permission denied!\n");
                return status;
        }

        nfs4_lock_state();

        /*
         * A "new" lock carries an open stateid and creates a lockowner;
         * otherwise the client presents an existing lock stateid.
         */
        if (lock->lk_is_new) {
                struct nfs4_ol_stateid *open_stp = NULL;

                if (nfsd4_has_session(cstate))
                        /* See rfc 5661 18.10.3: given clientid is ignored: */
                        memcpy(&lock->v.new.clientid,
                                &cstate->session->se_client->cl_clientid,
                                sizeof(clientid_t));

                status = nfserr_stale_clientid;
                if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
                        goto out;

                /* validate and update open stateid and open seqid */
                status = nfs4_preprocess_confirmed_seqid_op(cstate,
                                        lock->lk_new_open_seqid,
                                        &lock->lk_new_open_stateid,
                                        &open_stp, nn);
                if (status)
                        goto out;
                open_sop = openowner(open_stp->st_stateowner);
                status = nfserr_bad_stateid;
                if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
                                                &lock->v.new.clientid))
                        goto out;
                status = lookup_or_create_lock_state(cstate, open_stp, lock,
                                                        &lock_stp, &new_state);
        } else
                status = nfs4_preprocess_seqid_op(cstate,
                                       lock->lk_old_lock_seqid,
                                       &lock->lk_old_lock_stateid,
                                       NFS4_LOCK_STID, &lock_stp, nn);
        if (status)
                goto out;
        lock_sop = lockowner(lock_stp->st_stateowner);

        /* the lock type must be compatible with the open access mode */
        lkflg = setlkflg(lock->lk_type);
        status = nfs4_check_openmode(lock_stp, lkflg);
        if (status)
                goto out;

        /* reclaims only during grace; new locks only outside grace */
        status = nfserr_grace;
        if (locks_in_grace(net) && !lock->lk_reclaim)
                goto out;
        status = nfserr_no_grace;
        if (!locks_in_grace(net) && lock->lk_reclaim)
                goto out;

        file_lock = locks_alloc_lock();
        if (!file_lock) {
                dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
                status = nfserr_jukebox;
                goto out;
        }

        fp = lock_stp->st_file;
        locks_init_lock(file_lock);
        /*
         * Pick an open file of the right mode under fi_lock, recording
         * the access on the lock stateid while we hold the lock.
         */
        switch (lock->lk_type) {
                case NFS4_READ_LT:
                case NFS4_READW_LT:
                        spin_lock(&fp->fi_lock);
                        filp = find_readable_file_locked(fp);
                        if (filp)
                                get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
                        spin_unlock(&fp->fi_lock);
                        file_lock->fl_type = F_RDLCK;
                        break;
                case NFS4_WRITE_LT:
                case NFS4_WRITEW_LT:
                        spin_lock(&fp->fi_lock);
                        filp = find_writeable_file_locked(fp);
                        if (filp)
                                get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
                        spin_unlock(&fp->fi_lock);
                        file_lock->fl_type = F_WRLCK;
                        break;
                default:
                        status = nfserr_inval;
                goto out;
        }
        if (!filp) {
                status = nfserr_openmode;
                goto out;
        }
        file_lock->fl_owner = (fl_owner_t)lock_sop;
        file_lock->fl_pid = current->tgid;
        file_lock->fl_file = filp;
        file_lock->fl_flags = FL_POSIX;
        file_lock->fl_lmops = &nfsd_posix_mng_ops;
        file_lock->fl_start = lock->lk_offset;
        file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
        nfs4_transform_lock_offset(file_lock);

        /* conflock receives the details of any conflicting lock */
        conflock = locks_alloc_lock();
        if (!conflock) {
                dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
                status = nfserr_jukebox;
                goto out;
        }

        err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
        switch (-err) {
        case 0: /* success! */
                update_stateid(&lock_stp->st_stid.sc_stateid);
                memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid, 
                                sizeof(stateid_t));
                status = 0;
                break;
        case (EAGAIN):          /* conflock holds conflicting lock */
                status = nfserr_denied;
                dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
                nfs4_set_lock_denied(conflock, &lock->lk_denied);
                break;
        case (EDEADLK):
                status = nfserr_deadlock;
                break;
        default:
                dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
                status = nfserrno(err);
                break;
        }
out:
        if (filp)
                fput(filp);
        /* a stateid created on this call is torn down again on failure */
        if (status && new_state)
                release_lock_stateid(lock_stp);
        nfsd4_bump_seqid(cstate, status);
        /* on a pending replay the state lock is dropped after encoding */
        if (!cstate->replay_owner)
                nfs4_unlock_state();
        if (file_lock)
                locks_free_lock(file_lock);
        if (conflock)
                locks_free_lock(conflock);
        return status;
}
4872
4873 /*
4874  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4875  * so we do a temporary open here just to get an open file to pass to
4876  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
4877  * inode operation.)
4878  */
4879 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4880 {
4881         struct file *file;
4882         __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4883         if (!err) {
4884                 err = nfserrno(vfs_test_lock(file, lock));
4885                 nfsd_close(file);
4886         }
4887         return err;
4888 }
4889
4890 /*
4891  * LOCKT operation
4892  */
/*
 * LOCKT: test whether a lock could be obtained, without acquiring it.
 * Reports any conflicting lock's owner and range in lt_denied.
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
            struct nfsd4_lockt *lockt)
{
        struct file_lock *file_lock = NULL;
        struct nfs4_lockowner *lo;
        __be32 status;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        /* no lock testing while in the grace period */
        if (locks_in_grace(SVC_NET(rqstp)))
                return nfserr_grace;

        if (check_lock_length(lockt->lt_offset, lockt->lt_length))
                 return nfserr_inval;

        nfs4_lock_state();

        if (!nfsd4_has_session(cstate)) {
                status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
                if (status)
                        goto out;
        }

        if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
                goto out;

        file_lock = locks_alloc_lock();
        if (!file_lock) {
                dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
                status = nfserr_jukebox;
                goto out;
        }
        locks_init_lock(file_lock);
        switch (lockt->lt_type) {
                case NFS4_READ_LT:
                case NFS4_READW_LT:
                        file_lock->fl_type = F_RDLCK;
                break;
                case NFS4_WRITE_LT:
                case NFS4_WRITEW_LT:
                        file_lock->fl_type = F_WRLCK;
                break;
                default:
                        dprintk("NFSD: nfs4_lockt: bad lock type!\n");
                        status = nfserr_inval;
                goto out;
        }

        /* a known lockowner lets us skip conflicts with the owner itself */
        lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner, nn);
        if (lo)
                file_lock->fl_owner = (fl_owner_t)lo;
        file_lock->fl_pid = current->tgid;
        file_lock->fl_flags = FL_POSIX;

        file_lock->fl_start = lockt->lt_offset;
        file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

        nfs4_transform_lock_offset(file_lock);

        status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
        if (status)
                goto out;

        /* vfs_test_lock rewrites file_lock with the conflicting lock */
        if (file_lock->fl_type != F_UNLCK) {
                status = nfserr_denied;
                nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
        }
out:
        nfs4_unlock_state();
        if (file_lock)
                locks_free_lock(file_lock);
        return status;
}
4966
/*
 * LOCKU: release a byte-range lock by setting an F_UNLCK lock over the
 * given range, then bump the lock stateid's generation.
 */
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
            struct nfsd4_locku *locku)
{
        struct nfs4_ol_stateid *stp;
        struct file *filp = NULL;
        struct file_lock *file_lock = NULL;
        __be32 status;
        int err;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
                (long long) locku->lu_offset,
                (long long) locku->lu_length);

        if (check_lock_length(locku->lu_offset, locku->lu_length))
                 return nfserr_inval;

        nfs4_lock_state();
                                                                                
        status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
                                        &locku->lu_stateid, NFS4_LOCK_STID,
                                        &stp, nn);
        if (status)
                goto out;
        /* any open file on this nfs4_file will do for unlocking */
        filp = find_any_file(stp->st_file);
        if (!filp) {
                status = nfserr_lock_range;
                goto out;
        }
        file_lock = locks_alloc_lock();
        if (!file_lock) {
                dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
                status = nfserr_jukebox;
                goto fput;
        }
        locks_init_lock(file_lock);
        file_lock->fl_type = F_UNLCK;
        file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
        file_lock->fl_pid = current->tgid;
        file_lock->fl_file = filp;
        file_lock->fl_flags = FL_POSIX;
        file_lock->fl_lmops = &nfsd_posix_mng_ops;
        file_lock->fl_start = locku->lu_offset;

        file_lock->fl_end = last_byte_offset(locku->lu_offset,
                                                locku->lu_length);
        nfs4_transform_lock_offset(file_lock);

        err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
        if (err) {
                dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
                goto out_nfserr;
        }
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
fput:
        fput(filp);
out:
        nfsd4_bump_seqid(cstate, status);
        /* on a pending replay the state lock is dropped after encoding */
        if (!cstate->replay_owner)
                nfs4_unlock_state();
        if (file_lock)
                locks_free_lock(file_lock);
        return status;

out_nfserr:
        status = nfserrno(err);
        goto fput;
}
5037
5038 /*
5039  * returns
5040  *      1: locks held by lockowner
5041  *      0: no locks held by lockowner
5042  */
5043 static int
5044 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
5045 {
5046         struct file_lock **flpp;
5047         struct inode *inode = filp->fi_inode;
5048         int status = 0;
5049
5050         spin_lock(&inode->i_lock);
5051         for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
5052                 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
5053                         status = 1;
5054                         goto out;
5055                 }
5056         }
5057 out:
5058         spin_unlock(&inode->i_lock);
5059         return status;
5060 }
5061
/*
 * RELEASE_LOCKOWNER: free a lockowner and its stateids, but only if the
 * owner holds no locks anywhere.  An unknown owner is treated as
 * success (it may be a replay of an earlier release).
 */
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
                        struct nfsd4_compound_state *cstate,
                        struct nfsd4_release_lockowner *rlockowner)
{
        clientid_t *clid = &rlockowner->rl_clientid;
        struct nfs4_stateowner *sop = NULL, *tmp;
        struct nfs4_lockowner *lo;
        struct nfs4_ol_stateid *stp;
        struct xdr_netobj *owner = &rlockowner->rl_owner;
        unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
        __be32 status;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
                clid->cl_boot, clid->cl_id);

        nfs4_lock_state();

        status = lookup_clientid(clid, cstate, nn);
        if (status)
                goto out;

        status = nfserr_locks_held;

        /* Find the matching lock stateowner */
        list_for_each_entry(tmp, &nn->ownerstr_hashtbl[hashval], so_strhash) {
                if (tmp->so_is_open_owner)
                        continue;
                if (same_owner_str(tmp, owner, clid)) {
                        sop = tmp;
                        break;
                }
        }

        /* No matching owner found, maybe a replay? Just declare victory... */
        if (!sop) {
                status = nfs_ok;
                goto out;
        }

        lo = lockowner(sop);
        /* see if there are still any locks associated with it */
        list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
                if (check_for_locks(stp->st_file, lo))
                        goto out;
        }

        status = nfs_ok;
        release_lockowner(lo);
out:
        nfs4_unlock_state();
        return status;
}
5116
5117 static inline struct nfs4_client_reclaim *
5118 alloc_reclaim(void)
5119 {
5120         return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
5121 }
5122
5123 bool
5124 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
5125 {
5126         struct nfs4_client_reclaim *crp;
5127
5128         crp = nfsd4_find_reclaim_client(name, nn);
5129         return (crp && crp->cr_clp);
5130 }
5131
5132 /*
5133  * failure => all reset bets are off, nfserr_no_grace...
5134  */
5135 struct nfs4_client_reclaim *
5136 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
5137 {
5138         unsigned int strhashval;
5139         struct nfs4_client_reclaim *crp;
5140
5141         dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
5142         crp = alloc_reclaim();
5143         if (crp) {
5144                 strhashval = clientstr_hashval(name);
5145                 INIT_LIST_HEAD(&crp->cr_strhash);
5146                 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
5147                 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
5148                 crp->cr_clp = NULL;
5149                 nn->reclaim_str_hashtbl_size++;
5150         }
5151         return crp;
5152 }
5153
5154 void
5155 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
5156 {
5157         list_del(&crp->cr_strhash);
5158         kfree(crp);
5159         nn->reclaim_str_hashtbl_size--;
5160 }
5161
5162 void
5163 nfs4_release_reclaim(struct nfsd_net *nn)
5164 {
5165         struct nfs4_client_reclaim *crp = NULL;
5166         int i;
5167
5168         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5169                 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
5170                         crp = list_entry(nn->reclaim_str_hashtbl[i].next,
5171                                         struct nfs4_client_reclaim, cr_strhash);
5172                         nfs4_remove_reclaim_record(crp, nn);
5173                 }
5174         }
5175         WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
5176 }
5177
5178 /*
5179  * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
5180 struct nfs4_client_reclaim *
5181 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
5182 {
5183         unsigned int strhashval;
5184         struct nfs4_client_reclaim *crp = NULL;
5185
5186         dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
5187
5188         strhashval = clientstr_hashval(recdir);
5189         list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
5190                 if (same_name(crp->cr_recdir, recdir)) {
5191                         return crp;
5192                 }
5193         }
5194         return NULL;
5195 }
5196
5197 /*
5198 * Called from OPEN. Look for clientid in reclaim list.
5199 */
5200 __be32
5201 nfs4_check_open_reclaim(clientid_t *clid,
5202                 struct nfsd4_compound_state *cstate,
5203                 struct nfsd_net *nn)
5204 {
5205         __be32 status;
5206
5207         /* find clientid in conf_id_hashtbl */
5208         status = lookup_clientid(clid, cstate, nn);
5209         if (status)
5210                 return nfserr_reclaim_bad;
5211
5212         if (nfsd4_client_record_check(cstate->clp))
5213                 return nfserr_reclaim_bad;
5214
5215         return nfs_ok;
5216 }
5217
5218 #ifdef CONFIG_NFSD_FAULT_INJECTION
5219
5220 u64 nfsd_forget_client(struct nfs4_client *clp, u64 max)
5221 {
5222         if (mark_client_expired(clp))
5223                 return 0;
5224         expire_client(clp);
5225         return 1;
5226 }
5227
5228 u64 nfsd_print_client(struct nfs4_client *clp, u64 num)
5229 {
5230         char buf[INET6_ADDRSTRLEN];
5231         rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5232         printk(KERN_INFO "NFS Client: %s\n", buf);
5233         return 1;
5234 }
5235
5236 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
5237                              const char *type)
5238 {
5239         char buf[INET6_ADDRSTRLEN];
5240         rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5241         printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
5242 }
5243
/*
 * Walk every lock stateid reachable from @clp (via each openowner's open
 * stateids) and call @func on it when @func is non-NULL.  Stops once @max
 * stateids have been visited; max == 0 effectively means "no limit" since
 * the ++count == max test then never fires.  Returns the number visited.
 */
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    void (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		/* _safe iterators: func may unhash the current entry */
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func)
					func(lst);
				if (++count == max)
					return count;
			}
		}
	}

	return count;
}
5267
5268 u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max)
5269 {
5270         return nfsd_foreach_client_lock(clp, max, release_lock_stateid);
5271 }
5272
5273 u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max)
5274 {
5275         u64 count = nfsd_foreach_client_lock(clp, max, NULL);
5276         nfsd_print_count(clp, count, "locked files");
5277         return count;
5278 }
5279
5280 static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *))
5281 {
5282         struct nfs4_openowner *oop, *next;
5283         u64 count = 0;
5284
5285         list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
5286                 if (func)
5287                         func(oop);
5288                 if (++count == max)
5289                         break;
5290         }
5291
5292         return count;
5293 }
5294
5295 u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max)
5296 {
5297         return nfsd_foreach_client_open(clp, max, release_openowner);
5298 }
5299
5300 u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max)
5301 {
5302         u64 count = nfsd_foreach_client_open(clp, max, NULL);
5303         nfsd_print_count(clp, count, "open files");
5304         return count;
5305 }
5306
/*
 * Scan @clp's delegations, up to @max (0 = unlimited).  When @victims is
 * non-NULL, move each still-unbroken delegation (dl_time == 0) onto it,
 * bumping dl_time so a concurrent delegation break leaves it alone; the
 * caller disposes of the victims afterwards.  Returns the number of
 * delegations collected (or, when @victims is NULL, the number present,
 * up to @max).  Caller must hold state_lock.
 */
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	u64 count = 0;

	lockdep_assert_held(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			/*
			 * Increment dl_time to ensure that delegation breaks
			 * don't monkey with it now that we are.
			 */
			++dp->dl_time;
			list_move(&dp->dl_recall_lru, victims);
		}
		if (++count == max)
			break;
	}
	return count;
}
5337
5338 u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
5339 {
5340         struct nfs4_delegation *dp, *next;
5341         LIST_HEAD(victims);
5342         u64 count;
5343
5344         spin_lock(&state_lock);
5345         count = nfsd_find_all_delegations(clp, max, &victims);
5346         spin_unlock(&state_lock);
5347
5348         list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
5349                 revoke_delegation(dp);
5350
5351         return count;
5352 }
5353
/*
 * Fault injection: initiate recall of up to @max of @clp's delegations.
 * The whole collect-and-requeue pass runs under state_lock: dl_time is
 * reset to 0 (nfsd_find_all_delegations bumped it to fence off concurrent
 * breaks) before nfsd_break_one_deleg() queues the CB_RECALL.  Returns
 * the number of delegations found.
 */
u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max)
{
	struct nfs4_delegation *dp;
	LIST_HEAD(victims);
	u64 count;

	spin_lock(&state_lock);
	count = nfsd_find_all_delegations(clp, max, &victims);
	while (!list_empty(&victims)) {
		dp = list_first_entry(&victims, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		dp->dl_time = 0;
		nfsd_break_one_deleg(dp);
	}
	spin_unlock(&state_lock);

	return count;
}
5373
5374 u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max)
5375 {
5376         u64 count = 0;
5377
5378         spin_lock(&state_lock);
5379         count = nfsd_find_all_delegations(clp, max, NULL);
5380         spin_unlock(&state_lock);
5381
5382         nfsd_print_count(clp, count, "delegations");
5383         return count;
5384 }
5385
/*
 * Apply @func to clients on this net's LRU until @max state items have
 * been processed in total; max == 0 means "all clients, no limit".
 * (With max == 0 the u64 expression max - count wraps around, but the
 * limit check is explicitly skipped in that case, so each func call is
 * effectively unbounded.)  Returns the total number processed.
 */
u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64))
{
	struct nfs4_client *clp, *next;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	/* _safe: func may expire and free the current client */
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += func(clp, max - count);
		if ((max != 0) && (count >= max))
			break;
	}

	return count;
}
5403
5404 struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
5405 {
5406         struct nfs4_client *clp;
5407         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);
5408
5409         if (!nfsd_netns_ready(nn))
5410                 return NULL;
5411
5412         list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5413                 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
5414                         return clp;
5415         }
5416         return NULL;
5417 }
5418
5419 #endif /* CONFIG_NFSD_FAULT_INJECTION */
5420
5421 /*
5422  * Since the lifetime of a delegation isn't limited to that of an open, a
5423  * client may quite reasonably hang on to a delegation as long as it has
5424  * the inode cached.  This becomes an obvious problem the first time a
5425  * client's inode cache approaches the size of the server's total memory.
5426  *
5427  * For now we avoid this problem by imposing a hard limit on the number
5428  * of delegations, which varies according to the server's memory size.
5429  */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 *
	 * pages >> (20 - 2 - PAGE_SHIFT) == (pages * PAGE_SIZE / 1MB) * 4.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
5441
/*
 * Allocate and initialize the per-network-namespace state tables and
 * lists.  On failure, unwinds the allocations already made (in reverse
 * order via the cascading error labels) and returns -ENOMEM.  On success
 * takes a reference on @net, dropped by nfs4_state_destroy_net().
 */
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!nn->ownerstr_hashtbl)
		goto err_ownerstr;
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	/* all allocations succeeded; now initialize every bucket head */
	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

	/* error unwinding: each label frees the allocations before it */
err_sessionid:
	kfree(nn->ownerstr_hashtbl);
err_ownerstr:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
5493
5494 static void
5495 nfs4_state_destroy_net(struct net *net)
5496 {
5497         int i;
5498         struct nfs4_client *clp = NULL;
5499         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5500
5501         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5502                 while (!list_empty(&nn->conf_id_hashtbl[i])) {
5503                         clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
5504                         destroy_client(clp);
5505                 }
5506         }
5507
5508         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5509                 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
5510                         clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
5511                         destroy_client(clp);
5512                 }
5513         }
5514
5515         kfree(nn->sessionid_hashtbl);
5516         kfree(nn->ownerstr_hashtbl);
5517         kfree(nn->unconf_id_hashtbl);
5518         kfree(nn->conf_id_hashtbl);
5519         put_net(net);
5520 }
5521
/*
 * Per-net startup: build the state tables, initialize client tracking,
 * and open the grace period before scheduling the laundromat (whose
 * first run, nn->nfsd4_grace seconds from now, will end it).
 */
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	nfsd4_client_tracking_init(net);
	nn->boot_time = get_seconds();
	locks_start_grace(net, &nn->nfsd4_manager);
	nn->grace_ended = false;
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
	       nn->nfsd4_grace, net);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}
5540
5541 /* initialization to perform when the nfsd service is started: */
5542
5543 int
5544 nfs4_state_start(void)
5545 {
5546         int ret;
5547
5548         ret = set_callback_cred();
5549         if (ret)
5550                 return -ENOMEM;
5551         laundry_wq = create_singlethread_workqueue("nfsd4");
5552         if (laundry_wq == NULL) {
5553                 ret = -ENOMEM;
5554                 goto out_recovery;
5555         }
5556         ret = nfsd4_create_callback_queue();
5557         if (ret)
5558                 goto out_free_laundry;
5559
5560         set_max_delegations();
5561
5562         return 0;
5563
5564 out_free_laundry:
5565         destroy_workqueue(laundry_wq);
5566 out_recovery:
5567         return ret;
5568 }
5569
/*
 * Per-net shutdown: stop the laundromat, end the grace period, unhash
 * all delegations (collected under state_lock, destroyed after it is
 * dropped), then tear down client tracking and the state tables.
 */
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* no more laundromat runs; any in-flight run completes first */
	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	nfs4_lock_state();
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	/* destroy outside state_lock: destroy_delegation may sleep */
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		destroy_delegation(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
	nfs4_unlock_state();
}
5597
/*
 * Global (not per-net) shutdown, paired with nfs4_state_start():
 * destroys the laundromat workqueue and the callback queue.
 */
void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}
5604
5605 static void
5606 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
5607 {
5608         if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
5609                 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
5610 }
5611
5612 static void
5613 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
5614 {
5615         if (cstate->minorversion) {
5616                 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
5617                 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
5618         }
5619 }
5620
/* Forget any saved current stateid for this compound. */
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
5626
5627 /*
5628  * functions to set current state id
5629  */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	/* OPEN_DOWNGRADE's result stateid becomes the current stateid */
	put_stateid(cstate, &odp->od_stateid);
}
5635
void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	/* OPEN's result stateid becomes the current stateid */
	put_stateid(cstate, &open->op_stateid);
}
5641
void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	/* CLOSE's result stateid becomes the current stateid */
	put_stateid(cstate, &close->cl_stateid);
}
5647
void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	/* LOCK's result stateid becomes the current stateid */
	put_stateid(cstate, &lock->lk_resp_stateid);
}
5653
5654 /*
5655  * functions to consume current state id
5656  */
5657
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	/* substitute the saved current stateid into OPEN_DOWNGRADE's args */
	get_stateid(cstate, &odp->od_stateid);
}
5663
void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	/* substitute the saved current stateid into DELEGRETURN's args */
	get_stateid(cstate, &drp->dr_stateid);
}
5669
void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	/* substitute the saved current stateid into FREE_STATEID's args */
	get_stateid(cstate, &fsp->fr_stateid);
}
5675
void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	/* substitute the saved current stateid into SETATTR's args */
	get_stateid(cstate, &setattr->sa_stateid);
}
5681
void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	/* substitute the saved current stateid into CLOSE's args */
	get_stateid(cstate, &close->cl_stateid);
}
5687
void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	/* substitute the saved current stateid into LOCKU's args */
	get_stateid(cstate, &locku->lu_stateid);
}
5693
void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	/* substitute the saved current stateid into READ's args */
	get_stateid(cstate, &read->rd_stateid);
}
5699
void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	/* substitute the saved current stateid into WRITE's args */
	get_stateid(cstate, &write->wr_stateid);
}