nfsd4: hash closed stateid's like any other
[pandora-kernel.git] / fs / nfsd / nfs4state.c
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/sunrpc/svcauth_gss.h>
42 #include <linux/sunrpc/clnt.h>
43 #include "xdr4.h"
44 #include "vfs.h"
45
#define NFSDDBG_FACILITY                NFSDDBG_PROC

/* Globals */
time_t nfsd4_lease = 90;     /* default lease time */
time_t nfsd4_grace = 90;
static time_t boot_time;	/* server start time; compared with clientid cl_boot */
static u32 current_stateid = 1;	/* counter for generating new stateid so_id values */
static stateid_t zerostateid;             /* bits all 0 */
static stateid_t onestateid;              /* bits all 1 */
static u64 current_sessionid = 1;	/* counter for generating sessionid sequence values */

/* Tests against the two special stateid values (all-zeros / all-ones). */
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &onestateid, sizeof(stateid_t)))

/* forward declarations */
static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
62
/* Locking: */

/* Currently used for almost all code touching nfsv4 state: */
static DEFINE_MUTEX(client_mutex);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(recall_lock);

/* Slab caches for the nfsv4 state objects allocated in this file. */
static struct kmem_cache *openowner_slab = NULL;
static struct kmem_cache *lockowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;
80
/* Take the global state mutex serializing nfsv4 state changes. */
void
nfs4_lock_state(void)
{
        mutex_lock(&client_mutex);
}
86
/* Release the global state mutex taken by nfs4_lock_state(). */
void
nfs4_unlock_state(void)
{
        mutex_unlock(&client_mutex);
}
92
93 static inline u32
94 opaque_hashval(const void *ptr, int nbytes)
95 {
96         unsigned char *cptr = (unsigned char *) ptr;
97
98         u32 x = 0;
99         while (nbytes--) {
100                 x *= 37;
101                 x += *cptr++;
102         }
103         return x;
104 }
105
/* Delegations queued for recall; protected by recall_lock. */
static struct list_head del_recall_lru;

/*
 * Drop a reference to an nfs4_file; on the last put, unhash it from
 * file_hashtbl (under recall_lock) and free it.
 */
static inline void
put_nfs4_file(struct nfs4_file *fi)
{
        if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
                list_del(&fi->fi_hash);
                spin_unlock(&recall_lock);
                iput(fi->fi_inode);
                kmem_cache_free(file_slab, fi);
        }
}
118
/* Take a reference on an nfs4_file. */
static inline void
get_nfs4_file(struct nfs4_file *fi)
{
        atomic_inc(&fi->fi_ref);
}

static int num_delegations;		/* delegations currently outstanding */
unsigned int max_delegations;		/* cap checked in alloc_init_deleg() */
127
/*
 * Open owner state (share locks)
 */

/* hash tables for open owners: 256 buckets, indexed by the mask below */
#define OPEN_OWNER_HASH_BITS              8
#define OPEN_OWNER_HASH_SIZE             (1 << OPEN_OWNER_HASH_BITS)
#define OPEN_OWNER_HASH_MASK             (OPEN_OWNER_HASH_SIZE - 1)
137 static unsigned int open_ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
138 {
139         unsigned int ret;
140
141         ret = opaque_hashval(ownername->data, ownername->len);
142         ret += clientid;
143         return ret & OPEN_OWNER_HASH_MASK;
144 }
145
static struct list_head open_ownerstr_hashtbl[OPEN_OWNER_HASH_SIZE];

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

/* hash table for (open)nfs4_ol_stateid */
#define STATEID_HASH_BITS              10
#define STATEID_HASH_SIZE              (1 << STATEID_HASH_BITS)
#define STATEID_HASH_MASK              (STATEID_HASH_SIZE - 1)
156
/* Bucket index for an nfs4_file, keyed on its inode pointer. */
static unsigned int file_hashval(struct inode *ino)
{
        /* XXX: why are we hashing on inode pointer, anyway? */
        return hash_ptr(ino, FILE_HASH_BITS);
}
162
/* Bucket index for a stateid: hash of its opaque (clid + id) part. */
static unsigned int stateid_hashval(stateid_t *s)
{
        return opaque_hashval(&s->si_opaque, sizeof(stateid_opaque_t)) & STATEID_HASH_MASK;
}

static struct list_head file_hashtbl[FILE_HASH_SIZE];
static struct list_head stateid_hashtbl[STATEID_HASH_SIZE];
170
/*
 * Bump the access count for one open mode.  A struct file for that
 * mode (or an O_RDWR one) must already be cached, else it's a bug.
 */
static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
        BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
        atomic_inc(&fp->fi_access[oflag]);
}
176
177 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
178 {
179         if (oflag == O_RDWR) {
180                 __nfs4_file_get_access(fp, O_RDONLY);
181                 __nfs4_file_get_access(fp, O_WRONLY);
182         } else
183                 __nfs4_file_get_access(fp, oflag);
184 }
185
186 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
187 {
188         if (fp->fi_fds[oflag]) {
189                 fput(fp->fi_fds[oflag]);
190                 fp->fi_fds[oflag] = NULL;
191         }
192 }
193
/*
 * Drop one access count for the given open mode; once it reaches
 * zero, release both the cached O_RDWR file and the mode-specific
 * one.
 */
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
        if (atomic_dec_and_test(&fp->fi_access[oflag])) {
                nfs4_file_put_fd(fp, O_RDWR);
                nfs4_file_put_fd(fp, oflag);
        }
}
201
202 static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
203 {
204         if (oflag == O_RDWR) {
205                 __nfs4_file_put_access(fp, O_RDONLY);
206                 __nfs4_file_put_access(fp, O_WRONLY);
207         } else
208                 __nfs4_file_put_access(fp, oflag);
209 }
210
211 static inline void hash_stid(struct nfs4_stid *stid)
212 {
213         stateid_t *s = &stid->sc_stateid;
214         unsigned int hashval;
215
216         hashval = stateid_hashval(s);
217         list_add(&stid->sc_hash, &stateid_hashtbl[hashval]);
218 }
219
/*
 * Allocate and initialize a read delegation for stp's file, hash its
 * stateid, and charge it against the global delegation count.
 * Returns NULL (silently) when a delegation can't or shouldn't be
 * handed out.
 */
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
{
        struct nfs4_delegation *dp;
        struct nfs4_file *fp = stp->st_file;

        dprintk("NFSD alloc_init_deleg\n");
        /*
         * Major work on the lease subsystem (for example, to support
         * callbacks on stat) will be required before we can support
         * write delegations properly.
         */
        if (type != NFS4_OPEN_DELEGATE_READ)
                return NULL;
        if (fp->fi_had_conflict)
                return NULL;
        if (num_delegations > max_delegations)
                return NULL;
        dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
        if (dp == NULL)
                return dp;
        num_delegations++;
        INIT_LIST_HEAD(&dp->dl_perfile);
        INIT_LIST_HEAD(&dp->dl_perclnt);
        INIT_LIST_HEAD(&dp->dl_recall_lru);
        dp->dl_client = clp;
        get_nfs4_file(fp);
        dp->dl_file = fp;
        dp->dl_type = type;
        dp->dl_stid.sc_type = NFS4_DELEG_STID;
        dp->dl_stid.sc_stateid.si_opaque.so_clid = clp->cl_clientid;
        dp->dl_stid.sc_stateid.si_opaque.so_id = current_stateid++;
        /* delegation stateids start life at generation 1 */
        dp->dl_stid.sc_stateid.si_generation = 1;
        hash_stid(&dp->dl_stid);
        fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
        dp->dl_time = 0;
        atomic_set(&dp->dl_count, 1);
        INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
        return dp;
}
260
/*
 * Drop a reference on a delegation; on the last put, release its file
 * reference, free it, and uncharge the global delegation count.
 */
void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
        if (atomic_dec_and_test(&dp->dl_count)) {
                dprintk("NFSD: freeing dp %p\n",dp);
                put_nfs4_file(dp->dl_file);
                kmem_cache_free(deleg_slab, dp);
                num_delegations--;
        }
}
271
/*
 * Drop one reference on the file's delegation lease; on the last put,
 * unlock the vfs lease and release the cached struct file.
 */
static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
        if (atomic_dec_and_test(&fp->fi_delegees)) {
                vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
                fp->fi_lease = NULL;
                fput(fp->fi_deleg_file);
                fp->fi_deleg_file = NULL;
        }
}
281
/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
        list_del_init(&dp->dl_stid.sc_hash);
        list_del_init(&dp->dl_perclnt);
        /* dl_perfile and dl_recall_lru are protected by recall_lock */
        spin_lock(&recall_lock);
        list_del_init(&dp->dl_perfile);
        list_del_init(&dp->dl_recall_lru);
        spin_unlock(&recall_lock);
        nfs4_put_deleg_lease(dp->dl_file);
        nfs4_put_delegation(dp);
}
295
/* 
 * SETCLIENTID state 
 */

/* client_lock protects the client lru list and session hash table */
static DEFINE_SPINLOCK(client_lock);

/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS                 4
#define CLIENT_HASH_SIZE                (1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK                (CLIENT_HASH_SIZE - 1)

/* Bucket index for a clientid: its low bits. */
static unsigned int clientid_hashval(u32 id)
{
        return id & CLIENT_HASH_MASK;
}

/* Bucket index for a client string; only the first 8 bytes are hashed. */
static unsigned int clientstr_hashval(const char *name)
{
        return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
317
/*
 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
 * used in reboot/reset lease grace period processing
 *
 * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
 * setclientid_confirmed info. 
 *
 * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed 
 * setclientid info.
 *
 * client_lru holds client queue ordered by nfs4_client.cl_time
 * for lease renewal.
 *
 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
 * for last close replay.
 */
static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE];
static int reclaim_str_hashtbl_size = 0;
static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head client_lru;
static struct list_head close_lru;
342
/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *      OPEN allow read, deny write
 *      OPEN allow both, deny none
 *      DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
/*
 * Fold the per-combination access bitmap back into a share-access
 * mask by OR-ing together the set bit positions 1..3 (position 0
 * would contribute nothing to the OR).
 */
static void
set_access(unsigned int *access, unsigned long bmap) {
        int i;

        *access = 0;
        for (i = 1; i < 4; i++) {
                if (test_bit(i, &bmap))
                        *access |= i;
        }
}
371
/*
 * Fold the per-combination deny bitmap back into a share-deny mask,
 * mirroring set_access().  Bit position 0 (deny-none) has value zero
 * and can never contribute to the OR, so start at 1 like set_access()
 * does.
 */
static void
set_deny(unsigned int *deny, unsigned long bmap) {
        int i;

        *deny = 0;
        for (i = 1; i < 4; i++) {
                if (test_bit(i, &bmap))
                        *deny |= i;
        }
}
382
/*
 * Return 1 iff the open's share request doesn't conflict with the
 * access/deny bits already in force on this stateid.
 */
static int
test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
        unsigned int access, deny;

        set_access(&access, stp->st_access_bmap);
        set_deny(&deny, stp->st_deny_bmap);
        if ((access & open->op_share_deny) || (deny & open->op_share_access))
                return 0;
        return 1;
}
393
/*
 * Map an NFSv4 share-access value to the corresponding open(2) mode.
 * Callers must pass a valid access value; anything else is a bug.
 */
static int nfs4_access_to_omode(u32 access)
{
        switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
                return O_WRONLY;
        case NFS4_SHARE_ACCESS_BOTH:
                return O_RDWR;
        }
        BUG();
}
406
/* Remove an open/lock stateid from its per-file and per-owner lists. */
static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
        list_del(&stp->st_perfile);
        list_del(&stp->st_perstateowner);
}
412
/*
 * Drop all file access references recorded in st_access_bmap, then
 * drop the stateid's reference on its nfs4_file, leaving st_file NULL.
 */
static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
        int i;

        if (stp->st_access_bmap) {
                for (i = 1; i < 4; i++) {
                        if (test_bit(i, &stp->st_access_bmap))
                                nfs4_file_put_access(stp->st_file,
                                                nfs4_access_to_omode(i));
                        __clear_bit(i, &stp->st_access_bmap);
                }
        }
        put_nfs4_file(stp->st_file);
        stp->st_file = NULL;
}
428
/* Return an open/lock stateid to its slab cache. */
static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
        kmem_cache_free(stateid_slab, stp);
}
433
/*
 * Unhash a lock stateid, remove any posix locks its owner holds on
 * the file, drop its file references, and free it.
 */
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
        struct file *file;

        unhash_generic_stateid(stp);
        list_del(&stp->st_stid.sc_hash);
        file = find_any_file(stp->st_file);
        if (file)
                locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
        close_generic_stateid(stp);
        free_generic_stateid(stp);
}
446
/*
 * Remove a lockowner from its hash chains and release all of its
 * remaining lock stateids.
 */
static void unhash_lockowner(struct nfs4_lockowner *lo)
{
        struct nfs4_ol_stateid *stp;

        list_del(&lo->lo_owner.so_strhash);
        list_del(&lo->lo_perstateid);
        while (!list_empty(&lo->lo_owner.so_stateids)) {
                stp = list_first_entry(&lo->lo_owner.so_stateids,
                                struct nfs4_ol_stateid, st_perstateowner);
                release_lock_stateid(stp);
        }
}
459
/* Unhash a lockowner (and its stateids), then free it. */
static void release_lockowner(struct nfs4_lockowner *lo)
{
        unhash_lockowner(lo);
        nfs4_free_lockowner(lo);
}
465
466 static void
467 release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
468 {
469         struct nfs4_lockowner *lo;
470
471         while (!list_empty(&open_stp->st_lockowners)) {
472                 lo = list_entry(open_stp->st_lockowners.next,
473                                 struct nfs4_lockowner, lo_perstateid);
474                 release_lockowner(lo);
475         }
476 }
477
/*
 * Detach an open stateid from file/owner lists, release its
 * lockowners, and drop its file access references.
 */
static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
{
        unhash_generic_stateid(stp);
        release_stateid_lockowners(stp);
        close_generic_stateid(stp);
}
484
/* Fully tear down an open stateid: unhash it everywhere and free it. */
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
        unhash_open_stateid(stp);
        list_del(&stp->st_stid.sc_hash);
        free_generic_stateid(stp);
}
491
/*
 * Remove an openowner from its hash chains and release all of its
 * remaining open stateids.
 */
static void unhash_openowner(struct nfs4_openowner *oo)
{
        struct nfs4_ol_stateid *stp;

        list_del(&oo->oo_owner.so_strhash);
        list_del(&oo->oo_perclient);
        while (!list_empty(&oo->oo_owner.so_stateids)) {
                stp = list_first_entry(&oo->oo_owner.so_stateids,
                                struct nfs4_ol_stateid, st_perstateowner);
                release_open_stateid(stp);
        }
}
504
/*
 * Free the stateid kept around after the owner's last CLOSE (held for
 * replay handling), if there is one.
 */
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
        struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;

        if (s) {
                list_del_init(&s->st_stid.sc_hash);
                free_generic_stateid(s);
                oo->oo_last_closed_stid = NULL;
        }
}
515
/*
 * Fully tear down an openowner: unhash it (with its stateids), take
 * it off close_lru, drop any last-closed stateid, and free it.
 */
static void release_openowner(struct nfs4_openowner *oo)
{
        unhash_openowner(oo);
        list_del(&oo->oo_close_lru);
        release_last_closed_stateid(oo);
        nfs4_free_openowner(oo);
}
523
#define SESSION_HASH_SIZE       512
static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];

/* Bucket index for a sessionid: its embedded sequence number. */
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
        struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

        return sid->sequence % SESSION_HASH_SIZE;
}
534
/* Debug helper: print a sessionid as four 32-bit words. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
        u32 *ptr = (u32 *)(&sessionid->data[0]);
        dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
541
/*
 * Generate a new sessionid: the owning clientid plus the next value
 * of a global sequence counter, with the reserved field zeroed.
 */
static void
gen_sessionid(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd4_sessionid *sid;

        sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
        sid->clientid = clp->cl_clientid;
        sid->sequence = current_sessionid++;
        sid->reserved = 0;
}
553
/*
 * The protocol defines ca_maxresponssize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponssize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

/* Free every per-slot reply cache entry of a session. */
static void
free_session_slots(struct nfsd4_session *ses)
{
        int i;

        for (i = 0; i < ses->se_fchannel.maxreqs; i++)
                kfree(ses->se_slots[i]);
}
576
/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
{
        return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
}
585
/*
 * Clamp a client-requested cached-response size to what we are
 * willing to cache per slot.
 * NOTE(review): if size < NFSD_MIN_HDR_SEQ_SZ the u32 subtraction
 * wraps, and the min_t then caps the result at NFSD_SLOT_CACHE_SIZE —
 * confirm callers validate maxresp_cached first.
 */
static int nfsd4_sanitize_slot_size(u32 size)
{
        size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
        size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);

        return size;
}
593
594 /*
595  * XXX: If we run out of reserved DRC memory we could (up to a point)
596  * re-negotiate active sessions and reduce their slot usage to make
597  * rooom for new connections. For now we just fail the create session.
598  */
599 static int nfsd4_get_drc_mem(int slotsize, u32 num)
600 {
601         int avail;
602
603         num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
604
605         spin_lock(&nfsd_drc_lock);
606         avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
607                         nfsd_drc_max_mem - nfsd_drc_mem_used);
608         num = min_t(int, num, avail / slotsize);
609         nfsd_drc_mem_used += num * slotsize;
610         spin_unlock(&nfsd_drc_lock);
611
612         return num;
613 }
614
/* Return a previously reserved num * slotsize bytes of DRC memory. */
static void nfsd4_put_drc_mem(int slotsize, int num)
{
        spin_lock(&nfsd_drc_lock);
        nfsd_drc_mem_used -= slotsize * num;
        spin_unlock(&nfsd_drc_lock);
}
621
/*
 * Allocate a session with numslots slots, each with a slotsize-byte
 * reply cache.  The slot pointer array is tacked onto the session
 * allocation; each slot plus its cache is a further allocation.
 * Returns NULL on allocation failure.
 */
static struct nfsd4_session *alloc_session(int slotsize, int numslots)
{
        struct nfsd4_session *new;
        int mem, i;

        /* session plus a maximal slot-pointer array must fit in a page */
        BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
                        + sizeof(struct nfsd4_session) > PAGE_SIZE);
        mem = numslots * sizeof(struct nfsd4_slot *);

        new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
        if (!new)
                return NULL;
        /* allocate each struct nfsd4_slot and data cache in one piece */
        for (i = 0; i < numslots; i++) {
                mem = sizeof(struct nfsd4_slot) + slotsize;
                new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
                if (!new->se_slots[i])
                        goto out_free;
        }
        return new;
out_free:
        while (i--)
                kfree(new->se_slots[i]);
        kfree(new);
        return NULL;
}
648
/*
 * Record the negotiated fore channel attributes: our computed slot
 * count and cache size, with the client's remaining requests clamped
 * to server limits.
 */
static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
{
        u32 maxrpc = nfsd_serv->sv_max_mesg;

        new->maxreqs = numslots;
        new->maxresp_cached = min_t(u32, req->maxresp_cached,
                                        slotsize + NFSD_MIN_HDR_SEQ_SZ);
        new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
        new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
        new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
}
660
/* Drop the connection's transport reference and free it. */
static void free_conn(struct nfsd4_conn *c)
{
        svc_xprt_put(c->cn_xprt);
        kfree(c);
}
666
/*
 * Transport-gone callback: detach the connection from its session (if
 * still attached), free it, and re-probe the client's callback
 * channel.
 */
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
        struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
        struct nfs4_client *clp = c->cn_session->se_client;

        spin_lock(&clp->cl_lock);
        if (!list_empty(&c->cn_persession)) {
                list_del(&c->cn_persession);
                free_conn(c);
        }
        spin_unlock(&clp->cl_lock);
        nfsd4_probe_callback(clp);
}
680
/*
 * Allocate a connection for the request's transport, taking a
 * reference on the transport.  Returns NULL on allocation failure.
 */
static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
        struct nfsd4_conn *conn;

        conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
        if (!conn)
                return NULL;
        svc_xprt_get(rqstp->rq_xprt);
        conn->cn_xprt = rqstp->rq_xprt;
        conn->cn_flags = flags;
        INIT_LIST_HEAD(&conn->cn_xpt_user.list);
        return conn;
}
694
/* Attach a connection to a session; caller holds the client's cl_lock. */
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        conn->cn_session = ses;
        list_add(&conn->cn_persession, &ses->se_conns);
}
700
/* Attach a connection to a session under the client's cl_lock. */
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;

        spin_lock(&clp->cl_lock);
        __nfsd4_hash_conn(conn, ses);
        spin_unlock(&clp->cl_lock);
}
709
/*
 * Ask to be notified (via nfsd4_conn_lost) when the transport goes
 * away.  Returns non-zero if the transport is already down.
 */
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
        conn->cn_xpt_user.callback = nfsd4_conn_lost;
        return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
715
/*
 * Allocate, hash, and register a connection for the session.  Note
 * this returns nfs_ok even if registration fails: in that case the
 * transport is already down and nfsd4_conn_lost() has cleaned up.
 */
static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
{
        struct nfsd4_conn *conn;
        int ret;

        conn = alloc_conn(rqstp, dir);
        if (!conn)
                return nfserr_jukebox;
        nfsd4_hash_conn(conn, ses);
        ret = nfsd4_register_conn(conn);
        if (ret)
                /* oops; xprt is already down: */
                nfsd4_conn_lost(&conn->cn_xpt_user);
        return nfs_ok;
}
731
732 static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
733 {
734         u32 dir = NFS4_CDFC4_FORE;
735
736         if (ses->se_flags & SESSION4_BACK_CHAN)
737                 dir |= NFS4_CDFC4_BACK;
738
739         return nfsd4_new_conn(rqstp, ses, dir);
740 }
741
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
        struct nfs4_client *clp = s->se_client;
        struct nfsd4_conn *c;

        spin_lock(&clp->cl_lock);
        while (!list_empty(&s->se_conns)) {
                c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
                list_del_init(&c->cn_persession);
                /*
                 * Drop cl_lock while unregistering/freeing:
                 * nfsd4_conn_lost() takes cl_lock itself.
                 */
                spin_unlock(&clp->cl_lock);

                unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
                free_conn(c);

                spin_lock(&clp->cl_lock);
        }
        spin_unlock(&clp->cl_lock);
}
761
/*
 * Final kref release for a session: tear down its connections, return
 * its DRC memory reservation, and free the slots and the session.
 */
void free_session(struct kref *kref)
{
        struct nfsd4_session *ses;
        int mem;

        ses = container_of(kref, struct nfsd4_session, se_ref);
        nfsd4_del_conns(ses);
        spin_lock(&nfsd_drc_lock);
        mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
        nfsd_drc_mem_used -= mem;
        spin_unlock(&nfsd_drc_lock);
        free_session_slots(ses);
        kfree(ses);
}
776
777 static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
778 {
779         struct nfsd4_session *new;
780         struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
781         int numslots, slotsize;
782         int status;
783         int idx;
784
785         /*
786          * Note decreasing slot size below client's request may
787          * make it difficult for client to function correctly, whereas
788          * decreasing the number of slots will (just?) affect
789          * performance.  When short on memory we therefore prefer to
790          * decrease number of slots instead of their size.
791          */
792         slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
793         numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
794         if (numslots < 1)
795                 return NULL;
796
797         new = alloc_session(slotsize, numslots);
798         if (!new) {
799                 nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
800                 return NULL;
801         }
802         init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
803
804         new->se_client = clp;
805         gen_sessionid(new);
806
807         INIT_LIST_HEAD(&new->se_conns);
808
809         new->se_cb_seq_nr = 1;
810         new->se_flags = cses->flags;
811         new->se_cb_prog = cses->callback_prog;
812         kref_init(&new->se_ref);
813         idx = hash_sessionid(&new->se_sessionid);
814         spin_lock(&client_lock);
815         list_add(&new->se_hash, &sessionid_hashtbl[idx]);
816         spin_lock(&clp->cl_lock);
817         list_add(&new->se_perclnt, &clp->cl_sessions);
818         spin_unlock(&clp->cl_lock);
819         spin_unlock(&client_lock);
820
821         status = nfsd4_new_conn_from_crses(rqstp, new);
822         /* whoops: benny points out, status is ignored! (err, or bogus) */
823         if (status) {
824                 free_session(&new->se_ref);
825                 return NULL;
826         }
827         if (cses->flags & SESSION4_BACK_CHAN) {
828                 struct sockaddr *sa = svc_addr(rqstp);
829                 /*
830                  * This is a little silly; with sessions there's no real
831                  * use for the callback address.  Use the peer address
832                  * as a reasonable default for now, but consider fixing
833                  * the rpc client not to require an address in the
834                  * future:
835                  */
836                 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
837                 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
838         }
839         nfsd4_probe_callback(clp);
840         return new;
841 }
842
/* caller must hold client_lock */
/* Look up a session by full sessionid; returns NULL if not found. */
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
{
        struct nfsd4_session *elem;
        int idx;

        dump_sessionid(__func__, sessionid);
        idx = hash_sessionid(sessionid);
        /* Search in the appropriate list */
        list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) {
                if (!memcmp(elem->se_sessionid.data, sessionid->data,
                            NFS4_MAX_SESSIONID_LEN)) {
                        return elem;
                }
        }

        dprintk("%s: session not found\n", __func__);
        return NULL;
}
863
/* caller must hold client_lock */
/* Remove a session from the sessionid hash and its client's list. */
static void
unhash_session(struct nfsd4_session *ses)
{
        list_del(&ses->se_hash);
        spin_lock(&ses->se_client->cl_lock);
        list_del(&ses->se_perclnt);
        spin_unlock(&ses->se_client->cl_lock);
}
873
/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
        if (is_client_expired(clp)) {
                /* renewing an expired client would resurrect it; don't */
                dprintk("%s: client (clientid %08x/%08x) already expired\n",
                        __func__,
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
                return;
        }

        /*
         * Move client to the end of the LRU list and refresh its
         * lease timestamp.
         */
        dprintk("renewing client (clientid %08x/%08x)\n", 
                        clp->cl_clientid.cl_boot, 
                        clp->cl_clientid.cl_id);
        list_move_tail(&clp->cl_lru, &client_lru);
        clp->cl_time = get_seconds();
}
895
/* Renew a client's lease, taking client_lock as required. */
static inline void
renew_client(struct nfs4_client *clp)
{
        spin_lock(&client_lock);
        renew_client_locked(clp);
        spin_unlock(&client_lock);
}
903
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
/*
 * A clientid is stale if it was issued by a previous server instance,
 * i.e. its embedded boot time doesn't match ours.  Returns 1 if stale.
 */
static int
STALE_CLIENTID(clientid_t *clid)
{
        if (clid->cl_boot == boot_time)
                return 0;
        dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
                clid->cl_boot, clid->cl_id, boot_time);
        return 1;
}
914
915 /* 
916  * XXX Should we use a slab cache ?
917  * This type of memory management is somewhat inefficient, but we use it
918  * anyway since SETCLIENTID is not a common operation.
919  */
920 static struct nfs4_client *alloc_client(struct xdr_netobj name)
921 {
922         struct nfs4_client *clp;
923
924         clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
925         if (clp == NULL)
926                 return NULL;
927         clp->cl_name.data = kmalloc(name.len, GFP_KERNEL);
928         if (clp->cl_name.data == NULL) {
929                 kfree(clp);
930                 return NULL;
931         }
932         memcpy(clp->cl_name.data, name.data, name.len);
933         clp->cl_name.len = name.len;
934         return clp;
935 }
936
/*
 * Free a client and everything still hanging off it: any remaining
 * sessions, the credential's group info, the principal and the name.
 * Caller must ensure no other references to the client remain.
 */
static inline void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		/* Drop the per-client list's reference; may free the session. */
		nfsd4_put_session(ses);
	}
	if (clp->cl_cred.cr_group_info)
		put_group_info(clp->cl_cred.cr_group_info);
	kfree(clp->cl_principal);
	kfree(clp->cl_name.data);
	kfree(clp);
}
953
/*
 * Drop a session's reference on its client.  If this was the last
 * reference and the client has already expired, free the client here;
 * otherwise renew its lease.  atomic_dec_and_lock only takes
 * client_lock when the count reaches zero.
 */
void
release_session_client(struct nfsd4_session *session)
{
	struct nfs4_client *clp = session->se_client;

	if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
		return;
	if (is_client_expired(clp)) {
		free_client(clp);
		/* The client is gone; clear the session's back-pointer. */
		session->se_client = NULL;
	} else
		renew_client_locked(clp);
	spin_unlock(&client_lock);
}
968
/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd4_session *ses;

	/* Flag the client as expired so concurrent users back off. */
	mark_client_expired(clp);
	list_del(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	/* Unhash all of the client's sessions from the sessionid table. */
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}
982
/*
 * Tear down all state held by a client: delegations, open owners,
 * callback machinery and hash-table linkage.  The client itself is
 * freed here only if no references remain; otherwise the last
 * reference holder frees it (see release_session_client).
 */
static void
expire_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	/* Collect delegations onto a private list under recall_lock... */
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		list_del_init(&dp->dl_perclnt);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	/* ...then unhash them outside the lock. */
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		unhash_delegation(dp);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		release_openowner(oo);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	list_del(&clp->cl_idhash);
	list_del(&clp->cl_strhash);
	spin_lock(&client_lock);
	unhash_client_locked(clp);
	/* Free immediately only if nobody else holds a reference. */
	if (atomic_read(&clp->cl_refcount) == 0)
		free_client(clp);
	spin_unlock(&client_lock);
}
1018
1019 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1020 {
1021         memcpy(target->cl_verifier.data, source->data,
1022                         sizeof(target->cl_verifier.data));
1023 }
1024
1025 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1026 {
1027         target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 
1028         target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
1029 }
1030
1031 static void copy_cred(struct svc_cred *target, struct svc_cred *source)
1032 {
1033         target->cr_uid = source->cr_uid;
1034         target->cr_gid = source->cr_gid;
1035         target->cr_group_info = source->cr_group_info;
1036         get_group_info(target->cr_group_info);
1037 }
1038
1039 static int same_name(const char *n1, const char *n2)
1040 {
1041         return 0 == memcmp(n1, n2, HEXDIR_LEN);
1042 }
1043
1044 static int
1045 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1046 {
1047         return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1048 }
1049
1050 static int
1051 same_clid(clientid_t *cl1, clientid_t *cl2)
1052 {
1053         return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1054 }
1055
/* XXX what about NGROUP */
/*
 * Credential comparison used for client-identity checks.  NOTE: only
 * the uid is compared; gid and group list are deliberately ignored
 * here (see the XXX above).
 */
static int
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	return cr1->cr_uid == cr2->cr_uid;
}
1062
/*
 * Generate a fresh clientid: the server boot time plus a monotonically
 * increasing counter.  NOTE(review): the static counter appears to rely
 * on callers holding the state lock for serialization -- confirm.
 */
static void gen_clid(struct nfs4_client *clp)
{
	static u32 current_clientid = 1;

	clp->cl_clientid.cl_boot = boot_time;
	clp->cl_clientid.cl_id = current_clientid++; 
}
1070
/*
 * Generate a confirm verifier from the current time plus a static
 * counter: not cryptographically strong, merely unlikely to repeat.
 */
static void gen_confirm(struct nfs4_client *clp)
{
	static u32 i;
	u32 *p;

	/* Fill the 8-byte verifier as two 32-bit words. */
	p = (u32 *)clp->cl_confirm.data;
	*p++ = get_seconds();
	*p++ = i++;
}
1080
1081 static int
1082 same_stateid(stateid_t *id_one, stateid_t *id_two)
1083 {
1084         return 0 == memcmp(&id_one->si_opaque, &id_two->si_opaque,
1085                                         sizeof(stateid_opaque_t));
1086 }
1087
/*
 * Look up a stateid in the global hash by its opaque part.
 * Returns NULL if no matching stateid is hashed.
 */
static struct nfs4_stid *find_stateid(stateid_t *t)
{
	struct nfs4_stid *s;
	unsigned int hashval;

	hashval = stateid_hashval(t);
	list_for_each_entry(s, &stateid_hashtbl[hashval], sc_hash)
		if (same_stateid(&s->sc_stateid, t))
			return s;
	return NULL;
}
1099
1100 static struct nfs4_stid *find_stateid_by_type(stateid_t *t, char typemask)
1101 {
1102         struct nfs4_stid *s;
1103
1104         s = find_stateid(t);
1105         if (!s)
1106                 return NULL;
1107         if (typemask & s->sc_type)
1108                 return s;
1109         return NULL;
1110 }
1111
1112 static struct nfs4_ol_stateid *find_ol_stateid_by_type(stateid_t *t, char typemask)
1113 {
1114         struct nfs4_stid *s;
1115
1116         s = find_stateid_by_type(t, typemask);
1117         if (!s)
1118                 return NULL;
1119         return openlockstateid(s);
1120 }
1121
/*
 * Allocate and initialize a client record for a SETCLIENTID /
 * EXCHANGE_ID request.  Returns NULL on allocation failure.  The new
 * client is not hashed anywhere; the caller is responsible for that.
 */
static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	char *princ;

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	INIT_LIST_HEAD(&clp->cl_sessions);

	/* Remember the RPCSEC_GSS principal (if any) for later cred checks. */
	princ = svc_gss_principal(rqstp);
	if (princ) {
		clp->cl_principal = kstrdup(princ, GFP_KERNEL);
		if (clp->cl_principal == NULL) {
			free_client(clp);
			return NULL;
		}
	}

	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_strhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	spin_lock_init(&clp->cl_lock);
	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
	/* Start the lease clock now. */
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	copy_verf(clp, verf);
	/* Record the address and auth flavor the client contacted us with. */
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_flavor = rqstp->rq_flavor;
	copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	gen_confirm(clp);
	clp->cl_cb_session = NULL;
	return clp;
}
1166
1167 static int check_name(struct xdr_netobj name)
1168 {
1169         if (name.len == 0) 
1170                 return 0;
1171         if (name.len > NFS4_OPAQUE_LIMIT) {
1172                 dprintk("NFSD: check_name: name too long(%d)!\n", name.len);
1173                 return 0;
1174         }
1175         return 1;
1176 }
1177
/*
 * Hash a newly created client into the unconfirmed string and id
 * tables and start its lease.
 */
static void
add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
{
	unsigned int idhashval;

	list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
	renew_client(clp);
}
1188
/*
 * Move a client from the unconfirmed hash tables to the confirmed
 * ones and renew its lease.
 */
static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	unsigned int strhashval;

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
	strhashval = clientstr_hashval(clp->cl_recdir);
	list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
	renew_client(clp);
}
1201
1202 static struct nfs4_client *
1203 find_confirmed_client(clientid_t *clid)
1204 {
1205         struct nfs4_client *clp;
1206         unsigned int idhashval = clientid_hashval(clid->cl_id);
1207
1208         list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
1209                 if (same_clid(&clp->cl_clientid, clid))
1210                         return clp;
1211         }
1212         return NULL;
1213 }
1214
1215 static struct nfs4_client *
1216 find_unconfirmed_client(clientid_t *clid)
1217 {
1218         struct nfs4_client *clp;
1219         unsigned int idhashval = clientid_hashval(clid->cl_id);
1220
1221         list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
1222                 if (same_clid(&clp->cl_clientid, clid))
1223                         return clp;
1224         }
1225         return NULL;
1226 }
1227
1228 static bool clp_used_exchangeid(struct nfs4_client *clp)
1229 {
1230         return clp->cl_exchange_flags != 0;
1231
1232
1233 static struct nfs4_client *
1234 find_confirmed_client_by_str(const char *dname, unsigned int hashval)
1235 {
1236         struct nfs4_client *clp;
1237
1238         list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
1239                 if (same_name(clp->cl_recdir, dname))
1240                         return clp;
1241         }
1242         return NULL;
1243 }
1244
1245 static struct nfs4_client *
1246 find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
1247 {
1248         struct nfs4_client *clp;
1249
1250         list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
1251                 if (same_name(clp->cl_recdir, dname))
1252                         return clp;
1253         }
1254         return NULL;
1255 }
1256
/*
 * Parse the callback address supplied by SETCLIENTID and store it in
 * the client's cb_conn.  On any parse failure the callback address is
 * cleared, which disables delegations for this client.
 */
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	/* Convert the universal address string into a sockaddr. */
	conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	/* Callbacks originate from the address the client contacted. */
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
1299
1300 /*
1301  * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
1302  */
1303 void
1304 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1305 {
1306         struct nfsd4_slot *slot = resp->cstate.slot;
1307         unsigned int base;
1308
1309         dprintk("--> %s slot %p\n", __func__, slot);
1310
1311         slot->sl_opcnt = resp->opcnt;
1312         slot->sl_status = resp->cstate.status;
1313
1314         if (nfsd4_not_cached(resp)) {
1315                 slot->sl_datalen = 0;
1316                 return;
1317         }
1318         slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
1319         base = (char *)resp->cstate.datap -
1320                                         (char *)resp->xbuf->head[0].iov_base;
1321         if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
1322                                     slot->sl_datalen))
1323                 WARN("%s: sessions DRC could not cache compound\n", __func__);
1324         return;
1325 }
1326
/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
		resp->opcnt, resp->cstate.slot->sl_cachethis);

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && slot->sl_cachethis == 0) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	/* Status of the last op encoded above (sequence or the error op). */
	return op->status;
}
1356
/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
__be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Either returns 0 or nfserr_retry_uncached */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status == nfserr_retry_uncached_rep)
		return status;

	/* The sequence operation has been encoded, cstate->datap set. */
	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);

	/* Restore the cached op count, reply position and status. */
	resp->opcnt = slot->sl_opcnt;
	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
	status = slot->sl_status;

	return status;
}
1384
1385 /*
1386  * Set the exchange_id flags returned by the server.
1387  */
1388 static void
1389 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1390 {
1391         /* pNFS is not supported */
1392         new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
1393
1394         /* Referrals are supported, Migration is not. */
1395         new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1396
1397         /* set the wire flags to return to client. */
1398         clid->flags = new->cl_exchange_flags;
1399 }
1400
/*
 * EXCHANGE_ID (RFC 5661 section 18.35): establish a client record,
 * handling the update/no-update cases 2, 4, 7, 8 and 9 of 18.35.4.
 */
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	int status;
	unsigned int		strhashval;
	char			dname[HEXDIR_LEN];
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A))
		return nfserr_inval;

	/* Currently only support SP4_NONE */
	switch (exid->spa_how) {
	case SP4_NONE:
		break;
	case SP4_SSV:
		return nfserr_serverfault;
	default:
		BUG();				/* checked by xdr code */
	case SP4_MACH_CRED:
		return nfserr_serverfault;	/* no excuse :-/ */
	}

	status = nfs4_make_rec_clidname(dname, &exid->clname);

	if (status)
		goto error;

	strhashval = clientstr_hashval(dname);

	nfs4_lock_state();
	status = nfs_ok;

	conf = find_confirmed_client_by_str(dname, strhashval);
	if (conf) {
		/* An NFSv4.0 client may not switch to EXCHANGE_ID. */
		if (!clp_used_exchangeid(conf)) {
			status = nfserr_clid_inuse; /* XXX: ? */
			goto out;
		}
		if (!same_verf(&verf, &conf->cl_verifier)) {
			/* 18.35.4 case 8 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_not_same;
				goto out;
			}
			/* Client reboot: destroy old state */
			expire_client(conf);
			goto out_new;
		}
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			/* 18.35.4 case 9 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_perm;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		/*
		 * Set bit when the owner id and verifier map to an already
		 * confirmed client id (18.35.3).
		 */
		exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;

		/*
		 * Falling into 18.35.4 case 2, possible router replay.
		 * Leave confirmed record intact and return same result.
		 */
		copy_verf(conf, &verf);
		new = conf;
		goto out_copy;
	}

	/* 18.35.4 case 7 */
	if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
		status = nfserr_noent;
		goto out;
	}

	unconf  = find_unconfirmed_client_by_str(dname, strhashval);
	if (unconf) {
		/*
		 * Possible retry or client restart.  Per 18.35.4 case 4,
		 * a new unconfirmed record should be generated regardless
		 * of whether any properties have changed.
		 */
		expire_client(unconf);
	}

out_new:
	/* Normal case */
	new = create_client(exid->clname, dname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	gen_clid(new);
	add_to_unconfirmed(new, strhashval);
out_copy:
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
error:
	dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
	return status;
}
1528
1529 static int
1530 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1531 {
1532         dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
1533                 slot_seqid);
1534
1535         /* The slot is in use, and no response has been sent. */
1536         if (slot_inuse) {
1537                 if (seqid == slot_seqid)
1538                         return nfserr_jukebox;
1539                 else
1540                         return nfserr_seq_misordered;
1541         }
1542         /* Normal */
1543         if (likely(seqid == slot_seqid + 1))
1544                 return nfs_ok;
1545         /* Replay */
1546         if (seqid == slot_seqid)
1547                 return nfserr_replay_cache;
1548         /* Wraparound */
1549         if (seqid == 1 && (slot_seqid + 1) == 0)
1550                 return nfs_ok;
1551         /* Misordered replay or misordered new request */
1552         return nfserr_seq_misordered;
1553 }
1554
1555 /*
1556  * Cache the create session result into the create session single DRC
1557  * slot cache by saving the xdr structure. sl_seqid has been set.
1558  * Do this for solo or embedded create session operations.
1559  */
1560 static void
1561 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
1562                            struct nfsd4_clid_slot *slot, int nfserr)
1563 {
1564         slot->sl_status = nfserr;
1565         memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
1566 }
1567
/*
 * Replay a CREATE_SESSION from the single clientid DRC slot: copy the
 * cached reply back into the xdr struct and return the cached status.
 */
static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}
1575
1576 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
1577                         2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
1578                         1 +     /* MIN tag is length with zero, only length */ \
1579                         3 +     /* version, opcount, opcode */ \
1580                         XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1581                                 /* seqid, slotID, slotID, cache */ \
1582                         4 ) * sizeof(__be32))
1583
1584 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
1585                         2 +     /* verifier: AUTH_NULL, length 0 */\
1586                         1 +     /* status */ \
1587                         1 +     /* MIN tag is length with zero, only length */ \
1588                         3 +     /* opcount, opcode, opstatus*/ \
1589                         XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1590                                 /* seqid, slotID, slotID, slotID, status */ \
1591                         5 ) * sizeof(__be32))
1592
1593 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
1594 {
1595         return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
1596                 || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
1597 }
1598
/*
 * CREATE_SESSION (RFC 5661 section 18.36): create a session for a
 * confirmed or unconfirmed clientid, handling replay via the clientid
 * DRC slot.  Confirms the client as a side effect when it was
 * previously unconfirmed.
 */
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_clid_slot *cs_slot = NULL;
	bool confirm_me = false;
	int status = 0;

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid);
	conf = find_confirmed_client(&cr_ses->clientid);

	if (conf) {
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			dprintk("Got a create_session replay! seqid= %d\n",
				cs_slot->sl_seqid);
			/* Return the cached reply status */
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			dprintk("Sequence misordered!\n");
			dprintk("Expected seqid= %d but got seqid= %d\n",
				cs_slot->sl_seqid, cr_ses->seqid);
			goto out;
		}
	} else if (unconf) {
		/* Principal and address must match the SETCLIENTID sender. */
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out;
		}

		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out;
		}

		confirm_me = true;
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out;
	}

	/*
	 * XXX: we should probably set this at creation time, and check
	 * for consistent minorversion use throughout:
	 */
	conf->cl_minorversion = 1;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	status = nfserr_toosmall;
	if (check_forechannel_attrs(cr_ses->fore_channel))
		goto out;

	status = nfserr_jukebox;
	new = alloc_init_session(rqstp, conf, cr_ses);
	if (!new)
		goto out;
	status = nfs_ok;
	/* Return the negotiated sessionid and channel attributes. */
	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
		sizeof(struct nfsd4_channel_attrs));
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	if (confirm_me)
		move_to_confirmed(conf);
out:
	nfs4_unlock_state();
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
}
1692
1693 static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
1694 {
1695         struct nfsd4_compoundres *resp = rqstp->rq_resp;
1696         struct nfsd4_compoundargs *argp = rqstp->rq_argp;
1697
1698         return argp->opcnt == resp->opcnt;
1699 }
1700
1701 static __be32 nfsd4_map_bcts_dir(u32 *dir)
1702 {
1703         switch (*dir) {
1704         case NFS4_CDFC4_FORE:
1705         case NFS4_CDFC4_BACK:
1706                 return nfs_ok;
1707         case NFS4_CDFC4_FORE_OR_BOTH:
1708         case NFS4_CDFC4_BACK_OR_BOTH:
1709                 *dir = NFS4_CDFC4_BOTH;
1710                 return nfs_ok;
1711         };
1712         return nfserr_inval;
1713 }
1714
/*
 * BIND_CONN_TO_SESSION: attach the current connection to an existing
 * session.  Must be the only operation in its compound.
 */
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&client_lock);
	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
	/* Sorta weird: we only need the refcnt'ing because new_conn acquires
	 * client_lock itself: */
	if (cstate->session) {
		nfsd4_get_session(cstate->session);
		atomic_inc(&cstate->session->se_client->cl_refcount);
	}
	spin_unlock(&client_lock);
	if (!cstate->session)
		return nfserr_badsession;

	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (!status)
		nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
	return status;
}
1740
1741 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
1742 {
1743         if (!session)
1744                 return 0;
1745         return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
1746 }
1747
/*
 * DESTROY_SESSION: unhash the named session, flush callbacks and
 * connections, and drop the hash table's reference to it.
 */
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	u32 status = nfserr_badsession;

	/* Notes:
	 * - The confirmed nfs4_client->cl_sessionid holds destroyed sessionid
	 * - Should we return nfserr_back_chan_busy if waiting for
	 *   callbacks on to-be-destroyed session?
	 * - Do we need to clear any callback info from previous session?
	 */

	/* Destroying the compound's own session only allowed as sole op. */
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			return nfserr_not_only_op;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
	if (!ses) {
		spin_unlock(&client_lock);
		goto out;
	}

	unhash_session(ses);
	spin_unlock(&client_lock);

	nfs4_lock_state();
	/* Flush in-flight callbacks before tearing down the session. */
	nfsd4_probe_callback_sync(ses->se_client);
	nfs4_unlock_state();

	nfsd4_del_conns(ses);

	/* Drop the hash table's reference; may free the session. */
	nfsd4_put_session(ses);
	status = nfs_ok;
out:
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
}
1790
1791 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
1792 {
1793         struct nfsd4_conn *c;
1794
1795         list_for_each_entry(c, &s->se_conns, cn_persession) {
1796                 if (c->cn_xprt == xpt) {
1797                         return c;
1798                 }
1799         }
1800         return NULL;
1801 }
1802
1803 static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
1804 {
1805         struct nfs4_client *clp = ses->se_client;
1806         struct nfsd4_conn *c;
1807         int ret;
1808
1809         spin_lock(&clp->cl_lock);
1810         c = __nfsd4_find_conn(new->cn_xprt, ses);
1811         if (c) {
1812                 spin_unlock(&clp->cl_lock);
1813                 free_conn(new);
1814                 return;
1815         }
1816         __nfsd4_hash_conn(new, ses);
1817         spin_unlock(&clp->cl_lock);
1818         ret = nfsd4_register_conn(new);
1819         if (ret)
1820                 /* oops; xprt is already down: */
1821                 nfsd4_conn_lost(&new->cn_xpt_user);
1822         return;
1823 }
1824
1825 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
1826 {
1827         struct nfsd4_compoundargs *args = rqstp->rq_argp;
1828
1829         return args->opcnt > session->se_fchannel.maxops;
1830 }
1831
1832 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
1833                                   struct nfsd4_session *session)
1834 {
1835         struct xdr_buf *xb = &rqstp->rq_arg;
1836
1837         return xb->len > session->se_fchannel.maxreq_sz;
1838 }
1839
1840 __be32
1841 nfsd4_sequence(struct svc_rqst *rqstp,
1842                struct nfsd4_compound_state *cstate,
1843                struct nfsd4_sequence *seq)
1844 {
1845         struct nfsd4_compoundres *resp = rqstp->rq_resp;
1846         struct nfsd4_session *session;
1847         struct nfsd4_slot *slot;
1848         struct nfsd4_conn *conn;
1849         int status;
1850
1851         if (resp->opcnt != 1)
1852                 return nfserr_sequence_pos;
1853
1854         /*
1855          * Will be either used or freed by nfsd4_sequence_check_conn
1856          * below.
1857          */
1858         conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
1859         if (!conn)
1860                 return nfserr_jukebox;
1861
1862         spin_lock(&client_lock);
1863         status = nfserr_badsession;
1864         session = find_in_sessionid_hashtbl(&seq->sessionid);
1865         if (!session)
1866                 goto out;
1867
1868         status = nfserr_too_many_ops;
1869         if (nfsd4_session_too_many_ops(rqstp, session))
1870                 goto out;
1871
1872         status = nfserr_req_too_big;
1873         if (nfsd4_request_too_big(rqstp, session))
1874                 goto out;
1875
1876         status = nfserr_badslot;
1877         if (seq->slotid >= session->se_fchannel.maxreqs)
1878                 goto out;
1879
1880         slot = session->se_slots[seq->slotid];
1881         dprintk("%s: slotid %d\n", __func__, seq->slotid);
1882
1883         /* We do not negotiate the number of slots yet, so set the
1884          * maxslots to the session maxreqs which is used to encode
1885          * sr_highest_slotid and the sr_target_slot id to maxslots */
1886         seq->maxslots = session->se_fchannel.maxreqs;
1887
1888         status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse);
1889         if (status == nfserr_replay_cache) {
1890                 cstate->slot = slot;
1891                 cstate->session = session;
1892                 /* Return the cached reply status and set cstate->status
1893                  * for nfsd4_proc_compound processing */
1894                 status = nfsd4_replay_cache_entry(resp, seq);
1895                 cstate->status = nfserr_replay_cache;
1896                 goto out;
1897         }
1898         if (status)
1899                 goto out;
1900
1901         nfsd4_sequence_check_conn(conn, session);
1902         conn = NULL;
1903
1904         /* Success! bump slot seqid */
1905         slot->sl_inuse = true;
1906         slot->sl_seqid = seq->seqid;
1907         slot->sl_cachethis = seq->cachethis;
1908
1909         cstate->slot = slot;
1910         cstate->session = session;
1911
1912 out:
1913         /* Hold a session reference until done processing the compound. */
1914         if (cstate->session) {
1915                 struct nfs4_client *clp = session->se_client;
1916
1917                 nfsd4_get_session(cstate->session);
1918                 atomic_inc(&clp->cl_refcount);
1919                 if (clp->cl_cb_state == NFSD4_CB_DOWN)
1920                         seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
1921         }
1922         kfree(conn);
1923         spin_unlock(&client_lock);
1924         dprintk("%s: return %d\n", __func__, ntohl(status));
1925         return status;
1926 }
1927
1928 __be32
1929 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
1930 {
1931         int status = 0;
1932
1933         if (rc->rca_one_fs) {
1934                 if (!cstate->current_fh.fh_dentry)
1935                         return nfserr_nofilehandle;
1936                 /*
1937                  * We don't take advantage of the rca_one_fs case.
1938                  * That's OK, it's optional, we can safely ignore it.
1939                  */
1940                  return nfs_ok;
1941         }
1942
1943         nfs4_lock_state();
1944         status = nfserr_complete_already;
1945         if (cstate->session->se_client->cl_firststate)
1946                 goto out;
1947
1948         status = nfserr_stale_clientid;
1949         if (is_client_expired(cstate->session->se_client))
1950                 /*
1951                  * The following error isn't really legal.
1952                  * But we only get here if the client just explicitly
1953                  * destroyed the client.  Surely it no longer cares what
1954                  * error it gets back on an operation for the dead
1955                  * client.
1956                  */
1957                 goto out;
1958
1959         status = nfs_ok;
1960         nfsd4_create_clid_dir(cstate->session->se_client);
1961 out:
1962         nfs4_unlock_state();
1963         return status;
1964 }
1965
/*
 * NFSv4.0 SETCLIENTID: create (or update) an unconfirmed client record
 * for the client-supplied name/verifier pair.  The result only takes
 * effect after a subsequent SETCLIENTID_CONFIRM.  The CASE labels below
 * refer to the IMPLEMENTATION discussion in RFC 3530 section 14.2.33.
 */
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj 	clname = { 
		.len = setclid->se_namelen,
		.data = setclid->se_name,
	};
	nfs4_verifier		clverifier = setclid->se_verf;
	unsigned int 		strhashval;
	struct nfs4_client	*conf, *unconf, *new;
	__be32 			status;
	char			dname[HEXDIR_LEN];
	
	if (!check_name(clname))
		return nfserr_inval;

	/* Derive the recovery-directory name used to hash/compare clients. */
	status = nfs4_make_rec_clidname(dname, &clname);
	if (status)
		return status;

	/* 
	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
	 * We get here on a DRC miss.
	 */

	strhashval = clientstr_hashval(dname);

	nfs4_lock_state();
	conf = find_confirmed_client_by_str(dname, strhashval);
	if (conf) {
		/* RFC 3530 14.2.33 CASE 0: */
		status = nfserr_clid_inuse;
		/* A client that used EXCHANGE_ID may not switch to v4.0. */
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	/*
	 * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
	 * has a description of SETCLIENTID request processing consisting
	 * of 5 bullet points, labeled as CASE0 - CASE4 below.
	 */
	unconf = find_unconfirmed_client_by_str(dname, strhashval);
	/* From here on, the only failure mode is allocation failure. */
	status = nfserr_jukebox;
	if (!conf) {
		/*
		 * RFC 3530 14.2.33 CASE 4:
		 * placed first, because it is the normal case
		 */
		if (unconf)
			expire_client(unconf);
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	} else if (same_verf(&conf->cl_verifier, &clverifier)) {
		/*
		 * RFC 3530 14.2.33 CASE 1:
		 * probable callback update
		 */
		if (unconf) {
			/* Note this is removing unconfirmed {*x***},
			 * which is stronger than RFC recommended {vxc**}.
			 * This has the advantage that there is at most
			 * one {*x***} in either list at any time.
			 */
			expire_client(unconf);
		}
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		/* Same client coming back: keep its existing clientid. */
		copy_clid(new, conf);
	} else if (!unconf) {
		/*
		 * RFC 3530 14.2.33 CASE 2:
		 * probable client reboot; state will be removed if
		 * confirmed.
		 */
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	} else {
		/*
		 * RFC 3530 14.2.33 CASE 3:
		 * probable client reboot; state will be removed if
		 * confirmed.
		 */
		expire_client(unconf);
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	}
	/*
	 * XXX: we should probably set this at creation time, and check
	 * for consistent minorversion use throughout:
	 */
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new, strhashval);
	/* Hand the (clientid, confirm verifier) pair back to the client. */
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}
2082
2083
2084 /*
2085  * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
2086  * a description of SETCLIENTID_CONFIRM request processing consisting of 4
2087  * bullets, labeled as CASE1 - CASE4 below.
2088  */
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;

	if (STALE_CLIENTID(clid))
		return nfserr_stale_clientid;
	/* 
	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
	 * We get here on a DRC miss.
	 */

	nfs4_lock_state();

	/* The same clientid may have a confirmed and/or an unconfirmed
	 * record; which combination exists selects the CASE below. */
	conf = find_confirmed_client(clid);
	unconf = find_unconfirmed_client(clid);

	status = nfserr_clid_inuse;
	/* Reject confirms arriving from a different address than the
	 * client record was established from. */
	if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
		goto out;
	if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
		goto out;

	/*
	 * section 14.2.34 of RFC 3530 has a description of
	 * SETCLIENTID_CONFIRM request processing consisting
	 * of 4 bullet points, labeled as CASE1 - CASE4 below.
	 */
	if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) {
		/*
		 * RFC 3530 14.2.34 CASE 1:
		 * callback update
		 */
		if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
			status = nfserr_clid_inuse;
		else {
			/* Adopt the new callback info, keep the old state. */
			nfsd4_change_callback(conf, &unconf->cl_cb_conn);
			nfsd4_probe_callback(conf);
			expire_client(unconf);
			status = nfs_ok;

		}
	} else if (conf && !unconf) {
		/*
		 * RFC 3530 14.2.34 CASE 2:
		 * probable retransmitted request; play it safe and
		 * do nothing.
		 */
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
			status = nfserr_clid_inuse;
		else
			status = nfs_ok;
	} else if (!conf && unconf
			&& same_verf(&unconf->cl_confirm, &confirm)) {
		/*
		 * RFC 3530 14.2.34 CASE 3:
		 * Normal case; new or rebooted client:
		 */
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
			status = nfserr_clid_inuse;
		} else {
			/* Any confirmed client with the same name string
			 * is superseded by this confirmation. */
			unsigned int hash =
				clientstr_hashval(unconf->cl_recdir);
			conf = find_confirmed_client_by_str(unconf->cl_recdir,
							    hash);
			if (conf) {
				nfsd4_remove_clid_dir(conf);
				expire_client(conf);
			}
			move_to_confirmed(unconf);
			conf = unconf;
			nfsd4_probe_callback(conf);
			status = nfs_ok;
		}
	} else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
	    && (!unconf || (unconf && !same_verf(&unconf->cl_confirm,
								&confirm)))) {
		/*
		 * RFC 3530 14.2.34 CASE 4:
		 * Client probably hasn't noticed that we rebooted yet.
		 * (The inner "conf &&"/"unconf &&" above are redundant
		 * given the preceding "!conf ||"/"!unconf ||" — harmless.)
		 */
		status = nfserr_stale_clientid;
	} else {
		/* check that we have hit one of the cases...*/
		status = nfserr_clid_inuse;
	}
out:
	nfs4_unlock_state();
	return status;
}
2185
2186 /* OPEN Share state helper functions */
2187 static inline struct nfs4_file *
2188 alloc_init_file(struct inode *ino)
2189 {
2190         struct nfs4_file *fp;
2191         unsigned int hashval = file_hashval(ino);
2192
2193         fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
2194         if (fp) {
2195                 atomic_set(&fp->fi_ref, 1);
2196                 INIT_LIST_HEAD(&fp->fi_hash);
2197                 INIT_LIST_HEAD(&fp->fi_stateids);
2198                 INIT_LIST_HEAD(&fp->fi_delegations);
2199                 fp->fi_inode = igrab(ino);
2200                 fp->fi_had_conflict = false;
2201                 fp->fi_lease = NULL;
2202                 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2203                 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2204                 spin_lock(&recall_lock);
2205                 list_add(&fp->fi_hash, &file_hashtbl[hashval]);
2206                 spin_unlock(&recall_lock);
2207                 return fp;
2208         }
2209         return NULL;
2210 }
2211
2212 static void
2213 nfsd4_free_slab(struct kmem_cache **slab)
2214 {
2215         if (*slab == NULL)
2216                 return;
2217         kmem_cache_destroy(*slab);
2218         *slab = NULL;
2219 }
2220
2221 void
2222 nfsd4_free_slabs(void)
2223 {
2224         nfsd4_free_slab(&openowner_slab);
2225         nfsd4_free_slab(&lockowner_slab);
2226         nfsd4_free_slab(&file_slab);
2227         nfsd4_free_slab(&stateid_slab);
2228         nfsd4_free_slab(&deleg_slab);
2229 }
2230
2231 static int
2232 nfsd4_init_slabs(void)
2233 {
2234         openowner_slab = kmem_cache_create("nfsd4_openowners",
2235                         sizeof(struct nfs4_openowner), 0, 0, NULL);
2236         if (openowner_slab == NULL)
2237                 goto out_nomem;
2238         lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2239                         sizeof(struct nfs4_openowner), 0, 0, NULL);
2240         if (lockowner_slab == NULL)
2241                 goto out_nomem;
2242         file_slab = kmem_cache_create("nfsd4_files",
2243                         sizeof(struct nfs4_file), 0, 0, NULL);
2244         if (file_slab == NULL)
2245                 goto out_nomem;
2246         stateid_slab = kmem_cache_create("nfsd4_stateids",
2247                         sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2248         if (stateid_slab == NULL)
2249                 goto out_nomem;
2250         deleg_slab = kmem_cache_create("nfsd4_delegations",
2251                         sizeof(struct nfs4_delegation), 0, 0, NULL);
2252         if (deleg_slab == NULL)
2253                 goto out_nomem;
2254         return 0;
2255 out_nomem:
2256         nfsd4_free_slabs();
2257         dprintk("nfsd4: out of memory while initializing nfsv4\n");
2258         return -ENOMEM;
2259 }
2260
2261 void nfs4_free_openowner(struct nfs4_openowner *oo)
2262 {
2263         kfree(oo->oo_owner.so_owner.data);
2264         kmem_cache_free(openowner_slab, oo);
2265 }
2266
2267 void nfs4_free_lockowner(struct nfs4_lockowner *lo)
2268 {
2269         kfree(lo->lo_owner.so_owner.data);
2270         kmem_cache_free(lockowner_slab, lo);
2271 }
2272
2273 static void init_nfs4_replay(struct nfs4_replay *rp)
2274 {
2275         rp->rp_status = nfserr_serverfault;
2276         rp->rp_buflen = 0;
2277         rp->rp_buf = rp->rp_ibuf;
2278 }
2279
2280 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2281 {
2282         struct nfs4_stateowner *sop;
2283
2284         sop = kmem_cache_alloc(slab, GFP_KERNEL);
2285         if (!sop)
2286                 return NULL;
2287
2288         sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2289         if (!sop->so_owner.data) {
2290                 kmem_cache_free(slab, sop);
2291                 return NULL;
2292         }
2293         sop->so_owner.len = owner->len;
2294
2295         INIT_LIST_HEAD(&sop->so_stateids);
2296         sop->so_client = clp;
2297         init_nfs4_replay(&sop->so_replay);
2298         return sop;
2299 }
2300
2301 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2302 {
2303         list_add(&oo->oo_owner.so_strhash, &open_ownerstr_hashtbl[strhashval]);
2304         list_add(&oo->oo_perclient, &clp->cl_openowners);
2305 }
2306
2307 static struct nfs4_openowner *
2308 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2309         struct nfs4_openowner *oo;
2310
2311         oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2312         if (!oo)
2313                 return NULL;
2314         oo->oo_owner.so_is_open_owner = 1;
2315         oo->oo_owner.so_seqid = open->op_seqid;
2316         oo->oo_flags = 0;
2317         oo->oo_time = 0;
2318         oo->oo_last_closed_stid = NULL;
2319         INIT_LIST_HEAD(&oo->oo_close_lru);
2320         hash_openowner(oo, clp, strhashval);
2321         return oo;
2322 }
2323
/*
 * Initialize a freshly allocated open stateid: link it into the open
 * owner's and file's stateid lists, assign it a new stateid value from
 * the global counter, hash it, and record the requested share bits.
 */
static inline void
init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_client *clp = oo->oo_owner.so_client;

	INIT_LIST_HEAD(&stp->st_lockowners);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	stp->st_stateowner = &oo->oo_owner;
	/* The stateid holds its own reference on the file. */
	get_nfs4_file(fp);
	stp->st_file = fp;
	stp->st_stid.sc_stateid.si_opaque.so_clid = clp->cl_clientid;
	stp->st_stid.sc_stateid.si_opaque.so_id = current_stateid++;
	/* note will be incremented before first return to client: */
	stp->st_stid.sc_stateid.si_generation = 0;
	/* Stateid fields must be set before hashing makes it findable. */
	hash_stid(&stp->st_stid);
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	/* Record share access (minus v4.1 "want" bits) and deny modes. */
	__set_bit(open->op_share_access & ~NFS4_SHARE_WANT_MASK,
		  &stp->st_access_bmap);
	__set_bit(open->op_share_deny, &stp->st_deny_bmap);
	stp->st_openstp = NULL;
}
2348
2349 static void
2350 move_to_close_lru(struct nfs4_openowner *oo)
2351 {
2352         dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2353
2354         list_move_tail(&oo->oo_close_lru, &close_lru);
2355         oo->oo_time = get_seconds();
2356 }
2357
2358 static int
2359 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2360                                                         clientid_t *clid)
2361 {
2362         return (sop->so_owner.len == owner->len) &&
2363                 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2364                 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2365 }
2366
2367 static struct nfs4_openowner *
2368 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
2369 {
2370         struct nfs4_stateowner *so = NULL;
2371
2372         list_for_each_entry(so, &open_ownerstr_hashtbl[hashval], so_strhash) {
2373                 if (same_owner_str(so, &open->op_owner, &open->op_clientid))
2374                         return container_of(so, struct nfs4_openowner, oo_owner);
2375         }
2376         return NULL;
2377 }
2378
2379 /* search file_hashtbl[] for file */
2380 static struct nfs4_file *
2381 find_file(struct inode *ino)
2382 {
2383         unsigned int hashval = file_hashval(ino);
2384         struct nfs4_file *fp;
2385
2386         spin_lock(&recall_lock);
2387         list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2388                 if (fp->fi_inode == ino) {
2389                         get_nfs4_file(fp);
2390                         spin_unlock(&recall_lock);
2391                         return fp;
2392                 }
2393         }
2394         spin_unlock(&recall_lock);
2395         return NULL;
2396 }
2397
2398 static inline int access_valid(u32 x, u32 minorversion)
2399 {
2400         if ((x & NFS4_SHARE_ACCESS_MASK) < NFS4_SHARE_ACCESS_READ)
2401                 return 0;
2402         if ((x & NFS4_SHARE_ACCESS_MASK) > NFS4_SHARE_ACCESS_BOTH)
2403                 return 0;
2404         x &= ~NFS4_SHARE_ACCESS_MASK;
2405         if (minorversion && x) {
2406                 if ((x & NFS4_SHARE_WANT_MASK) > NFS4_SHARE_WANT_CANCEL)
2407                         return 0;
2408                 if ((x & NFS4_SHARE_WHEN_MASK) > NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED)
2409                         return 0;
2410                 x &= ~(NFS4_SHARE_WANT_MASK | NFS4_SHARE_WHEN_MASK);
2411         }
2412         if (x)
2413                 return 0;
2414         return 1;
2415 }
2416
2417 static inline int deny_valid(u32 x)
2418 {
2419         /* Note: unlike access bits, deny bits may be zero. */
2420         return x <= NFS4_SHARE_DENY_BOTH;
2421 }
2422
2423 /*
2424  * Called to check deny when READ with all zero stateid or
2425  * WRITE with all zero or all one stateid
2426  */
2427 static __be32
2428 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2429 {
2430         struct inode *ino = current_fh->fh_dentry->d_inode;
2431         struct nfs4_file *fp;
2432         struct nfs4_ol_stateid *stp;
2433         __be32 ret;
2434
2435         dprintk("NFSD: nfs4_share_conflict\n");
2436
2437         fp = find_file(ino);
2438         if (!fp)
2439                 return nfs_ok;
2440         ret = nfserr_locked;
2441         /* Search for conflicting share reservations */
2442         list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2443                 if (test_bit(deny_type, &stp->st_deny_bmap) ||
2444                     test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
2445                         goto out;
2446         }
2447         ret = nfs_ok;
2448 out:
2449         put_nfs4_file(fp);
2450         return ret;
2451 }
2452
/*
 * Queue one delegation for recall and kick off the recall callback.
 * Called from nfsd_break_deleg_cb with recall_lock held (and, per that
 * caller, under lock_flocks()).
 */
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/* We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference: */
	atomic_inc(&dp->dl_count);

	/* Put it where the laundromat-style scan of del_recall_lru can
	 * find it. */
	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);

	/* only place dl_time is set. protected by lock_flocks*/
	dp->dl_time = get_seconds();

	nfsd4_cb_recall(dp);
}
2469
/* Called from break_lease() with lock_flocks() held. */
/* lm_break handler: a conflicting open/operation is breaking the lease
 * backing our delegations on this file, so recall all of them. */
static void nfsd_break_deleg_cb(struct file_lock *fl)
{
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	BUG_ON(!fp);
	/* We assume break_lease is only called once per lease: */
	BUG_ON(fp->fi_had_conflict);
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&recall_lock);
	fp->fi_had_conflict = true;
	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
		nfsd_break_one_deleg(dp);
	spin_unlock(&recall_lock);
}
2492
2493 static
2494 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2495 {
2496         if (arg & F_UNLCK)
2497                 return lease_modify(onlist, arg);
2498         else
2499                 return -EAGAIN;
2500 }
2501
/* Lease callbacks handed to the VFS locks code for delegation leases. */
static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
2506
2507 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2508 {
2509         if (nfsd4_has_session(cstate))
2510                 return nfs_ok;
2511         if (seqid == so->so_seqid - 1)
2512                 return nfserr_replay_me;
2513         if (seqid == so->so_seqid)
2514                 return nfs_ok;
2515         return nfserr_bad_seqid;
2516 }
2517
/*
 * First phase of OPEN processing: locate (or create) the open owner for
 * this (clientid, owner) pair, perform the v4.0 seqid/replay check, and
 * renew the client's lease.  On success open->op_openowner is set.
 */
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (!check_name(open->op_owner))
		return nfserr_inval;

	if (STALE_CLIENTID(&open->op_clientid))
		return nfserr_stale_clientid;

	strhashval = open_ownerstr_hashval(clientid->cl_id, &open->op_owner);
	oo = find_openstateowner_str(strhashval, open);
	open->op_openowner = oo;
	if (!oo) {
		/* Make sure the client's lease hasn't expired. */
		clp = find_confirmed_client(clientid);
		if (clp == NULL)
			return nfserr_expired;
		goto renew;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		clp = oo->oo_owner.so_client;
		release_openowner(oo);
		open->op_openowner = NULL;
		goto renew;
	}
	/* Confirmed owner: enforce the v4.0 seqid discipline. */
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
renew:
	/* No usable owner found above: allocate a fresh one. */
	if (open->op_openowner == NULL) {
		oo = alloc_init_open_stateowner(strhashval, clp, open);
		if (oo == NULL)
			return nfserr_jukebox;
		open->op_openowner = oo;
	}
	/* A new OPEN rescues the owner from the close LRU, if queued. */
	list_del_init(&oo->oo_close_lru);
	renew_client(oo->oo_owner.so_client);
	return nfs_ok;
}
2565
2566 static inline __be32
2567 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2568 {
2569         if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2570                 return nfserr_openmode;
2571         else
2572                 return nfs_ok;
2573 }
2574
2575 static int share_access_to_flags(u32 share_access)
2576 {
2577         share_access &= ~NFS4_SHARE_WANT_MASK;
2578
2579         return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2580 }
2581
2582 static struct nfs4_delegation *find_deleg_stateid(stateid_t *s)
2583 {
2584         struct nfs4_stid *ret;
2585
2586         ret = find_stateid_by_type(s, NFS4_DELEG_STID);
2587         if (!ret)
2588                 return NULL;
2589         return delegstateid(ret);
2590 }
2591
/*
 * Look up the delegation stateid presented in an OPEN and check that its
 * mode is compatible with the requested share access.
 *
 * On return *dp points to the delegation, or is NULL if none was found
 * (or the mode check failed).  For CLAIM_DELEGATE_CUR a usable delegation
 * is mandatory, so failures are propagated to the caller; for any other
 * claim type the delegation is optional and errors are ignored.
 */
static __be32
nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;

	*dp = find_deleg_stateid(&open->op_delegate_stateid);
	if (*dp == NULL)
		goto out;
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(*dp, flags);
	if (status)
		*dp = NULL;
out:
	if (open->op_claim_type != NFS4_OPEN_CLAIM_DELEGATE_CUR)
		return nfs_ok;
	if (status)
		return status;
	/* Successfully claiming the delegation also confirms the open owner. */
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
2614
/*
 * Walk every stateid hashed on this file, checking the requested open
 * against each existing share reservation.  As a side effect, if an open
 * stateid belonging to this open's owner already exists it is returned
 * in *stpp, so the caller can perform an open upgrade rather than create
 * a new stateid.
 */
static __be32
nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
{
	struct nfs4_ol_stateid *local;
	struct nfs4_openowner *oo = open->op_openowner;

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		/* remember if we have seen this open owner */
		if (local->st_stateowner == &oo->oo_owner)
			*stpp = local;
		/* check for conflicting share reservations */
		if (!test_share(local, open))
			return nfserr_share_denied;
	}
	return nfs_ok;
}
2634
2635 static inline struct nfs4_ol_stateid *
2636 nfs4_alloc_stateid(void)
2637 {
2638         return kmem_cache_alloc(stateid_slab, GFP_KERNEL);
2639 }
2640
2641 static inline int nfs4_access_to_access(u32 nfs4_access)
2642 {
2643         int flags = 0;
2644
2645         if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2646                 flags |= NFSD_MAY_READ;
2647         if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2648                 flags |= NFSD_MAY_WRITE;
2649         return flags;
2650 }
2651
/*
 * Open (or reuse) the struct file backing an NFSv4 OPEN and take a
 * reference on the requested access mode.  Opened files are cached in
 * fp->fi_fds[], one slot per O_* open mode, so each mode is only opened
 * once per file.
 */
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfsd4_open *open)
{
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);

	/* CLAIM_DELEGATE_CUR is used in response to a broken lease;
	 * allowing it to break the lease and return EAGAIN leaves the
	 * client unable to make progress in returning the delegation */
	if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
		access |= NFSD_MAY_NOT_BREAK_LEASE;

	/* Only open once per access mode; later opens share the cached file. */
	if (!fp->fi_fds[oflag]) {
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
			&fp->fi_fds[oflag]);
		if (status)
			return status;
	}
	nfs4_file_get_access(fp, oflag);

	return nfs_ok;
}
2675
/*
 * First-time OPEN of this file by this owner: allocate a stateid and
 * open the underlying file.  On success *stpp holds the new (not yet
 * initialized or hashed) stateid; on failure the stateid is freed and
 * *stpp is left untouched.
 */
static __be32
nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_ol_stateid **stpp,
		struct nfs4_file *fp, struct svc_fh *cur_fh,
		struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;
	__be32 status;

	stp = nfs4_alloc_stateid();
	if (stp == NULL)
		return nfserr_jukebox;

	status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
	if (status) {
		/* stateid was never hashed, so a plain slab free suffices */
		kmem_cache_free(stateid_slab, stp);
		return status;
	}
	*stpp = stp;
	return 0;
}
2696
/*
 * Perform the implicit truncate-to-zero requested by an OPEN with
 * op_truncate set.  Truncation requires write share access; asking to
 * truncate without it is invalid.
 */
static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}
2711
/*
 * OPEN upgrade of an existing stateid: acquire any newly requested file
 * access, apply a requested truncate, then record the new access/deny
 * bits in the stateid's bitmaps.  If the truncate fails, any access
 * taken by this call is dropped again, leaving the stateid unchanged.
 */
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
	bool new_access;
	__be32 status;

	/* Is an access mode being requested that this stateid lacks? */
	new_access = !test_bit(op_share_access, &stp->st_access_bmap);
	if (new_access) {
		status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
		if (status)
			return status;
	}
	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status) {
		/* undo the access we just took; the stateid keeps its old bits */
		if (new_access) {
			int oflag = nfs4_access_to_omode(op_share_access);
			nfs4_file_put_access(fp, oflag);
		}
		return status;
	}
	/* remember the open */
	__set_bit(op_share_access, &stp->st_access_bmap);
	__set_bit(open->op_share_deny, &stp->st_deny_bmap);

	return nfs_ok;
}
2739
2740
/*
 * Record a successful CLAIM_PREVIOUS reclaim: the open owner is
 * implicitly confirmed, and cl_firststate is set — presumably marking
 * that the client has established reclaimable state, for use by the
 * recovery-directory code; confirm against nfsd4_recdir users.
 */
static void
nfs4_set_claim_prev(struct nfsd4_open *open)
{
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	open->op_openowner->oo_owner.so_client->cl_firststate = 1;
}
2747
2748 /* Should we give out recallable state?: */
2749 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2750 {
2751         if (clp->cl_cb_state == NFSD4_CB_UP)
2752                 return true;
2753         /*
2754          * In the sessions case, since we don't have to establish a
2755          * separate connection for callbacks, we assume it's OK
2756          * until we hear otherwise:
2757          */
2758         return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2759 }
2760
2761 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
2762 {
2763         struct file_lock *fl;
2764
2765         fl = locks_alloc_lock();
2766         if (!fl)
2767                 return NULL;
2768         locks_init_lock(fl);
2769         fl->fl_lmops = &nfsd_lease_mng_ops;
2770         fl->fl_flags = FL_LEASE;
2771         fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
2772         fl->fl_end = OFFSET_MAX;
2773         fl->fl_owner = (fl_owner_t)(dp->dl_file);
2774         fl->fl_pid = current->tgid;
2775         return fl;
2776 }
2777
/*
 * Install the lease backing the first delegation handed out on a file,
 * and record the lease and delegation on the nfs4_file.
 *
 * NOTE(review): two fragile spots worth confirming —
 *  - any vfs_setlease() failure is collapsed to -ENOMEM, losing the
 *    real error (e.g. a conflict);
 *  - the find_readable_file() result is used without a NULL check.
 */
static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
{
	struct nfs4_file *fp = dp->dl_file;
	struct file_lock *fl;
	int status;

	fl = nfs4_alloc_init_lease(dp, flag);
	if (!fl)
		return -ENOMEM;
	fl->fl_file = find_readable_file(fp);
	/* linked before vfs_setlease() — presumably so a lease break can
	 * already find the delegation; confirm */
	list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
	if (status) {
		/* undo the early linkage; assumes vfs_setlease() left fl
		 * untouched on failure — confirm */
		list_del_init(&dp->dl_perclnt);
		locks_free_lock(fl);
		return -ENOMEM;
	}
	/* first delegation for this file: record the lease, pin the file */
	fp->fi_lease = fl;
	fp->fi_deleg_file = fl->fl_file;
	get_file(fp->fi_deleg_file);
	atomic_set(&fp->fi_delegees, 1);
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	return 0;
}
2802
/*
 * Attach a delegation to its file, creating the backing lease on first
 * use.  If the file already has a lease, just bump fi_delegees — but
 * only after re-checking fi_had_conflict under recall_lock, so we never
 * hand out a delegation on a file whose lease is already being broken.
 */
static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
{
	struct nfs4_file *fp = dp->dl_file;

	if (!fp->fi_lease)
		return nfs4_setlease(dp, flag);
	spin_lock(&recall_lock);
	if (fp->fi_had_conflict) {
		spin_unlock(&recall_lock);
		return -EAGAIN;
	}
	atomic_inc(&fp->fi_delegees);
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	spin_unlock(&recall_lock);
	/* cl_delegations linkage doesn't need recall_lock */
	list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
	return 0;
}
2820
/*
 * Attempt to hand out a delegation.
 *
 * Never returns an error: any failure simply results in
 * NFS4_OPEN_DELEGATE_NONE being reported in open->op_delegate_type,
 * since the OPEN itself must still succeed.
 */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
	int cb_up;
	int status, flag = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	flag = NFS4_OPEN_DELEGATE_NONE;
	open->op_recall = 0;
	switch (open->op_claim_type) {
		case NFS4_OPEN_CLAIM_PREVIOUS:
			/* Reclaim: honour the previously held delegation
			 * type, but schedule a recall if the callback
			 * channel isn't known to be good. */
			if (!cb_up)
				open->op_recall = 1;
			flag = open->op_delegate_type;
			if (flag == NFS4_OPEN_DELEGATE_NONE)
				goto out;
			break;
		case NFS4_OPEN_CLAIM_NULL:
			/* Let's not give out any delegations till everyone's
			 * had the chance to reclaim theirs.... */
			if (locks_in_grace())
				goto out;
			/* need a working callback channel and a confirmed owner */
			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
				goto out;
			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
				flag = NFS4_OPEN_DELEGATE_WRITE;
			else
				flag = NFS4_OPEN_DELEGATE_READ;
			break;
		default:
			goto out;
	}

	dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
	if (dp == NULL)
		goto out_no_deleg;
	status = nfs4_set_delegation(dp, flag);
	if (status)
		goto out_free;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
out:
	/* A refused reclaim of a previously held delegation deserves a warning */
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
			&& flag == NFS4_OPEN_DELEGATE_NONE
			&& open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
	open->op_delegate_type = flag;
	return;
out_free:
	nfs4_put_delegation(dp);
out_no_deleg:
	flag = NFS4_OPEN_DELEGATE_NONE;
	goto out;
}
2883
/*
 * Second phase of OPEN processing: create or upgrade the open stateid,
 * perform any implicit truncate, and attempt to hand out a delegation.
 *
 * called with nfs4_lock_state() held.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_file *fp = NULL;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	status = nfserr_inval;
	if (!access_valid(open->op_share_access, resp->cstate.minorversion)
			|| !deny_valid(open->op_share_deny))
		goto out;
	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_file(ino);
	if (fp) {
		if ((status = nfs4_check_open(fp, open, &stp)))
			goto out;
		status = nfs4_check_deleg(fp, open, &dp);
		if (status)
			goto out;
	} else {
		/* CLAIM_DELEGATE_CUR on an unknown file cannot name a
		 * valid delegation stateid. */
		status = nfserr_bad_stateid;
		if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
			goto out;
		status = nfserr_jukebox;
		fp = alloc_init_file(ino);
		if (fp == NULL)
			goto out;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status)
			goto out;
	} else {
		status = nfs4_new_open(rqstp, &stp, fp, current_fh, open);
		if (status)
			goto out;
		init_open_stateid(stp, fp, open);
		status = nfsd4_truncate(rqstp, current_fh, open);
		if (status) {
			release_open_stateid(stp);
			goto out;
		}
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	/* Session-based (v4.1) opens never need a separate OPEN_CONFIRM: */
	if (nfsd4_has_session(&resp->cstate))
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);

	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		nfs4_set_claim_prev(open);
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	return status;
}
2974
/*
 * RENEW: refresh the client's lease.  Returns nfserr_expired when the
 * clientid is no longer known, and nfserr_cb_path_down when the client
 * holds delegations but the callback channel is not up (so we could not
 * recall them).
 */
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    clientid_t *clid)
{
	struct nfs4_client *clp;
	__be32 status;

	nfs4_lock_state();
	dprintk("process_renew(%08x/%08x): starting\n", 
			clid->cl_boot, clid->cl_id);
	status = nfserr_stale_clientid;
	if (STALE_CLIENTID(clid))
		goto out;
	clp = find_confirmed_client(clid);
	status = nfserr_expired;
	if (clp == NULL) {
		/* We assume the client took too long to RENEW. */
		dprintk("nfsd4_renew: clientid not found!\n");
		goto out;
	}
	renew_client(clp);
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}
3005
/* Grace-period manager handed to the generic locks code (locks_end_grace). */
static struct lock_manager nfsd4_manager = {
};
3008
/*
 * End the NFSv4 grace period: purge stale client recovery records and
 * tell the generic locks code that reclaims are over.
 */
static void
nfsd4_end_grace(void)
{
	dprintk("NFSD: end of grace period\n");
	nfsd4_recdir_purge_old();
	locks_end_grace(&nfsd4_manager);
	/*
	 * Now that every NFSv4 client has had the chance to recover and
	 * to see the (possibly new, possibly shorter) lease time, we
	 * can safely set the next grace time to the current lease time:
	 */
	nfsd4_grace = nfsd4_lease;
}
3022
/*
 * The laundromat: expire time-based state — client leases, recallable
 * delegations on del_recall_lru, and openowners parked on close_lru —
 * that has passed the lease cutoff.  Returns the number of seconds
 * until the next run is needed (based on the client list).
 *
 * NOTE(review): test_val is computed for delegations and openowners but
 * never folded into the return value — candidate cleanup; confirm.
 */
static time_t
nfs4_laundromat(void)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nfsd4_lease;
	time_t t, clientid_val = nfsd4_lease;
	time_t u, test_val = nfsd4_lease;

	nfs4_lock_state();

	dprintk("NFSD: laundromat service - starting\n");
	if (locks_in_grace())
		nfsd4_end_grace();
	INIT_LIST_HEAD(&reaplist);
	/* Phase 1: collect expired, unreferenced clients under client_lock... */
	spin_lock(&client_lock);
	list_for_each_safe(pos, next, &client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		/* list presumably time-ordered: stop at the first live entry */
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
			if (clientid_val > t)
				clientid_val = t;
			break;
		}
		if (atomic_read(&clp->cl_refcount)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		unhash_client_locked(clp);
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&client_lock);
	/* ...then expire them outside the lock. */
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		nfsd4_remove_clid_dir(clp);
		expire_client(clp);
	}
	/* Phase 2: same two-step collect/reap for recalled delegations. */
	spin_lock(&recall_lock);
	list_for_each_safe(pos, next, &del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			u = dp->dl_time - cutoff;
			if (test_val > u)
				test_val = u;
			break;
		}
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		unhash_delegation(dp);
	}
	/* Phase 3: reap openowners that have sat on close_lru too long. */
	test_val = nfsd4_lease;
	list_for_each_safe(pos, next, &close_lru) {
		oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
		if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
			u = oo->oo_time - cutoff;
			if (test_val > u)
				test_val = u;
			break;
		}
		release_openowner(oo);
	}
	if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
		clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
	nfs4_unlock_state();
	return clientid_val;
}
3098
/* Deferred-work plumbing for the periodic state reaper above. */
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);
static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
3102
3103 static void
3104 laundromat_main(struct work_struct *not_used)
3105 {
3106         time_t t;
3107
3108         t = nfs4_laundromat();
3109         dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3110         queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
3111 }
3112
3113 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3114 {
3115         if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3116                 return nfserr_bad_stateid;
3117         return nfs_ok;
3118 }
3119
3120 static int
3121 STALE_STATEID(stateid_t *stateid)
3122 {
3123         if (stateid->si_opaque.so_clid.cl_boot == boot_time)
3124                 return 0;
3125         dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3126                 STATEID_VAL(stateid));
3127         return 1;
3128 }
3129
3130 static inline int
3131 access_permit_read(unsigned long access_bmap)
3132 {
3133         return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) ||
3134                 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) ||
3135                 test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap);
3136 }
3137
3138 static inline int
3139 access_permit_write(unsigned long access_bmap)
3140 {
3141         return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) ||
3142                 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap);
3143 }
3144
3145 static
3146 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3147 {
3148         __be32 status = nfserr_openmode;
3149
3150         /* For lock stateid's, we test the parent open, not the lock: */
3151         if (stp->st_openstp)
3152                 stp = stp->st_openstp;
3153         if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap)))
3154                 goto out;
3155         if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap)))
3156                 goto out;
3157         status = nfs_ok;
3158 out:
3159         return status;
3160 }
3161
/*
 * Handle the special all-zeros and all-ones stateids for I/O: the
 * all-ones stateid always permits READ; for the remaining cases the
 * answer depends on conflicting share reservations, which cannot be
 * evaluated during the grace period.
 */
static inline __be32
check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (locks_in_grace()) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}
3178
/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct inode *inode)
{
	if (!locks_in_grace())
		return 0;
	return mandatory_lock(inode) != 0;
}
3188
3189 /* Returns true iff a is later than b: */
3190 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3191 {
3192         return (s32)a->si_generation - (s32)b->si_generation > 0;
3193 }
3194
3195 static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3196 {
3197         /*
3198          * When sessions are used the stateid generation number is ignored
3199          * when it is zero.
3200          */
3201         if (has_session && in->si_generation == 0)
3202                 return nfs_ok;
3203
3204         if (in->si_generation == ref->si_generation)
3205                 return nfs_ok;
3206
3207         /* If the client sends us a stateid from the future, it's buggy: */
3208         if (stateid_generation_after(in, ref))
3209                 return nfserr_bad_stateid;
3210         /*
3211          * However, we could see a stateid from the past, even from a
3212          * non-buggy client.  For example, if the client sends a lock
3213          * while some IO is outstanding, the lock may bump si_generation
3214          * while the IO is still in flight.  The client could avoid that
3215          * situation by waiting for responses on all the IO requests,
3216          * but better performance may result in retrying IO that
3217          * receives an old_stateid error if requests are rarely
3218          * reordered in flight:
3219          */
3220         return nfserr_old_stateid;
3221 }
3222
/*
 * Validate a stateid without using it: it must belong to this server
 * instance, still exist, carry a current generation and — for open and
 * lock stateids — belong to a confirmed open owner.
 */
__be32 nfs4_validate_stateid(stateid_t *stateid, bool has_session)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *ols;
	__be32 status;

	if (STALE_STATEID(stateid))
		return nfserr_stale_stateid;

	s = find_stateid(stateid);
	if (!s)
		 return nfserr_stale_stateid;
	status = check_stateid_generation(stateid, &s->sc_stateid, has_session);
	if (status)
		return status;
	/* delegation and other stid types need no further checks here */
	if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
		return nfs_ok;
	ols = openlockstateid(s);
	if (ols->st_stateowner->so_is_open_owner
	    && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}
3246
3247 /*
3248 * Checks for stateid operations
3249 */
3250 __be32
3251 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
3252                            stateid_t *stateid, int flags, struct file **filpp)
3253 {
3254         struct nfs4_stid *s;
3255         struct nfs4_ol_stateid *stp = NULL;
3256         struct nfs4_delegation *dp = NULL;
3257         struct svc_fh *current_fh = &cstate->current_fh;
3258         struct inode *ino = current_fh->fh_dentry->d_inode;
3259         __be32 status;
3260
3261         if (filpp)
3262                 *filpp = NULL;
3263
3264         if (grace_disallows_io(ino))
3265                 return nfserr_grace;
3266
3267         if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3268                 return check_special_stateids(current_fh, stateid, flags);
3269
3270         status = nfserr_stale_stateid;
3271         if (STALE_STATEID(stateid)) 
3272                 goto out;
3273
3274         /*
3275          * We assume that any stateid that has the current boot time,
3276          * but that we can't find, is expired:
3277          */
3278         status = nfserr_expired;
3279         s = find_stateid(stateid);
3280         if (!s)
3281                 goto out;
3282         status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3283         if (status)
3284                 goto out;
3285         switch (s->sc_type) {
3286         case NFS4_DELEG_STID:
3287                 dp = delegstateid(s);
3288                 status = nfs4_check_delegmode(dp, flags);
3289                 if (status)
3290                         goto out;
3291                 renew_client(dp->dl_client);
3292                 if (filpp) {
3293                         *filpp = dp->dl_file->fi_deleg_file;
3294                         BUG_ON(!*filpp);
3295                 }
3296                 break;
3297         case NFS4_OPEN_STID:
3298         case NFS4_LOCK_STID:
3299                 stp = openlockstateid(s);
3300                 status = nfs4_check_fh(current_fh, stp);
3301                 if (status)
3302                         goto out;
3303                 if (stp->st_stateowner->so_is_open_owner
3304                     && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3305                         goto out;
3306                 status = nfs4_check_openmode(stp, flags);
3307                 if (status)
3308                         goto out;
3309                 renew_client(stp->st_stateowner->so_client);
3310                 if (filpp) {
3311                         if (flags & RD_STATE)
3312                                 *filpp = find_readable_file(stp->st_file);
3313                         else
3314                                 *filpp = find_writeable_file(stp->st_file);
3315                 }
3316                 break;
3317         default:
3318                 return nfserr_bad_stateid;
3319         }
3320         status = nfs_ok;
3321 out:
3322         return status;
3323 }
3324
/*
 * Release a lock stateid for FREE_STATEID, but only if the lockowner
 * no longer holds any locks on the file.
 */
static __be32
nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
{
	if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
		return nfserr_locks_held;
	release_lock_stateid(stp);
	return nfs_ok;
}
3333
/*
 * Test if the stateid is valid
 *
 * NOTE(review): this op handler only records whether the compound runs
 * in a session; the individual stateids are presumably validated later
 * (cf. nfs4_validate_stateid) when the reply is encoded — confirm.
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_test_stateid *test_stateid)
{
	test_stateid->ts_has_session = nfsd4_has_session(cstate);
	return nfs_ok;
}
3344
/*
 * FREE_STATEID: let the client dispose of a stateid it no longer needs.
 * Delegation and open stateids cannot be freed this way
 * (nfserr_locks_held); a lock stateid can, provided the lockowner holds
 * no remaining locks.
 */
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	__be32 ret = nfserr_bad_stateid;

	nfs4_lock_state();
	s = find_stateid(stateid);
	if (!s)
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		goto out;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		/* has_session forced to 1 — presumably because FREE_STATEID
		 * is a sessions-only (v4.1) operation; confirm */
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			goto out;
		if (s->sc_type == NFS4_LOCK_STID)
			ret = nfsd4_free_lock_stateid(openlockstateid(s));
		else
			ret = nfserr_locks_held;
		break;
	default:
		ret = nfserr_bad_stateid;
	}
out:
	nfs4_unlock_state();
	return ret;
}
3378
3379 static inline int
3380 setlkflg (int type)
3381 {
3382         return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3383                 RD_STATE : WR_STATE;
3384 }
3385
3386 static __be32 nfs4_nospecial_stateid_checks(stateid_t *stateid)
3387 {
3388         if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3389                 return nfserr_bad_stateid;
3390         if (STALE_STATEID(stateid))
3391                 return nfserr_stale_stateid;
3392         return nfs_ok;
3393 }
3394
/*
 * Common checks for seqid-mutating operations on an open/lock stateid:
 * the owner's sequence number must be correct, the stateid must not be
 * a "closed" placeholder, its generation must be current, and it must
 * refer to the current filehandle.
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
		/*
		 * "Closed" stateid's exist *only* to return
		 * nfserr_replay_me from the previous step.
		 */
		return nfserr_bad_stateid;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		return status;
	return nfs4_check_fh(current_fh, stp);
}
3415
/*
 * Checks for sequence id mutating operations.
 *
 * Looks up the stateid (restricted to the types in @typemask), records
 * the owner in cstate->replay_owner so a replay can be detected and the
 * reply cached, renews the client's lease, and runs the common seqid /
 * generation / filehandle checks.  On success *stpp points to the
 * stateid found; on lookup failure it is left NULL.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp)
{
	__be32 status;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	/* Filter out special and stale stateids before the hash lookup: */
	status = nfs4_nospecial_stateid_checks(stateid);
	if (status)
		return status;
	*stpp = find_ol_stateid_by_type(stateid, typemask);
	if (*stpp == NULL)
		return nfserr_expired;
	cstate->replay_owner = (*stpp)->st_stateowner;
	renew_client((*stpp)->st_stateowner->so_client);

	return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
}
3441
3442 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
3443 {
3444         __be32 status;
3445         struct nfs4_openowner *oo;
3446
3447         status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
3448                                                 NFS4_OPEN_STID, stpp);
3449         if (status)
3450                 return status;
3451         oo = openowner((*stpp)->st_stateowner);
3452         if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
3453                 return nfserr_bad_stateid;
3454         return nfs_ok;
3455 }
3456
/*
 * OPEN_CONFIRM: confirm a v4.0 openowner, bump the stateid generation
 * and return the new stateid.  Also creates the client's recovery
 * directory, since the client is now known to be active.
 */
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
			(int)cstate->current_fh.fh_dentry->d_name.len,
			cstate->current_fh.fh_dentry->d_name.name);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	nfs4_lock_state();

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	/* Confirming an already-confirmed openowner is a protocol error: */
	if (oo->oo_flags & NFS4_OO_CONFIRMED)
		goto out;
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_create_clid_dir(oo->oo_owner.so_client);
	status = nfs_ok;
out:
	/*
	 * On a possible replay, keep the state lock until the reply has
	 * been encoded; it is dropped by the caller in that case.
	 */
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3497
3498 static inline void nfs4_file_downgrade(struct nfs4_ol_stateid *stp, unsigned int to_access)
3499 {
3500         int i;
3501
3502         for (i = 1; i < 4; i++) {
3503                 if (test_bit(i, &stp->st_access_bmap) && !(i & to_access)) {
3504                         nfs4_file_put_access(stp->st_file, i);
3505                         __clear_bit(i, &stp->st_access_bmap);
3506                 }
3507         }
3508 }
3509
/*
 * Clear from @bmap every deny mode that is not entirely contained in
 * @deny, leaving only the modes the client still asks to deny.
 */
static void
reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
{
	int mode;

	for (mode = 0; mode < 4; mode++) {
		if ((mode & deny) == mode)
			continue;	/* still wanted */
		__clear_bit(mode, bmap);
	}
}
3519
/*
 * OPEN_DOWNGRADE: reduce the access/deny modes of an open stateid.  The
 * requested modes must be a subset of what the stateid currently holds.
 */
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;

	dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
			(int)cstate->current_fh.fh_dentry->d_name.len,
			cstate->current_fh.fh_dentry->d_name.name);

	if (!access_valid(od->od_share_access, cstate->minorversion)
			|| !deny_valid(od->od_share_deny))
		return nfserr_inval;

	nfs4_lock_state();
	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp);
	if (status)
		goto out;
	status = nfserr_inval;
	/* The new modes must be a subset of what is currently held: */
	if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
		dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto out;
	}
	if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) {
		dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto out;
	}
	/* Release any access no longer wanted and trim the deny bitmap: */
	nfs4_file_downgrade(stp, od->od_share_access);

	reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);

	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	status = nfs_ok;
out:
	/* On a possible replay the lock is held until the reply is encoded: */
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3564
3565 void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
3566 {
3567         struct nfs4_openowner *oo;
3568         struct nfs4_ol_stateid *s;
3569
3570         if (!so->so_is_open_owner)
3571                 return;
3572         oo = openowner(so);
3573         s = oo->oo_last_closed_stid;
3574         if (!s)
3575                 return;
3576         if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
3577                 /* Release the last_closed_stid on the next seqid bump: */
3578                 oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
3579                 return;
3580         }
3581         oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
3582         release_last_closed_stateid(oo);
3583 }
3584
/*
 * Unhash an open stateid on CLOSE, but mark it NFS4_CLOSED_STID rather
 * than freeing it, so that a replayed CLOSE can still look it up.
 */
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	unhash_open_stateid(s);
	s->st_stid.sc_type = NFS4_CLOSED_STID;
}
3590
/*
 * CLOSE operation.
 *
 * nfs4_unlock_state() called after encode when this may be a replay
 * (cstate->replay_owner set).
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_close *close)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	dprintk("NFSD: nfsd4_close on file %.*s\n",
			(int)cstate->current_fh.fh_dentry->d_name.len,
			cstate->current_fh.fh_dentry->d_name.name);

	nfs4_lock_state();
	/* NFS4_CLOSED_STID is allowed so that a replayed CLOSE can still
	 * find the stateid kept around by nfsd4_close_open_stateid(): */
	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfs_ok;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	/* Unhash, but keep the stateid as "closed" for replay handling: */
	nfsd4_close_open_stateid(stp);
	oo->oo_last_closed_stid = stp;

	/* place unused nfs4_stateowners on so_close_lru list to be
	 * released by the laundromat service after the lease period
	 * to enable us to handle CLOSE replay
	 */
	if (list_empty(&oo->oo_owner.so_stateids))
		move_to_close_lru(oo);
out:
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3632
3633 __be32
3634 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3635                   struct nfsd4_delegreturn *dr)
3636 {
3637         struct nfs4_delegation *dp;
3638         stateid_t *stateid = &dr->dr_stateid;
3639         struct inode *inode;
3640         __be32 status;
3641
3642         if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
3643                 return status;
3644         inode = cstate->current_fh.fh_dentry->d_inode;
3645
3646         nfs4_lock_state();
3647         status = nfserr_bad_stateid;
3648         if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3649                 goto out;
3650         status = nfserr_stale_stateid;
3651         if (STALE_STATEID(stateid))
3652                 goto out;
3653         status = nfserr_expired;
3654         dp = find_deleg_stateid(stateid);
3655         if (!dp)
3656                 goto out;
3657         status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
3658         if (status)
3659                 goto out;
3660         renew_client(dp->dl_client);
3661
3662         unhash_delegation(dp);
3663 out:
3664         nfs4_unlock_state();
3665
3666         return status;
3667 }
3668
3669
/* 
 * Lock owner state (byte-range locks)
 */
/* True iff [start, start+len) would wrap past the top of the u64 range: */
#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
/* Hash table geometry for the lockowner string hash below: */
#define LOCK_HASH_BITS              8
#define LOCK_HASH_SIZE             (1 << LOCK_HASH_BITS)
#define LOCK_HASH_MASK             (LOCK_HASH_SIZE - 1)
3677
3678 static inline u64
3679 end_offset(u64 start, u64 len)
3680 {
3681         u64 end;
3682
3683         end = start + len;
3684         return end >= start ? end: NFS4_MAX_UINT64;
3685 }
3686
3687 /* last octet in a range */
3688 static inline u64
3689 last_byte_offset(u64 start, u64 len)
3690 {
3691         u64 end;
3692
3693         BUG_ON(!len);
3694         end = start + len;
3695         return end > start ? end - 1: NFS4_MAX_UINT64;
3696 }
3697
3698 static inline unsigned int
3699 lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
3700                 struct xdr_netobj *ownername)
3701 {
3702         return (file_hashval(inode) + cl_id
3703                         + opaque_hashval(ownername->data, ownername->len))
3704                 & LOCK_HASH_MASK;
3705 }
3706
/* Buckets of nfs4_stateowner, chained through so_strhash: */
static struct list_head lock_ownerstr_hashtbl[LOCK_HASH_SIZE];
3708
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	/* Clamp offsets that went negative via sign extension (see above): */
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
3725
/* Hack!: For now, we're defining this just so we can use a pointer to it
 * as a unique cookie to identify our (NFSv4's) posix locks (compared via
 * fl_lmops in nfs4_set_lock_denied below). */
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
};
3730
/*
 * Fill in a LOCK/LOCKT "denied" result from the conflicting file_lock.
 * If the conflicting lock is one of ours (fl_lmops matches), report its
 * NFSv4 lockowner and client; otherwise report an anonymous owner.
 */
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	/* A lock ending at NFS4_MAX_UINT64 is reported as "whole file": */
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
3760
3761 static struct nfs4_lockowner *
3762 find_lockowner_str(struct inode *inode, clientid_t *clid,
3763                 struct xdr_netobj *owner)
3764 {
3765         unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
3766         struct nfs4_stateowner *op;
3767
3768         list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
3769                 if (same_owner_str(op, owner, clid))
3770                         return lockowner(op);
3771         }
3772         return NULL;
3773 }
3774
/*
 * Link a new lockowner into the owner-string hash and into the open
 * stateid it was derived from.  Note @clp is currently unused here.
 */
static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
{
	list_add(&lo->lo_owner.so_strhash, &lock_ownerstr_hashtbl[strhashval]);
	list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
}
3780
/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 
 * occurred. 
 *
 * strhashval = lock_ownerstr_hashval 
 *
 * Returns NULL on allocation failure.
 */

static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
	struct nfs4_lockowner *lo;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	/* It is the openowner seqid that will be incremented in encode in the
	 * case of new lockowners; so increment the lock seqid manually: */
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
	hash_lockowner(lo, strhashval, clp, open_stp);
	return lo;
}
3804
3805 static struct nfs4_ol_stateid *
3806 alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
3807 {
3808         struct nfs4_ol_stateid *stp;
3809         struct nfs4_client *clp = lo->lo_owner.so_client;
3810
3811         stp = nfs4_alloc_stateid();
3812         if (stp == NULL)
3813                 goto out;
3814         list_add(&stp->st_perfile, &fp->fi_stateids);
3815         list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
3816         stp->st_stateowner = &lo->lo_owner;
3817         stp->st_stid.sc_type = NFS4_LOCK_STID;
3818         get_nfs4_file(fp);
3819         stp->st_file = fp;
3820         stp->st_stid.sc_stateid.si_opaque.so_clid = clp->cl_clientid;
3821         stp->st_stid.sc_stateid.si_opaque.so_id = current_stateid++;
3822         /* note will be incremented before first return to client: */
3823         stp->st_stid.sc_stateid.si_generation = 0;
3824         hash_stid(&stp->st_stid);
3825         stp->st_access_bmap = 0;
3826         stp->st_deny_bmap = open_stp->st_deny_bmap;
3827         stp->st_openstp = open_stp;
3828
3829 out:
3830         return stp;
3831 }
3832
3833 static int
3834 check_lock_length(u64 offset, u64 length)
3835 {
3836         return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
3837              LOFF_OVERFLOW(offset, length)));
3838 }
3839
3840 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
3841 {
3842         struct nfs4_file *fp = lock_stp->st_file;
3843         int oflag = nfs4_access_to_omode(access);
3844
3845         if (test_bit(access, &lock_stp->st_access_bmap))
3846                 return;
3847         nfs4_file_get_access(fp, oflag);
3848         __set_bit(access, &lock_stp->st_access_bmap);
3849 }
3850
/*
 *  LOCK operation 
 *
 * For a new lockowner (lk_is_new) the open stateid is validated and a
 * lockowner + lock stateid are created; otherwise the existing lock
 * stateid is looked up.  The lock is then attempted in the VFS and the
 * result mapped back to an NFSv4 status.
 */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   struct nfsd4_lock *lock)
{
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp;
	struct nfs4_file *fp;
	struct file *filp = NULL;
	struct file_lock file_lock;
	struct file_lock conflock;
	__be32 status = 0;
	unsigned int strhashval;
	int lkflg;
	int err;

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		 return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	nfs4_lock_state();

	if (lock->lk_is_new) {
		/*
		 * Client indicates that this is a new lockowner.
		 * Use open owner and open stateid to create lock owner and
		 * lock stateid.
		 */
		struct nfs4_ol_stateid *open_stp = NULL;

		status = nfserr_stale_clientid;
		if (!nfsd4_has_session(cstate) &&
		    STALE_CLIENTID(&lock->lk_new_clientid))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
					lock->lk_new_open_seqid,
					&lock->lk_new_open_stateid,
					&open_stp);
		if (status)
			goto out;
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		/* The open stateid must belong to the clientid in the args: */
		if (!nfsd4_has_session(cstate) &&
			!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
						&lock->v.new.clientid))
			goto out;
		/* create lockowner and lock stateid */
		fp = open_stp->st_file;
		strhashval = lock_ownerstr_hashval(fp->fi_inode,
				open_sop->oo_owner.so_client->cl_clientid.cl_id,
				&lock->v.new.owner);
		/* XXX: Do we need to check for duplicate stateowners on
		 * the same file, or should they just be allowed (and
		 * create new stateids)? */
		status = nfserr_jukebox;
		lock_sop = alloc_init_lock_stateowner(strhashval,
				open_sop->oo_owner.so_client, open_stp, lock);
		if (lock_sop == NULL)
			goto out;
		lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
		if (lock_stp == NULL)
			goto out;
	} else {
		/* lock (lock owner + lock stateid) already exists */
		status = nfs4_preprocess_seqid_op(cstate,
				       lock->lk_old_lock_seqid,
				       &lock->lk_old_lock_stateid,
				       NFS4_LOCK_STID, &lock_stp);
		if (status)
			goto out;
		lock_sop = lockowner(lock_stp->st_stateowner);
		fp = lock_stp->st_file;
	}
	/* lock_sop and lock_stp have been created or found */

	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	/* Reclaims are only allowed during, non-reclaims outside of, grace: */
	status = nfserr_grace;
	if (locks_in_grace() && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace() && lock->lk_reclaim)
		goto out;

	locks_init_lock(&file_lock);
	switch (lock->lk_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			filp = find_readable_file(lock_stp->st_file);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
			file_lock.fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			filp = find_writeable_file(lock_stp->st_file);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
			file_lock.fl_type = F_WRLCK;
			break;
		default:
			status = nfserr_inval;
		goto out;
	}
	if (!filp) {
		status = nfserr_openmode;
		goto out;
	}
	file_lock.fl_owner = (fl_owner_t)lock_sop;
	file_lock.fl_pid = current->tgid;
	file_lock.fl_file = filp;
	file_lock.fl_flags = FL_POSIX;
	file_lock.fl_lmops = &nfsd_posix_mng_ops;

	file_lock.fl_start = lock->lk_offset;
	file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(&file_lock);

	/*
	* Try to lock the file in the VFS.
	* Note: locks.c uses the BKL to protect the inode's lock list.
	*/

	err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
	switch (-err) {
	case 0: /* success! */
		update_stateid(&lock_stp->st_stid.sc_stateid);
		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
				sizeof(stateid_t));
		status = 0;
		break;
	case (EAGAIN):		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(&conflock, &lock->lk_denied);
		break;
	case (EDEADLK):
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
		status = nfserrno(err);
		break;
	}
out:
	/* On failure, tear down any lockowner created above: */
	if (status && lock->lk_is_new && lock_sop)
		release_lockowner(lock_sop);
	/* On a possible replay the lock is held until the reply is encoded: */
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
4019
4020 /*
4021  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4022  * so we do a temporary open here just to get an open file to pass to
4023  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
4024  * inode operation.)
4025  */
4026 static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4027 {
4028         struct file *file;
4029         int err;
4030
4031         err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4032         if (err)
4033                 return err;
4034         err = vfs_test_lock(file, lock);
4035         nfsd_close(file);
4036         return err;
4037 }
4038
/*
 * LOCKT operation: test whether a lock of the given type/range would
 * conflict with an existing lock, without actually acquiring anything.
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
{
	struct inode *inode;
	struct file_lock file_lock;
	struct nfs4_lockowner *lo;
	int error;
	__be32 status;

	if (locks_in_grace())
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		 return nfserr_inval;

	nfs4_lock_state();

	status = nfserr_stale_clientid;
	if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
		goto out;

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	inode = cstate->current_fh.fh_dentry->d_inode;
	locks_init_lock(&file_lock);
	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock.fl_type = F_RDLCK;
		break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock.fl_type = F_WRLCK;
		break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
		goto out;
	}

	/* If the owner is known, a lock it already holds won't conflict: */
	lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
	if (lo)
		file_lock.fl_owner = (fl_owner_t)lo;
	file_lock.fl_pid = current->tgid;
	file_lock.fl_flags = FL_POSIX;

	file_lock.fl_start = lockt->lt_offset;
	file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(&file_lock);

	status = nfs_ok;
	error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
	if (error) {
		status = nfserrno(error);
		goto out;
	}
	/* F_UNLCK here means no conflicting lock was found: */
	if (file_lock.fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
	}
out:
	nfs4_unlock_state();
	return status;
}
4109
4110 __be32
4111 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4112             struct nfsd4_locku *locku)
4113 {
4114         struct nfs4_ol_stateid *stp;
4115         struct file *filp = NULL;
4116         struct file_lock file_lock;
4117         __be32 status;
4118         int err;
4119                                                         
4120         dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
4121                 (long long) locku->lu_offset,
4122                 (long long) locku->lu_length);
4123
4124         if (check_lock_length(locku->lu_offset, locku->lu_length))
4125                  return nfserr_inval;
4126
4127         nfs4_lock_state();
4128                                                                                 
4129         status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4130                                         &locku->lu_stateid, NFS4_LOCK_STID, &stp);
4131         if (status)
4132                 goto out;
4133         filp = find_any_file(stp->st_file);
4134         if (!filp) {
4135                 status = nfserr_lock_range;
4136                 goto out;
4137         }
4138         BUG_ON(!filp);
4139         locks_init_lock(&file_lock);
4140         file_lock.fl_type = F_UNLCK;
4141         file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4142         file_lock.fl_pid = current->tgid;
4143         file_lock.fl_file = filp;
4144         file_lock.fl_flags = FL_POSIX; 
4145         file_lock.fl_lmops = &nfsd_posix_mng_ops;
4146         file_lock.fl_start = locku->lu_offset;
4147
4148         file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length);
4149         nfs4_transform_lock_offset(&file_lock);
4150
4151         /*
4152         *  Try to unlock the file in the VFS.
4153         */
4154         err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
4155         if (err) {
4156                 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4157                 goto out_nfserr;
4158         }
4159         /*
4160         * OK, unlock succeeded; the only thing left to do is update the stateid.
4161         */
4162         update_stateid(&stp->st_stid.sc_stateid);
4163         memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4164
4165 out:
4166         nfs4_unlock_state();
4167         return status;
4168
4169 out_nfserr:
4170         status = nfserrno(err);
4171         goto out;
4172 }
4173
4174 /*
4175  * returns
4176  *      1: locks held by lockowner
4177  *      0: no locks held by lockowner
4178  */
4179 static int
4180 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4181 {
4182         struct file_lock **flpp;
4183         struct inode *inode = filp->fi_inode;
4184         int status = 0;
4185
4186         lock_flocks();
4187         for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4188                 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4189                         status = 1;
4190                         goto out;
4191                 }
4192         }
4193 out:
4194         unlock_flocks();
4195         return status;
4196 }
4197
4198 __be32
4199 nfsd4_release_lockowner(struct svc_rqst *rqstp,
4200                         struct nfsd4_compound_state *cstate,
4201                         struct nfsd4_release_lockowner *rlockowner)
4202 {
4203         clientid_t *clid = &rlockowner->rl_clientid;
4204         struct nfs4_stateowner *sop;
4205         struct nfs4_lockowner *lo;
4206         struct nfs4_ol_stateid *stp;
4207         struct xdr_netobj *owner = &rlockowner->rl_owner;
4208         struct list_head matches;
4209         int i;
4210         __be32 status;
4211
4212         dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4213                 clid->cl_boot, clid->cl_id);
4214
4215         /* XXX check for lease expiration */
4216
4217         status = nfserr_stale_clientid;
4218         if (STALE_CLIENTID(clid))
4219                 return status;
4220
4221         nfs4_lock_state();
4222
4223         status = nfserr_locks_held;
4224         /* XXX: we're doing a linear search through all the lockowners.
4225          * Yipes!  For now we'll just hope clients aren't really using
4226          * release_lockowner much, but eventually we have to fix these
4227          * data structures. */
4228         INIT_LIST_HEAD(&matches);
4229         for (i = 0; i < LOCK_HASH_SIZE; i++) {
4230                 list_for_each_entry(sop, &lock_ownerstr_hashtbl[i], so_strhash) {
4231                         if (!same_owner_str(sop, owner, clid))
4232                                 continue;
4233                         list_for_each_entry(stp, &sop->so_stateids,
4234                                         st_perstateowner) {
4235                                 lo = lockowner(sop);
4236                                 if (check_for_locks(stp->st_file, lo))
4237                                         goto out;
4238                                 list_add(&lo->lo_list, &matches);
4239                         }
4240                 }
4241         }
4242         /* Clients probably won't expect us to return with some (but not all)
4243          * of the lockowner state released; so don't release any until all
4244          * have been checked. */
4245         status = nfs_ok;
4246         while (!list_empty(&matches)) {
4247                 lo = list_entry(matches.next, struct nfs4_lockowner,
4248                                                                 lo_list);
4249                 /* unhash_stateowner deletes so_perclient only
4250                  * for openowners. */
4251                 list_del(&lo->lo_list);
4252                 release_lockowner(lo);
4253         }
4254 out:
4255         nfs4_unlock_state();
4256         return status;
4257 }
4258
4259 static inline struct nfs4_client_reclaim *
4260 alloc_reclaim(void)
4261 {
4262         return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4263 }
4264
4265 int
4266 nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
4267 {
4268         unsigned int strhashval = clientstr_hashval(name);
4269         struct nfs4_client *clp;
4270
4271         clp = find_confirmed_client_by_str(name, strhashval);
4272         return clp ? 1 : 0;
4273 }
4274
4275 /*
4276  * failure => all reset bets are off, nfserr_no_grace...
4277  */
4278 int
4279 nfs4_client_to_reclaim(const char *name)
4280 {
4281         unsigned int strhashval;
4282         struct nfs4_client_reclaim *crp = NULL;
4283
4284         dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4285         crp = alloc_reclaim();
4286         if (!crp)
4287                 return 0;
4288         strhashval = clientstr_hashval(name);
4289         INIT_LIST_HEAD(&crp->cr_strhash);
4290         list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
4291         memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4292         reclaim_str_hashtbl_size++;
4293         return 1;
4294 }
4295
4296 static void
4297 nfs4_release_reclaim(void)
4298 {
4299         struct nfs4_client_reclaim *crp = NULL;
4300         int i;
4301
4302         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4303                 while (!list_empty(&reclaim_str_hashtbl[i])) {
4304                         crp = list_entry(reclaim_str_hashtbl[i].next,
4305                                         struct nfs4_client_reclaim, cr_strhash);
4306                         list_del(&crp->cr_strhash);
4307                         kfree(crp);
4308                         reclaim_str_hashtbl_size--;
4309                 }
4310         }
4311         BUG_ON(reclaim_str_hashtbl_size);
4312 }
4313
4314 /*
4315  * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
4316 static struct nfs4_client_reclaim *
4317 nfs4_find_reclaim_client(clientid_t *clid)
4318 {
4319         unsigned int strhashval;
4320         struct nfs4_client *clp;
4321         struct nfs4_client_reclaim *crp = NULL;
4322
4323
4324         /* find clientid in conf_id_hashtbl */
4325         clp = find_confirmed_client(clid);
4326         if (clp == NULL)
4327                 return NULL;
4328
4329         dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
4330                             clp->cl_name.len, clp->cl_name.data,
4331                             clp->cl_recdir);
4332
4333         /* find clp->cl_name in reclaim_str_hashtbl */
4334         strhashval = clientstr_hashval(clp->cl_recdir);
4335         list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
4336                 if (same_name(crp->cr_recdir, clp->cl_recdir)) {
4337                         return crp;
4338                 }
4339         }
4340         return NULL;
4341 }
4342
4343 /*
4344 * Called from OPEN. Look for clientid in reclaim list.
4345 */
4346 __be32
4347 nfs4_check_open_reclaim(clientid_t *clid)
4348 {
4349         return nfs4_find_reclaim_client(clid) ? nfs_ok : nfserr_reclaim_bad;
4350 }
4351
4352 /* initialization to perform at module load time: */
4353
4354 int
4355 nfs4_state_init(void)
4356 {
4357         int i, status;
4358
4359         status = nfsd4_init_slabs();
4360         if (status)
4361                 return status;
4362         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4363                 INIT_LIST_HEAD(&conf_id_hashtbl[i]);
4364                 INIT_LIST_HEAD(&conf_str_hashtbl[i]);
4365                 INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
4366                 INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
4367                 INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
4368         }
4369         for (i = 0; i < SESSION_HASH_SIZE; i++)
4370                 INIT_LIST_HEAD(&sessionid_hashtbl[i]);
4371         for (i = 0; i < FILE_HASH_SIZE; i++) {
4372                 INIT_LIST_HEAD(&file_hashtbl[i]);
4373         }
4374         for (i = 0; i < OPEN_OWNER_HASH_SIZE; i++) {
4375                 INIT_LIST_HEAD(&open_ownerstr_hashtbl[i]);
4376         }
4377         for (i = 0; i < STATEID_HASH_SIZE; i++)
4378                 INIT_LIST_HEAD(&stateid_hashtbl[i]);
4379         for (i = 0; i < LOCK_HASH_SIZE; i++) {
4380                 INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]);
4381         }
4382         memset(&onestateid, ~0, sizeof(stateid_t));
4383         INIT_LIST_HEAD(&close_lru);
4384         INIT_LIST_HEAD(&client_lru);
4385         INIT_LIST_HEAD(&del_recall_lru);
4386         reclaim_str_hashtbl_size = 0;
4387         return 0;
4388 }
4389
/* Read the on-disk reboot recovery records under the state lock. */
static void
nfsd4_load_reboot_recovery_data(void)
{
	int status;

	nfs4_lock_state();
	nfsd4_init_recdir();
	status = nfsd4_recdir_load();
	nfs4_unlock_state();
	if (status != 0)
		printk("NFSD: Failure reading reboot recovery data\n");
}
4402
4403 /*
4404  * Since the lifetime of a delegation isn't limited to that of an open, a
4405  * client may quite reasonably hang on to a delegation as long as it has
4406  * the inode cached.  This becomes an obvious problem the first time a
4407  * client's inode cache approaches the size of the server's total memory.
4408  *
4409  * For now we avoid this problem by imposing a hard limit on the number
4410  * of delegations, which varies according to the server's memory size.
4411  */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	/* pages >> (20 - 2 - PAGE_SHIFT) == 4 * (free page-cache bytes / 1MB) */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
4423
4424 /* initialization to perform when the nfsd service is started: */
4425
static int
__nfs4_state_start(void)
{
	int ret;

	/* New epoch for this server instance's clientids/stateids. */
	boot_time = get_seconds();
	/* Enter the grace period before anything else, so reclaims are
	 * honored from the start.
	 * NOTE(review): on the failure paths below the grace period started
	 * here is not explicitly ended — presumably the shutdown path's
	 * locks_end_grace() covers it; confirm against callers. */
	locks_start_grace(&nfsd4_manager);
	printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
	       nfsd4_grace);
	ret = set_callback_cred();
	if (ret)
		return -ENOMEM;
	laundry_wq = create_singlethread_workqueue("nfsd4");
	if (laundry_wq == NULL)
		return -ENOMEM;
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;
	/* First laundromat run coincides with the end of the grace period. */
	queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
	set_max_delegations();
	return 0;
out_free_laundry:
	destroy_workqueue(laundry_wq);
	return ret;
}
4451
/* Per-service-start initialization: load reboot recovery data, then
 * bring up grace period, workqueues, and callback machinery. */
int
nfs4_state_start(void)
{
	nfsd4_load_reboot_recovery_data();
	return __nfs4_state_start();
}
4458
/* Release all remaining NFSv4 state: every client (which tears down its
 * owners and stateids via expire_client) and every delegation still on
 * the recall LRU.  Caller holds the state lock. */
static void
__nfs4_state_shutdown(void)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;

	/* Expire confirmed clients via the id hash and unconfirmed ones
	 * via the string hash; each expire_client() unhashes the entry. */
	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&conf_id_hashtbl[i])) {
			clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			expire_client(clp);
		}
		while (!list_empty(&unconf_str_hashtbl[i])) {
			clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
			expire_client(clp);
		}
	}
	/* Two-phase teardown of delegations: move them off del_recall_lru
	 * under recall_lock, then unhash outside the spinlock. */
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	list_for_each_safe(pos, next, &del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		unhash_delegation(dp);
	}

	nfsd4_shutdown_recdir();
}
4492
4493 void
4494 nfs4_state_shutdown(void)
4495 {
4496         cancel_delayed_work_sync(&laundromat_work);
4497         destroy_workqueue(laundry_wq);
4498         locks_end_grace(&nfsd4_manager);
4499         nfs4_lock_state();
4500         nfs4_release_reclaim();
4501         __nfs4_state_shutdown();
4502         nfs4_unlock_state();
4503         nfsd4_destroy_callback_queue();
4504 }