/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/clnt.h>

#include "current_stateid.h"
#include "fault_inject.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC
time_t nfsd4_lease = 90;	/* default lease time */
time_t nfsd4_grace = 90;

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
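
/*
 * Editor's illustration (not in the original source): callers use the
 * macros above to special-case the well-known stateids before doing a
 * normal stateid lookup, roughly:
 *
 *	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
 *		// special anonymous/bypass stateid: no lookup needed
 *	else if (CURRENT_STATEID(stateid))
 *		// substitute the compound's saved current stateid
 */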
/* forward declarations */
static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);

/* Currently used for almost all code touching nfsv4 state: */
static DEFINE_MUTEX(client_mutex);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(recall_lock);

static struct kmem_cache *openowner_slab = NULL;
static struct kmem_cache *lockowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;

	mutex_lock(&client_mutex);

static void free_session(struct kref *);

/* Must be called under the client_lock */
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
	kref_put(&ses->se_ref, free_session);

static void nfsd4_get_session(struct nfsd4_session *ses)
	kref_get(&ses->se_ref);

nfs4_unlock_state(void)
	mutex_unlock(&client_mutex);

opaque_hashval(const void *ptr, int nbytes)
	unsigned char *cptr = (unsigned char *) ptr;

static struct list_head del_recall_lru;

static void nfsd4_free_file(struct nfs4_file *f)
	kmem_cache_free(file_slab, f);

put_nfs4_file(struct nfs4_file *fi)
	if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
		list_del(&fi->fi_hash);
		spin_unlock(&recall_lock);

get_nfs4_file(struct nfs4_file *fi)
	atomic_inc(&fi->fi_ref);

static int num_delegations;
unsigned int max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS	8
#define OWNER_HASH_SIZE	(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK	(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;

/* hash table for nfs4_file */
#define FILE_HASH_BITS	8
#define FILE_HASH_SIZE	(1 << FILE_HASH_BITS)

static unsigned int file_hashval(struct inode *ino)
	/* XXX: why are we hashing on inode pointer, anyway? */
	return hash_ptr(ino, FILE_HASH_BITS);

static struct list_head file_hashtbl[FILE_HASH_SIZE];

static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
	WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
	atomic_inc(&fp->fi_access[oflag]);

static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
	if (oflag == O_RDWR) {
		__nfs4_file_get_access(fp, O_RDONLY);
		__nfs4_file_get_access(fp, O_WRONLY);
	__nfs4_file_get_access(fp, oflag);
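
/*
 * Editor's note (illustrative, not in the original source): an O_RDWR
 * open takes one reference on each of the O_RDONLY and O_WRONLY access
 * counters, so every get must be balanced by a put with the same
 * oflag, e.g.:
 *
 *	nfs4_file_get_access(fp, O_RDWR);
 *	...
 *	nfs4_file_put_access(fp, O_RDWR);	// drops both counters
 */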
static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
	if (fp->fi_fds[oflag]) {
		fput(fp->fi_fds[oflag]);
		fp->fi_fds[oflag] = NULL;

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
	if (atomic_dec_and_test(&fp->fi_access[oflag])) {
		nfs4_file_put_fd(fp, oflag);
		/*
		 * It's also safe to get rid of the RDWR open *if*
		 * we no longer have need of the other kind of access
		 * or if we already have the other kind of open:
		 */
		if (fp->fi_fds[1 - oflag]
			|| atomic_read(&fp->fi_access[1 - oflag]) == 0)
			nfs4_file_put_fd(fp, O_RDWR);

static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
	if (oflag == O_RDWR) {
		__nfs4_file_put_access(fp, O_RDONLY);
		__nfs4_file_put_access(fp, O_WRONLY);
	__nfs4_file_put_access(fp, oflag);

static inline int get_new_stid(struct nfs4_stid *stid)
	static int min_stateid = 0;
	struct idr *stateids = &stid->sc_client->cl_stateids;

	error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
	/*
	 * Note: the necessary preallocation was done in
	 * nfs4_alloc_stateid().  The idr code caps the number of
	 * preallocations that can exist at a time, but the state lock
	 * prevents anyone from using ours before we get here:
	 */
	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	min_stateid = new_stid + 1;
	if (min_stateid == INT_MAX)
		min_stateid = 0;

static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
	stateid_t *s = &stid->sc_stateid;

	stid->sc_type = type;
	stid->sc_client = cl;
	s->si_opaque.so_clid = cl->cl_clientid;
	new_id = get_new_stid(stid);
	s->si_opaque.so_id = (u32)new_id;
	/* Will be incremented before return to client: */
	s->si_generation = 0;
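
/*
 * Editor's sketch (illustrative): the wire stateid produced above is
 *
 *	si_opaque.so_clid	-- clientid of the issuing client
 *	si_opaque.so_id		-- per-client id handed out by the idr
 *	si_generation		-- bumped on each state-morphing operation
 *
 * so the server can route a stateid back to its owning client via
 * so_clid, then to the specific state via so_id (see find_stateid()).
 */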
static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
	struct idr *stateids = &cl->cl_stateids;

	if (!idr_pre_get(stateids, GFP_KERNEL))
	/*
	 * Note: if we fail here (or any time between now and the time
	 * we actually get the new idr), we won't need to undo the idr
	 * preallocation, since the idr code caps the number of
	 * preallocated entries.
	 */
	return kmem_cache_alloc(slab, GFP_KERNEL);

static struct nfs4_ol_stateid *nfs4_alloc_stateid(struct nfs4_client *clp)
	return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
	struct nfs4_delegation *dp;
	struct nfs4_file *fp = stp->st_file;

	dprintk("NFSD alloc_init_deleg\n");
	/*
	 * Major work on the lease subsystem (for example, to support
	 * callbacks on stat) will be required before we can support
	 * write delegations properly.
	 */
	if (type != NFS4_OPEN_DELEGATE_READ)
		return NULL;
	if (fp->fi_had_conflict)
		return NULL;
	if (num_delegations > max_delegations)
		return NULL;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
	init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
	atomic_set(&dp->dl_count, 1);
	nfsd4_init_callback(&dp->dl_recall);

nfs4_put_delegation(struct nfs4_delegation *dp)
	if (atomic_dec_and_test(&dp->dl_count)) {
		dprintk("NFSD: freeing dp %p\n", dp);
		put_nfs4_file(dp->dl_file);
		kmem_cache_free(deleg_slab, dp);

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
	if (atomic_dec_and_test(&fp->fi_delegees)) {
		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
		fput(fp->fi_deleg_file);
		fp->fi_deleg_file = NULL;

static void unhash_stid(struct nfs4_stid *s)
	struct idr *stateids = &s->sc_client->cl_stateids;

	idr_remove(stateids, s->sc_stateid.si_opaque.so_id);

/* Called under the state lock. */
unhash_delegation(struct nfs4_delegation *dp)
	unhash_stid(&dp->dl_stid);
	list_del_init(&dp->dl_perclnt);
	spin_lock(&recall_lock);
	list_del_init(&dp->dl_perfile);
	list_del_init(&dp->dl_recall_lru);
	spin_unlock(&recall_lock);
	nfs4_put_deleg_lease(dp->dl_file);
	nfs4_put_delegation(dp);

/* client_lock protects the client lru list and session hash table */
static DEFINE_SPINLOCK(client_lock);

static unsigned int clientid_hashval(u32 id)
	return id & CLIENT_HASH_MASK;

static unsigned int clientstr_hashval(const char *name)
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
bmap_to_share_mode(unsigned long bmap) {
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))

test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
	unsigned int access, deny;

	access = bmap_to_share_mode(stp->st_access_bmap);
	deny = bmap_to_share_mode(stp->st_deny_bmap);
	if ((access & open->op_share_deny) || (deny & open->op_share_access))

/* set share access for a given stateid */
set_access(u32 access, struct nfs4_ol_stateid *stp)
	__set_bit(access, &stp->st_access_bmap);

/* clear share access for a given stateid */
clear_access(u32 access, struct nfs4_ol_stateid *stp)
	__clear_bit(access, &stp->st_access_bmap);

/* test whether a given stateid has access */
test_access(u32 access, struct nfs4_ol_stateid *stp)
	return test_bit(access, &stp->st_access_bmap);

/* set share deny for a given stateid */
set_deny(u32 access, struct nfs4_ol_stateid *stp)
	__set_bit(access, &stp->st_deny_bmap);

/* clear share deny for a given stateid */
clear_deny(u32 access, struct nfs4_ol_stateid *stp)
	__clear_bit(access, &stp->st_deny_bmap);

/* test whether a given stateid is denying specific access */
test_deny(u32 access, struct nfs4_ol_stateid *stp)
	return test_bit(access, &stp->st_deny_bmap);
static int nfs4_access_to_omode(u32 access)
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;

/* release all access and file references for a given stateid */
release_all_access(struct nfs4_ol_stateid *stp)
	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_file,
					     nfs4_access_to_omode(i));
		clear_access(i, stp);
static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
	list_del(&stp->st_perfile);
	list_del(&stp->st_perstateowner);

static void close_generic_stateid(struct nfs4_ol_stateid *stp)
	release_all_access(stp);
	put_nfs4_file(stp->st_file);

static void free_generic_stateid(struct nfs4_ol_stateid *stp)
	kmem_cache_free(stateid_slab, stp);

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
	unhash_generic_stateid(stp);
	unhash_stid(&stp->st_stid);
	file = find_any_file(stp->st_file);
		locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
	close_generic_stateid(stp);
	free_generic_stateid(stp);

static void unhash_lockowner(struct nfs4_lockowner *lo)
	struct nfs4_ol_stateid *stp;

	list_del(&lo->lo_owner.so_strhash);
	list_del(&lo->lo_perstateid);
	list_del(&lo->lo_owner_ino_hash);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_lock_stateid(stp);

static void release_lockowner(struct nfs4_lockowner *lo)
	unhash_lockowner(lo);
	nfs4_free_lockowner(lo);

release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
	struct nfs4_lockowner *lo;

	while (!list_empty(&open_stp->st_lockowners)) {
		lo = list_entry(open_stp->st_lockowners.next,
				struct nfs4_lockowner, lo_perstateid);
		release_lockowner(lo);

static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
	unhash_generic_stateid(stp);
	release_stateid_lockowners(stp);
	close_generic_stateid(stp);

static void release_open_stateid(struct nfs4_ol_stateid *stp)
	unhash_open_stateid(stp);
	unhash_stid(&stp->st_stid);
	free_generic_stateid(stp);

static void unhash_openowner(struct nfs4_openowner *oo)
	struct nfs4_ol_stateid *stp;

	list_del(&oo->oo_owner.so_strhash);
	list_del(&oo->oo_perclient);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_open_stateid(stp);

static void release_last_closed_stateid(struct nfs4_openowner *oo)
	struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;

		unhash_stid(&s->st_stid);
		free_generic_stateid(s);
		oo->oo_last_closed_stid = NULL;

static void release_openowner(struct nfs4_openowner *oo)
	unhash_openowner(oo);
	list_del(&oo->oo_close_lru);
	release_last_closed_stateid(oo);
	nfs4_free_openowner(oo);

hash_sessionid(struct nfs4_sessionid *sessionid)
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;

dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);

dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)

gen_sessionid(struct nfsd4_session *ses)
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
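
/*
 * Editor's worked example (illustrative): NFSD_MIN_HDR_SEQ_SZ is
 * 24 + 12 + 44 = 80 bytes, so a slot that caches N bytes of
 * post-SEQUENCE reply data can safely back an advertised
 * ca_maxresponsesize_cached of N + 80 -- the same arithmetic that
 * slot_bytes() and nfsd4_sanitize_slot_size() below apply in reverse.
 */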
free_session_slots(struct nfsd4_session *ses)
	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
	return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;

static int nfsd4_sanitize_slot_size(u32 size)
	size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
	size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections.  For now we just fail the create session.
 */
static int nfsd4_get_drc_mem(int slotsize, u32 num)
	num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);

	spin_lock(&nfsd_drc_lock);
	avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
			nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

static void nfsd4_put_drc_mem(int slotsize, int num)
	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * num;
	spin_unlock(&nfsd_drc_lock);
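
/*
 * Editor's note (illustrative): nfsd4_get_drc_mem() may grant fewer
 * slots than requested, and only the granted amount is accounted in
 * nfsd_drc_mem_used, so the matching release must use the granted
 * count, e.g.:
 *
 *	numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
 *	...
 *	nfsd4_put_drc_mem(slotsize, numslots);
 */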
static struct nfsd4_session *__alloc_session(int slotsize, int numslots)
	struct nfsd4_session *new;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);

	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		mem = sizeof(struct nfsd4_slot) + slotsize;
		new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
		if (!new->se_slots[i])

		kfree(new->se_slots[i]);

static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
	u32 maxrpc = nfsd_serv->sv_max_mesg;

	new->maxreqs = numslots;
	new->maxresp_cached = min_t(u32, req->maxresp_cached,
					slotsize + NFSD_MIN_HDR_SEQ_SZ);
	new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
	new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
	new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);

static void free_conn(struct nfsd4_conn *c)
	svc_xprt_put(c->cn_xprt);

static void nfsd4_conn_lost(struct svc_xpt_user *u)
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
	spin_unlock(&clp->cl_lock);
	nfsd4_probe_callback(clp);

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);

static int nfsd4_register_conn(struct nfsd4_conn *conn)
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	if (conn->cn_flags & NFS4_CDFC4_BACK) {
		/* callback channel may be back up */
		nfsd4_probe_callback(ses->se_client);

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);

		spin_lock(&clp->cl_lock);
	spin_unlock(&clp->cl_lock);

static void __free_session(struct nfsd4_session *ses)
	nfsd4_put_drc_mem(slot_bytes(&ses->se_fchannel), ses->se_fchannel.maxreqs);
	free_session_slots(ses);

static void free_session(struct kref *kref)
	struct nfsd4_session *ses;

	lockdep_assert_held(&client_lock);
	ses = container_of(kref, struct nfsd4_session, se_ref);
	nfsd4_del_conns(ses);

void nfsd4_put_session(struct nfsd4_session *ses)
	spin_lock(&client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&client_lock);

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan)
	struct nfsd4_session *new;
	int numslots, slotsize;
	/*
	 * Note that decreasing the slot size below the client's request
	 * may make it difficult for the client to function correctly,
	 * whereas decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease the number of slots instead of their size.
	 */
	slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
	numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);

	new = __alloc_session(slotsize, numslots);
		nfsd4_put_drc_mem(slotsize, numslots);
	init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	kref_init(&new->se_ref);
	idx = hash_sessionid(&new->se_sessionid);
	spin_lock(&client_lock);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);
	spin_unlock(&client_lock);

	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);

/* caller must hold client_lock */
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
	struct nfsd4_session *elem;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {

	dprintk("%s: session not found\n", __func__);

/* caller must hold client_lock */
unhash_session(struct nfsd4_session *ses)
	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);

/* must be called under the client_lock */
renew_client_locked(struct nfs4_client *clp)
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		printk("%s: client (clientid %08x/%08x) already expired\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();

renew_client(struct nfs4_client *clp)
	spin_lock(&client_lock);
	renew_client_locked(clp);
	spin_unlock(&client_lock);

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
	if (clid->cl_boot == nn->boot_time)
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);

/*
 * XXX Should we use a slab cache?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
	struct nfs4_client *clp;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL) {
	clp->cl_name.len = name.len;

free_client(struct nfs4_client *clp)
	lockdep_assert_held(&client_lock);
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		nfsd4_put_session_locked(ses);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_name.data);

release_session_client(struct nfsd4_session *session)
	struct nfs4_client *clp = session->se_client;

	if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
	if (is_client_expired(clp)) {
		session->se_client = NULL;
		renew_client_locked(clp);
	spin_unlock(&client_lock);

/* must be called under the client_lock */
unhash_client_locked(struct nfs4_client *clp)
	struct nfsd4_session *ses;

	mark_client_expired(clp);
	list_del(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);

destroy_client(struct nfs4_client *clp)
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		list_del_init(&dp->dl_perclnt);
		list_move(&dp->dl_recall_lru, &reaplist);
	spin_unlock(&recall_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		unhash_delegation(dp);
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		release_openowner(oo);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	list_del(&clp->cl_idhash);
	if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
		rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	spin_lock(&client_lock);
	unhash_client_locked(clp);
	if (atomic_read(&clp->cl_refcount) == 0)
	spin_unlock(&client_lock);

static void expire_client(struct nfs4_client *clp)
	nfsd4_client_record_remove(clp);
	destroy_client(clp);

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
	if (source->cr_principal) {
		target->cr_principal =
			kstrdup(source->cr_principal, GFP_KERNEL);
		if (target->cr_principal == NULL)
		target->cr_principal = NULL;
	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);

compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
	res = o1->len - o2->len;
	return (long long)memcmp(o1->data, o2->data, o1->len);

static int same_name(const char *n1, const char *n2)
	return 0 == memcmp(n1, n2, HEXDIR_LEN);

same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));

same_clid(clientid_t *cl1, clientid_t *cl2)
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);

static bool groups_equal(struct group_info *g1, struct group_info *g2)
	if (g1->ngroups != g2->ngroups)
	for (i = 0; i < g1->ngroups; i++)
		if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use the uid, gids, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);

same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (cr1->cr_uid != cr2->cr_uid)
		|| (cr1->cr_gid != cr2->cr_gid)
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
	if (cr1->cr_principal == cr2->cr_principal)
	if (!cr1->cr_principal || !cr2->cr_principal)
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
	static u32 current_clientid = 1;

	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = current_clientid++;

static void gen_confirm(struct nfs4_client *clp)
	verf[0] = (__be32)get_seconds();
	verf[1] = (__be32)i++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));

static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
	return idr_find(&cl->cl_stateids, t->si_opaque.so_id);

static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
	struct nfs4_stid *s;

	s = find_stateid(cl, t);
	if (typemask & s->sc_type)

static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	struct net *net = SVC_NET(rqstp);

	clp = alloc_client(name);

	INIT_LIST_HEAD(&clp->cl_sessions);
	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
		spin_lock(&client_lock);
		spin_unlock(&client_lock);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	spin_lock_init(&clp->cl_lock);
	nfsd4_init_callback(&clp->cl_cb_null);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_cb_session = NULL;

add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
			new = &((*new)->rb_right);

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
			node = node->rb_left;
			node = node->rb_right;

add_to_unconfirmed(struct nfs4_client *clp)
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);

move_to_confirmed(struct nfs4_client *clp)
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &nn->conf_id_hashtbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &nn->unconf_id_hashtbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)

static bool clp_used_exchangeid(struct nfs4_client *clp)
	return clp->cl_exchange_flags != 0;

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
	return find_clp_in_name_tree(name, &nn->conf_name_tree);

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);

gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					      se->se_callback_addr_len,
					      (struct sockaddr *)&conn->cb_addr,
					      sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);

	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

/*
 * Cache a reply.  nfsd4_check_drc_limit() has bounded the cache size.
 */
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
	struct nfsd4_slot *slot = resp->cstate.slot;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	if (nfsd4_not_cached(resp)) {
		slot->sl_datalen = 0;
	slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
	base = (char *)resp->cstate.datap -
		(char *)resp->xbuf->head[0].iov_base;
	if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
				    slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n", __func__);
/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
	struct nfsd4_slot *slot = resp->cstate.slot;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Either returns 0 or nfserr_retry_uncached */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status == nfserr_retry_uncached_rep)

	/* The sequence operation has been encoded, cstate->datap set. */
	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);

	resp->opcnt = slot->sl_opcnt;
	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
	status = slot->sl_status;
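
/*
 * Editor's note (illustrative): a replayed compound is thus rebuilt as
 * a freshly encoded SEQUENCE op followed by a memcpy of the cached
 * post-SEQUENCE reply data, with resp->opcnt and resp->p restored from
 * the slot rather than re-executing any operation.
 */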
/*
 * Set the exchange_id flags returned by the server.
 */
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
	/* pNFS is not supported */
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
static bool client_has_state(struct nfs4_client *clp)
	/*
	 * Note the clp->cl_openowners check isn't quite right: there's
	 * no need to count owners without stateids.
	 *
	 * Also note we should probably be using this in the 4.0 case too.
	 */
	return !list_empty(&clp->cl_openowners)
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
	struct nfs4_client *unconf, *conf, *new;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	/* Currently only support SP4_NONE */
	switch (exid->spa_how) {
	default:				/* checked by xdr code */
		return nfserr_serverfault;	/* no excuse :-/ */

	/* Cases below refer to rfc 5661 section 18.35.4: */
	conf = find_confirmed_client_by_name(&exid->clname, nn);
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (!clp_used_exchangeid(conf)) { /* buggy client */
			status = nfserr_inval;
		if (!creds_match) { /* case 9 */
			status = nfserr_perm;
		if (!verfs_match) { /* case 8 */
			status = nfserr_not_same;
		exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;

		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
			expire_client(conf);
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
		/* case 5, client reboot */

	if (update) { /* case 7 */
		status = nfserr_noent;

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		expire_client(unconf);

	/* case 1 (normal case) */
	new = create_client(exid->clname, rqstp, &verf);
		status = nfserr_jukebox;
	new->cl_minorversion = 1;

	add_to_unconfirmed(new);
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);

	nfs4_unlock_state();

check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,

	/* The slot is in use, and no response has been sent. */
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		return nfserr_seq_misordered;
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
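
/*
 * Editor's summary (illustrative) of the cases above:
 *
 *	slot in use, seqid == slot_seqid  -> nfserr_jukebox (still running)
 *	slot in use, otherwise            -> nfserr_seq_misordered
 *	seqid == slot_seqid + 1           -> nfs_ok (next request)
 *	seqid == slot_seqid               -> nfserr_replay_cache (retransmit)
 *	anything else                     -> nfserr_seq_misordered
 */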
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure.  sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));

nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
			2 * 2 +	/* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
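
/*
 * Editor's worked example (illustrative): NFS4_MAX_SESSIONID_LEN is 16
 * bytes, so XDR_QUADLEN(16) = 4 words, giving
 *
 *	NFSD_MIN_REQ_HDR_SEQ_SZ  = (4 + 1 + 3 + 4 + 4) * 4 = 64 bytes
 *	NFSD_MIN_RESP_HDR_SEQ_SZ = (2 + 1 + 3 + 4 + 5) * 4 = 60 bytes
 *
 * i.e. the smallest compound that can still carry a full SEQUENCE op,
 * which is what check_forechannel_attrs() enforces below.
 */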
static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
	return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
		|| fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;

nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	if (check_forechannel_attrs(cr_ses->fore_channel))
		return nfserr_toosmall;
	new = alloc_session(&cr_ses->fore_channel);
		return nfserr_jukebox;
	status = nfserr_jukebox;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
		goto out_free_session;

	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);

		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
	} else if (unconf) {
		struct nfs4_client *old;
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		move_to_confirmed(unconf);
		status = nfserr_stale_clientid;

	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_init_conn(rqstp, conn, new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
	       sizeof(struct nfsd4_channel_attrs));
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	nfs4_unlock_state();
	dprintk("%s returns %d\n", __func__, ntohl(status));

	__free_session(new);

static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	return argp->opcnt == resp->opcnt;

static __be32 nfsd4_map_bcts_dir(u32 *dir)
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
	return nfserr_inval;

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
	struct nfsd4_session *session = cstate->session;

	spin_lock(&client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&client_lock);

	nfsd4_probe_callback(session->se_client);

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
				  struct nfsd4_compound_state *cstate,
				  struct nfsd4_bind_conn_to_session *bcts)
	struct nfsd4_conn *conn;

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&client_lock);
	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp));
	/*
	 * Sorta weird: we only need the refcnt'ing because new_conn
	 * acquires the client_lock itself:
	 */
	if (cstate->session) {
		nfsd4_get_session(cstate->session);
		atomic_inc(&cstate->session->se_client->cl_refcount);
	spin_unlock(&client_lock);
	if (!cstate->session)
		return nfserr_badsession;

	status = nfsd4_map_bcts_dir(&bcts->dir);
	conn = alloc_conn(rqstp, bcts->dir);
		return nfserr_jukebox;
	nfsd4_init_conn(rqstp, conn, cstate->session);

static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));

nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
	struct nfsd4_session *ses;
	__be32 status = nfserr_badsession;
	/* Notes:
	 * - The confirmed nfs4_client->cl_sessionid holds the destroyed
	 *   sessionid
	 * - Should we return nfserr_back_chan_busy if waiting for
	 *   callbacks on to-be-destroyed session?
	 * - Do we need to clear any callback info from previous session?
	 */
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			return nfserr_not_only_op;

	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r));
		spin_unlock(&client_lock);

	unhash_session(ses);
	spin_unlock(&client_lock);

	nfsd4_probe_callback_sync(ses->se_client);
	nfs4_unlock_state();

	spin_lock(&client_lock);
	nfsd4_del_conns(ses);
	nfsd4_put_session_locked(ses);
	spin_unlock(&client_lock);

	dprintk("%s returns %d\n", __func__, ntohl(status));

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {

static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
		spin_unlock(&clp->cl_lock);
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;

nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfsd4_session *session;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
		return nfserr_jukebox;

	spin_lock(&client_lock);
	status = nfserr_badsession;
	session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp));

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);
	/*
	 * We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs, which is used to encode
	 * sr_highest_slotid and sr_target_slotid (both set to maxslots):
	 */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
				  slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
		cstate->slot = slot;
		cstate->session = session;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;

	nfsd4_sequence_check_conn(conn, session);

	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;

	/* Hold a session reference until done processing the compound. */
	if (cstate->session) {
		struct nfs4_client *clp = session->se_client;

		nfsd4_get_session(cstate->session);
		atomic_inc(&clp->cl_refcount);
		switch (clp->cl_cb_state) {
			seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		case NFSD4_CB_FAULT:
			seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
			seq->status_flags = 0;
	spin_unlock(&client_lock);
	dprintk("%s: return %d\n", __func__, ntohl(status));

nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
	struct nfs4_client *conf, *unconf, *clp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);

	if (!is_client_expired(conf) && client_has_state(conf)) {
		status = nfserr_clientid_busy;

	/* rfc5661 18.50.3 */
	if (cstate->session && conf == cstate->session->se_client) {
		status = nfserr_clientid_busy;

	status = nfserr_stale_clientid;

	nfs4_unlock_state();
	dprintk("%s return %d\n", __func__, ntohl(status));

nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.  But we only
		 * get here if the client just explicitly destroyed the
		 * client.  Surely it no longer cares what error it gets
		 * back on an operation for the dead client.
		 */
2194 nfs4_unlock_state();
2199 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2200 struct nfsd4_setclientid *setclid)
2202 struct xdr_netobj clname = setclid->se_name;
2203 nfs4_verifier clverifier = setclid->se_verf;
2204 struct nfs4_client *conf, *unconf, *new;
2206 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2208 /* Cases below refer to rfc 3530 section 14.2.33: */
2210 conf = find_confirmed_client_by_name(&clname, nn);
2213 status = nfserr_clid_inuse;
2214 if (clp_used_exchangeid(conf))
2216 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2217 char addr_str[INET6_ADDRSTRLEN];
2218 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2220 dprintk("NFSD: setclientid: string in use by client "
2221 "at %s\n", addr_str);
2225 unconf = find_unconfirmed_client_by_name(&clname, nn);
2227 expire_client(unconf);
2228 status = nfserr_jukebox;
2229 new = create_client(clname, rqstp, &clverifier);
2232 if (conf && same_verf(&conf->cl_verifier, &clverifier))
2233 /* case 1: probable callback update */
2234 copy_clid(new, conf);
2235 else /* case 4 (new client) or cases 2, 3 (client reboot): */
2237 new->cl_minorversion = 0;
2238 gen_callback(new, setclid, rqstp);
2239 add_to_unconfirmed(new);
2240 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2241 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2242 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2245 nfs4_unlock_state();
2251 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2252 struct nfsd4_compound_state *cstate,
2253 struct nfsd4_setclientid_confirm *setclientid_confirm)
2255 struct nfs4_client *conf, *unconf;
2256 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2257 clientid_t * clid = &setclientid_confirm->sc_clientid;
2259 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2261 if (STALE_CLIENTID(clid, nn))
2262 return nfserr_stale_clientid;
2265 conf = find_confirmed_client(clid, false, nn);
2266 unconf = find_unconfirmed_client(clid, false, nn);
2268 * We try hard to give out unique clientid's, so if we get an
2269 * attempt to confirm the same clientid with a different cred,
2270 * there's a bug somewhere. Let's charitably assume it's our
2273 status = nfserr_serverfault;
2274 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
2276 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
2278 /* cases below refer to rfc 3530 section 14.2.34: */
2279 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
2280 if (conf && !unconf) /* case 2: probable retransmit */
2282 else /* case 4: client hasn't noticed we rebooted yet? */
2283 status = nfserr_stale_clientid;
2287 if (conf) { /* case 1: callback update */
2288 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2289 nfsd4_probe_callback(conf);
2290 expire_client(unconf);
2291 } else { /* case 3: normal case; new or rebooted client */
2292 conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
2294 expire_client(conf);
2295 move_to_confirmed(unconf);
2296 nfsd4_probe_callback(unconf);
2299 nfs4_unlock_state();
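/*
 * The 14.2.34 case analysis above in table form: unconfirmed record
 * with matching verifier -> confirm it (a callback update, case 1, if
 * a confirmed record already exists; otherwise case 3, displacing any
 * confirmed record of the same name); confirmed record only -> case 2,
 * a probable retransmit, succeed quietly; neither -> case 4, the
 * client is working from a clientid that predates a server reboot.
 */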
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
{
	unsigned int hashval = file_hashval(ino);

	atomic_set(&fp->fi_ref, 1);
	INIT_LIST_HEAD(&fp->fi_hash);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	fp->fi_inode = igrab(ino);
	fp->fi_had_conflict = false;
	fp->fi_lease = NULL;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	spin_lock(&recall_lock);
	list_add(&fp->fi_hash, &file_hashtbl[hashval]);
	spin_unlock(&recall_lock);
}
static void
nfsd4_free_slab(struct kmem_cache **slab)
{
	if (*slab == NULL)
		return;
	kmem_cache_destroy(*slab);
	*slab = NULL;
}

void
nfsd4_free_slabs(void)
{
	nfsd4_free_slab(&openowner_slab);
	nfsd4_free_slab(&lockowner_slab);
	nfsd4_free_slab(&file_slab);
	nfsd4_free_slab(&stateid_slab);
	nfsd4_free_slab(&deleg_slab);
}

int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out_nomem;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_nomem;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_nomem;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_nomem;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_nomem;
	return 0;
out_nomem:
	nfsd4_free_slabs();
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
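/*
 * Illustrative pairing of the two entry points above; the call site
 * shown here is hypothetical, not taken from this file:
 *
 *	status = nfsd4_init_slabs();	// at state-subsystem startup
 *	if (status)
 *		return status;		// -ENOMEM
 *	...
 *	nfsd4_free_slabs();		// on shutdown
 *
 * nfsd4_free_slab() tolerates slabs that were never created, so the
 * cleanup path is safe to run from any point during initialization.
 */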
void nfs4_free_openowner(struct nfs4_openowner *oo)
{
	kfree(oo->oo_owner.so_owner.data);
	kmem_cache_free(openowner_slab, oo);
}

void nfs4_free_lockowner(struct nfs4_lockowner *lo)
{
	kfree(lo->lo_owner.so_owner.data);
	kmem_cache_free(lockowner_slab, lo);
}

static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}
	sop->so_owner.len = owner->len;

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	return sop;
}
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
	struct nfs4_openowner *oo;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = NFS4_OO_NEW;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	hash_openowner(oo, clp, strhashval);
	return oo;
}

static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_client *clp = oo->oo_owner.so_client;

	init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
	INIT_LIST_HEAD(&stp->st_lockowners);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	stp->st_stateowner = &oo->oo_owner;
	get_nfs4_file(fp);
	stp->st_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	set_access(open->op_share_access, stp);
	set_deny(open->op_share_deny, stp);
	stp->st_openstp = NULL;
}
static void
move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
	       clientid_t *clid)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
		(sop->so_client->cl_clientid.cl_id == clid->cl_id);
}
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			bool sessions, struct nfsd_net *nn)
{
	struct nfs4_stateowner *so;
	struct nfs4_openowner *oo;
	struct nfs4_client *clp;

	list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
			oo = openowner(so);
			clp = oo->oo_owner.so_client;
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client(oo->oo_owner.so_client);
			return oo;
		}
	}
	return NULL;
}
/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file(struct inode *ino)
{
	unsigned int hashval = file_hashval(ino);
	struct nfs4_file *fp;

	spin_lock(&recall_lock);
	list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
		if (fp->fi_inode == ino) {
			get_nfs4_file(fp);
			spin_unlock(&recall_lock);
			return fp;
		}
	}
	spin_unlock(&recall_lock);
	return NULL;
}
/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfs4_file *fp;
	struct nfs4_ol_stateid *stp;
	__be32 ret;

	dprintk("NFSD: nfs4_share_conflict\n");

	fp = find_file(ino);
	if (!fp)
		return nfs_ok;
	ret = nfserr_locked;
	/* Search for conflicting share reservations */
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
		if (test_deny(deny_type, stp) ||
		    test_deny(NFS4_SHARE_DENY_BOTH, stp))
			goto out;
	}
	ret = nfs_ok;
out:
	put_nfs4_file(fp);
	return ret;
}
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/* We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference: */
	atomic_inc(&dp->dl_count);

	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);

	/* Only place dl_time is set; protected by lock_flocks(): */
	dp->dl_time = get_seconds();

	nfsd4_cb_recall(dp);
}
/* Called from break_lease() with lock_flocks() held. */
static void nfsd_break_deleg_cb(struct file_lock *fl)
{
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&recall_lock);
	fp->fi_had_conflict = true;
	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
		nfsd_break_one_deleg(dp);
	spin_unlock(&recall_lock);
}

static int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}
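/*
 * Worked example of the seqid rules above (v4.0 only; sessions replace
 * this mechanism entirely): if so->so_seqid is currently 5, an
 * incoming seqid of 4 is a retransmission answered from the owner's
 * reply cache (nfserr_replay_me), 5 is the expected next request, and
 * anything else is nfserr_bad_seqid.
 */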
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
	oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
	open->op_openowner = oo;
	if (!oo) {
		clp = find_confirmed_client(clientid, cstate->minorversion,
					    nn);
		if (clp == NULL)
			return nfserr_expired;
		goto new_owner;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		clp = oo->oo_owner.so_client;
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	clp = oo->oo_owner.so_client;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, clp, open);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;
	return nfs_ok;
}
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}
static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;

	*dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (*dp == NULL)
		goto out;
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(*dp, flags);
	if (status)
		*dp = NULL;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
static __be32
nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
{
	struct nfs4_ol_stateid *local;
	struct nfs4_openowner *oo = open->op_openowner;

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		/* remember if we have seen this open owner */
		if (local->st_stateowner == &oo->oo_owner)
			*stpp = local;
		/* check for conflicting share reservations */
		if (!test_share(local, open))
			return nfserr_share_denied;
	}
	return nfs_ok;
}
static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfsd4_open *open)
{
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);

	if (!fp->fi_fds[oflag]) {
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
			&fp->fi_fds[oflag]);
		if (status)
			return status;
	}
	nfs4_file_get_access(fp, oflag);

	return nfs_ok;
}
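/*
 * fi_fds[] caches one struct file per open mode (O_RDONLY, O_WRONLY,
 * O_RDWR, indexed by nfs4_access_to_omode()), so repeated OPENs of a
 * file with the same access share one vfs open; on the cached path
 * only the access refcount is bumped.
 */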
static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	u32 op_share_access = open->op_share_access;
	bool new_access;
	__be32 status;

	new_access = !test_access(op_share_access, stp);
	if (new_access) {
		status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
		if (status)
			return status;
	}
	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status) {
		if (new_access) {
			int oflag = nfs4_access_to_omode(op_share_access);
			nfs4_file_put_access(fp, oflag);
		}
		return status;
	}
	/* remember the open */
	set_access(op_share_access, stp);
	set_deny(open->op_share_deny, stp);

	return nfs_ok;
}
static void
nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
{
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}
static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_LEASE;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)(dp->dl_file);
	fl->fl_pid = current->tgid;
	return fl;
}
static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
{
	struct nfs4_file *fp = dp->dl_file;
	struct file_lock *fl;
	int status;

	fl = nfs4_alloc_init_lease(dp, flag);
	if (!fl)
		return -ENOMEM;
	fl->fl_file = find_readable_file(fp);
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
	if (status) {
		list_del_init(&dp->dl_perclnt);
		locks_free_lock(fl);
		return -ENOMEM;
	}
	fp->fi_lease = fl;
	fp->fi_deleg_file = get_file(fl->fl_file);
	atomic_set(&fp->fi_delegees, 1);
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	return 0;
}
static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
{
	struct nfs4_file *fp = dp->dl_file;

	if (!fp->fi_lease)
		return nfs4_setlease(dp, flag);
	spin_lock(&recall_lock);
	if (fp->fi_had_conflict) {
		spin_unlock(&recall_lock);
		return -EAGAIN;
	}
	atomic_inc(&fp->fi_delegees);
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	spin_unlock(&recall_lock);
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
	return 0;
}
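/*
 * Note the split above: only the first delegation on a file takes a
 * vfs lease; later delegations piggyback on the existing fp->fi_lease
 * and just bump fi_delegees, under recall_lock so that a concurrent
 * lease break (fi_had_conflict) cannot race with handing out new
 * delegations.
 */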
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}
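/*
 * Mapping used above when a 4.1 client asked for a delegation but
 * didn't get one: -EAGAIN (lease conflict) reports WND4_CONTENTION;
 * anything else defaults to WND4_RESOURCE, except that an explicit
 * NFS4_SHARE_WANT_CANCEL is acknowledged with WND4_CANCELLED.
 */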
/*
 * Attempt to hand out a delegation.
 */
static void
nfs4_open_delegation(struct net *net, struct svc_fh *fh,
		     struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
	int cb_up;
	int status = 0, flag = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	flag = NFS4_OPEN_DELEGATE_NONE;
	open->op_recall = 0;
	switch (open->op_claim_type) {
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!cb_up)
			open->op_recall = 1;
		flag = open->op_delegate_type;
		if (flag == NFS4_OPEN_DELEGATE_NONE)
			goto out;
		break;
	case NFS4_OPEN_CLAIM_NULL:
		/* Let's not give out any delegations till everyone's
		 * had the chance to reclaim theirs.... */
		if (locks_in_grace(net))
			goto out;
		if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
			goto out;
		if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
			flag = NFS4_OPEN_DELEGATE_WRITE;
		else
			flag = NFS4_OPEN_DELEGATE_READ;
		break;
	default:
		goto out;
	}

	dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
	if (dp == NULL)
		goto out_no_deleg;
	status = nfs4_set_delegation(dp, flag);
	if (status)
		goto out_free;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
out:
	if (flag == NFS4_OPEN_DELEGATE_NONE) {
		/* Look at the client's requested delegate type before
		 * it is overwritten below, else this warning is dead: */
		if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
		    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
			dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		/* 4.1 client asking for a delegation? */
		if (open->op_deleg_want)
			nfsd4_open_deleg_none_ext(open, status);
	}
	open->op_delegate_type = flag;
	return;
out_free:
	nfs4_put_delegation(dp);
out_no_deleg:
	flag = NFS4_OPEN_DELEGATE_NONE;
	goto out;
}
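/*
 * Delegation policy in brief: CLAIM_PREVIOUS reclaims whatever type
 * the client says it already held (flagging a recall if the
 * backchannel is down); CLAIM_NULL opens earn a write delegation when
 * they have write access and a read delegation otherwise, but only
 * outside the grace period, with a usable callback channel, and for a
 * confirmed open owner.  Everything else gets NFS4_OPEN_DELEGATE_NONE.
 */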
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_READ) {
		/* A refused upgrade means the client holds a read
		 * delegation, not a write one: */
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
/*
 * called with nfs4_lock_state() held.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_file(ino);
	if (fp) {
		status = nfs4_check_open(fp, open, &stp);
		if (status)
			goto out;
		status = nfs4_check_deleg(cl, fp, open, &dp);
		if (status)
			goto out;
	} else {
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
		status = nfserr_jukebox;
		fp = open->op_file;
		open->op_file = NULL;
		nfsd4_init_file(fp, ino);
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status)
			goto out;
	} else {
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
		if (status)
			goto out;
		status = nfsd4_truncate(rqstp, current_fh, open);
		if (status)
			goto out;
		stp = open->op_stp;
		open->op_stp = NULL;
		init_open_stateid(stp, fp, open);
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	if (nfsd4_has_session(&resp->cstate)) {
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;

		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	return status;
}
void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
{
	if (open->op_openowner) {
		struct nfs4_openowner *oo = open->op_openowner;

		if (!list_empty(&oo->oo_owner.so_stateids))
			list_del_init(&oo->oo_close_lru);
		if (oo->oo_flags & NFS4_OO_NEW) {
			if (status) {
				release_openowner(oo);
				open->op_openowner = NULL;
			} else
				oo->oo_flags &= ~NFS4_OO_NEW;
		}
	}
	if (open->op_file)
		nfsd4_free_file(open->op_file);
	if (open->op_stp)
		free_generic_stateid(open->op_stp);
}
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    clientid_t *clid)
{
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	nfs4_lock_state();
	dprintk("process_renew(%08x/%08x): starting\n",
			clid->cl_boot, clid->cl_id);
	status = nfserr_stale_clientid;
	if (STALE_CLIENTID(clid, nn))
		goto out;
	clp = find_confirmed_client(clid, cstate->minorversion, nn);
	status = nfserr_expired;
	if (clp == NULL) {
		/* We assume the client took too long to RENEW. */
		dprintk("nfsd4_renew: clientid not found!\n");
		goto out;
	}
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}
static void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	nfsd4_record_grace_done(nn, nn->boot_time);
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * Now that every NFSv4 client has had the chance to recover and
	 * to see the (possibly new, possibly shorter) lease time, we
	 * can safely set the next grace time to the current lease time:
	 */
	nfsd4_grace = nfsd4_lease;
}
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nfsd4_lease;
	time_t t, clientid_val = nfsd4_lease;
	time_t u, test_val = nfsd4_lease;

	nfs4_lock_state();

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
			if (clientid_val > t)
				clientid_val = t;
			break;
		}
		if (atomic_read(&clp->cl_refcount)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		unhash_client_locked(clp);
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		expire_client(clp);
	}
	spin_lock(&recall_lock);
	list_for_each_safe(pos, next, &del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			u = dp->dl_time - cutoff;
			if (test_val > u)
				test_val = u;
			break;
		}
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		unhash_delegation(dp);
	}
	test_val = nfsd4_lease;
	list_for_each_safe(pos, next, &nn->close_lru) {
		oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
		if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
			u = oo->oo_time - cutoff;
			if (test_val > u)
				test_val = u;
			break;
		}
		release_openowner(oo);
	}
	if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
		clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
	nfs4_unlock_state();
	return clientid_val;
}
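/*
 * Example of the return-value computation above, with the default
 * nfsd4_lease of 90 seconds: if the oldest still-live client last
 * renewed 30 seconds ago, it has 60 seconds of lease left, so the
 * laundromat asks to run again in 60 seconds; an idle server returns
 * the full lease time, and the result is never less than
 * NFSD_LAUNDROMAT_MINTIMEOUT.
 */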
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = container_of(laundry, struct delayed_work,
						  work);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
{
	if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
		return nfserr_bad_stateid;
	return nfs_ok;
}

static int
STALE_STATEID(stateid_t *stateid, struct nfsd_net *nn)
{
	if (stateid->si_opaque.so_clid.cl_boot == nn->boot_time)
		return 0;
	dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
		STATEID_VAL(stateid));
	return 1;
}
static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateids, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}
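/*
 * Note the asymmetry in access_permit_read() above: a stateid opened
 * write-only also passes the read check, presumably to accommodate
 * clients that read back through a write-only open (the protocol
 * permits a server to allow READ against a write-only stateid); deny
 * modes were already enforced separately at OPEN time.
 */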
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (locks_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}
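/*
 * Special-stateid handling in brief: the all-ones stateid may bypass
 * share checks for READ; during grace we cannot yet know what
 * conflicting state will be reclaimed, so anonymous I/O waits out the
 * grace period; otherwise the all-zero (and, for WRITE, all-ones)
 * cases fall back to a share-reservation scan via
 * nfs4_share_conflict().
 */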
/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return locks_in_grace(net) && mandatory_lock(inode);
}

/* Returns true iff a is later than b: */
static bool stateid_generation_after(stateid_t *a, stateid_t *b)