nfsd: Don't require client_lock in free_client
[pandora-kernel.git] / fs / nfsd / nfs4state.c
index 1f8aab8..256e903 100644 (file)
@@ -70,7 +70,8 @@ static u64 current_sessionid = 1;
 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
 
 /* forward declarations */
-static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
+static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
+static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
 
 /* Locking: */
 
@@ -84,6 +85,12 @@ static DEFINE_MUTEX(client_mutex);
  */
 static DEFINE_SPINLOCK(state_lock);
 
+/*
+ * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
+ * the refcount on the open stateid to drop.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(close_wq);
+
 static struct kmem_cache *openowner_slab;
 static struct kmem_cache *lockowner_slab;
 static struct kmem_cache *file_slab;
@@ -189,6 +196,17 @@ static void put_client_renew_locked(struct nfs4_client *clp)
                renew_client_locked(clp);
 }
 
+static void put_client_renew(struct nfs4_client *clp)
+{
+       struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
+       if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
+               return;
+       if (!is_client_expired(clp))
+               renew_client_locked(clp);
+       spin_unlock(&nn->client_lock);
+}
+
 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
 {
        __be32 status;
@@ -221,6 +239,44 @@ static void nfsd4_put_session(struct nfsd4_session *ses)
        spin_unlock(&nn->client_lock);
 }
 
+static int
+same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
+{
+       return (sop->so_owner.len == owner->len) &&
+               0 == memcmp(sop->so_owner.data, owner->data, owner->len);
+}
+
+static struct nfs4_openowner *
+find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
+                       struct nfs4_client *clp)
+{
+       struct nfs4_stateowner *so;
+
+       lockdep_assert_held(&clp->cl_lock);
+
+       list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
+                           so_strhash) {
+               if (!so->so_is_open_owner)
+                       continue;
+               if (same_owner_str(so, &open->op_owner)) {
+                       atomic_inc(&so->so_count);
+                       return openowner(so);
+               }
+       }
+       return NULL;
+}
+
+static struct nfs4_openowner *
+find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
+                       struct nfs4_client *clp)
+{
+       struct nfs4_openowner *oo;
+
+       spin_lock(&clp->cl_lock);
+       oo = find_openstateowner_str_locked(hashval, open, clp);
+       spin_unlock(&clp->cl_lock);
+       return oo;
+}
 
 static inline u32
 opaque_hashval(const void *ptr, int nbytes)
@@ -243,10 +299,11 @@ static void nfsd4_free_file(struct nfs4_file *f)
 static inline void
 put_nfs4_file(struct nfs4_file *fi)
 {
+       might_lock(&state_lock);
+
        if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
                hlist_del(&fi->fi_hash);
                spin_unlock(&state_lock);
-               iput(fi->fi_inode);
                nfsd4_free_file(fi);
        }
 }
@@ -257,7 +314,80 @@ get_nfs4_file(struct nfs4_file *fi)
        atomic_inc(&fi->fi_ref);
 }
 
-static int num_delegations;
+static struct file *
+__nfs4_get_fd(struct nfs4_file *f, int oflag)
+{
+       if (f->fi_fds[oflag])
+               return get_file(f->fi_fds[oflag]);
+       return NULL;
+}
+
+static struct file *
+find_writeable_file_locked(struct nfs4_file *f)
+{
+       struct file *ret;
+
+       lockdep_assert_held(&f->fi_lock);
+
+       ret = __nfs4_get_fd(f, O_WRONLY);
+       if (!ret)
+               ret = __nfs4_get_fd(f, O_RDWR);
+       return ret;
+}
+
+static struct file *
+find_writeable_file(struct nfs4_file *f)
+{
+       struct file *ret;
+
+       spin_lock(&f->fi_lock);
+       ret = find_writeable_file_locked(f);
+       spin_unlock(&f->fi_lock);
+
+       return ret;
+}
+
+static struct file *find_readable_file_locked(struct nfs4_file *f)
+{
+       struct file *ret;
+
+       lockdep_assert_held(&f->fi_lock);
+
+       ret = __nfs4_get_fd(f, O_RDONLY);
+       if (!ret)
+               ret = __nfs4_get_fd(f, O_RDWR);
+       return ret;
+}
+
+static struct file *
+find_readable_file(struct nfs4_file *f)
+{
+       struct file *ret;
+
+       spin_lock(&f->fi_lock);
+       ret = find_readable_file_locked(f);
+       spin_unlock(&f->fi_lock);
+
+       return ret;
+}
+
+static struct file *
+find_any_file(struct nfs4_file *f)
+{
+       struct file *ret;
+
+       spin_lock(&f->fi_lock);
+       ret = __nfs4_get_fd(f, O_RDWR);
+       if (!ret) {
+               ret = __nfs4_get_fd(f, O_WRONLY);
+               if (!ret)
+                       ret = __nfs4_get_fd(f, O_RDONLY);
+       }
+       spin_unlock(&f->fi_lock);
+       return ret;
+}
+
+static atomic_long_t num_delegations;
 unsigned long max_delegations;
 
 /*
@@ -269,12 +399,11 @@ unsigned long max_delegations;
 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
 
-static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
+static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
 {
        unsigned int ret;
 
        ret = opaque_hashval(ownername->data, ownername->len);
-       ret += clientid;
        return ret & OWNER_HASH_MASK;
 }
 
@@ -282,75 +411,124 @@ static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
 #define FILE_HASH_BITS                   8
 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
 
-static unsigned int file_hashval(struct inode *ino)
+static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
+{
+       return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
+}
+
+static unsigned int file_hashval(struct knfsd_fh *fh)
+{
+       return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
+}
+
+static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
 {
-       /* XXX: why are we hashing on inode pointer, anyway? */
-       return hash_ptr(ino, FILE_HASH_BITS);
+       return fh1->fh_size == fh2->fh_size &&
+               !memcmp(fh1->fh_base.fh_pad,
+                               fh2->fh_base.fh_pad,
+                               fh1->fh_size);
 }
 
 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
 
-static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
+static void
+__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
 {
-       WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
-       atomic_inc(&fp->fi_access[oflag]);
+       lockdep_assert_held(&fp->fi_lock);
+
+       if (access & NFS4_SHARE_ACCESS_WRITE)
+               atomic_inc(&fp->fi_access[O_WRONLY]);
+       if (access & NFS4_SHARE_ACCESS_READ)
+               atomic_inc(&fp->fi_access[O_RDONLY]);
 }
 
-static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
+static __be32
+nfs4_file_get_access(struct nfs4_file *fp, u32 access)
 {
-       if (oflag == O_RDWR) {
-               __nfs4_file_get_access(fp, O_RDONLY);
-               __nfs4_file_get_access(fp, O_WRONLY);
-       } else
-               __nfs4_file_get_access(fp, oflag);
+       lockdep_assert_held(&fp->fi_lock);
+
+       /* Does this access mode make sense? */
+       if (access & ~NFS4_SHARE_ACCESS_BOTH)
+               return nfserr_inval;
+
+       /* Does it conflict with a deny mode already set? */
+       if ((access & fp->fi_share_deny) != 0)
+               return nfserr_share_denied;
+
+       __nfs4_file_get_access(fp, access);
+       return nfs_ok;
 }
 
-static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
+static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
 {
-       if (fp->fi_fds[oflag]) {
-               fput(fp->fi_fds[oflag]);
-               fp->fi_fds[oflag] = NULL;
+       /* Common case is that there is no deny mode. */
+       if (deny) {
+               /* Does this deny mode make sense? */
+               if (deny & ~NFS4_SHARE_DENY_BOTH)
+                       return nfserr_inval;
+
+               if ((deny & NFS4_SHARE_DENY_READ) &&
+                   atomic_read(&fp->fi_access[O_RDONLY]))
+                       return nfserr_share_denied;
+
+               if ((deny & NFS4_SHARE_DENY_WRITE) &&
+                   atomic_read(&fp->fi_access[O_WRONLY]))
+                       return nfserr_share_denied;
        }
+       return nfs_ok;
 }
 
 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
 {
-       if (atomic_dec_and_test(&fp->fi_access[oflag])) {
-               nfs4_file_put_fd(fp, oflag);
+       might_lock(&fp->fi_lock);
+
+       if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
+               struct file *f1 = NULL;
+               struct file *f2 = NULL;
+
+               swap(f1, fp->fi_fds[oflag]);
                if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
-                       nfs4_file_put_fd(fp, O_RDWR);
+                       swap(f2, fp->fi_fds[O_RDWR]);
+               spin_unlock(&fp->fi_lock);
+               if (f1)
+                       fput(f1);
+               if (f2)
+                       fput(f2);
        }
 }
 
-static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
+static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
 {
-       if (oflag == O_RDWR) {
-               __nfs4_file_put_access(fp, O_RDONLY);
+       WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
+
+       if (access & NFS4_SHARE_ACCESS_WRITE)
                __nfs4_file_put_access(fp, O_WRONLY);
-       } else
-               __nfs4_file_put_access(fp, oflag);
+       if (access & NFS4_SHARE_ACCESS_READ)
+               __nfs4_file_put_access(fp, O_RDONLY);
 }
 
-static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct
-kmem_cache *slab)
+static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+                                        struct kmem_cache *slab)
 {
-       struct idr *stateids = &cl->cl_stateids;
        struct nfs4_stid *stid;
        int new_id;
 
-       stid = kmem_cache_alloc(slab, GFP_KERNEL);
+       stid = kmem_cache_zalloc(slab, GFP_KERNEL);
        if (!stid)
                return NULL;
 
-       new_id = idr_alloc_cyclic(stateids, stid, 0, 0, GFP_KERNEL);
+       idr_preload(GFP_KERNEL);
+       spin_lock(&cl->cl_lock);
+       new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
+       spin_unlock(&cl->cl_lock);
+       idr_preload_end();
        if (new_id < 0)
                goto out_free;
        stid->sc_client = cl;
-       stid->sc_type = 0;
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
        /* Will be incremented before return to client: */
-       stid->sc_stateid.si_generation = 0;
+       atomic_set(&stid->sc_count, 1);
 
        /*
         * It shouldn't be a problem to reuse an opaque stateid value.
@@ -367,9 +545,24 @@ out_free:
        return NULL;
 }
 
-static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
+static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
+{
+       struct nfs4_stid *stid;
+       struct nfs4_ol_stateid *stp;
+
+       stid = nfs4_alloc_stid(clp, stateid_slab);
+       if (!stid)
+               return NULL;
+
+       stp = openlockstateid(stid);
+       stp->st_stid.sc_free = nfs4_free_ol_stateid;
+       return stp;
+}
+
+static void nfs4_free_deleg(struct nfs4_stid *stid)
 {
-       return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
+       kmem_cache_free(deleg_slab, stid);
+       atomic_long_dec(&num_delegations);
 }
 
 /*
@@ -386,10 +579,11 @@ static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
  * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
  * low 3 bytes as hash-table indices.
  *
- * 'state_lock', which is always held when block_delegations() is called,
+ * 'blocked_delegations_lock', which is always taken in block_delegations(),
  * is used to manage concurrent access.  Testing does not need the lock
  * except when swapping the two filters.
  */
+static DEFINE_SPINLOCK(blocked_delegations_lock);
 static struct bloom_pair {
        int     entries, old_entries;
        time_t  swap_time;
@@ -405,7 +599,7 @@ static int delegation_blocked(struct knfsd_fh *fh)
        if (bd->entries == 0)
                return 0;
        if (seconds_since_boot() - bd->swap_time > 30) {
-               spin_lock(&state_lock);
+               spin_lock(&blocked_delegations_lock);
                if (seconds_since_boot() - bd->swap_time > 30) {
                        bd->entries -= bd->old_entries;
                        bd->old_entries = bd->entries;
@@ -414,7 +608,7 @@ static int delegation_blocked(struct knfsd_fh *fh)
                        bd->new = 1-bd->new;
                        bd->swap_time = seconds_since_boot();
                }
-               spin_unlock(&state_lock);
+               spin_unlock(&blocked_delegations_lock);
        }
        hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
        if (test_bit(hash&255, bd->set[0]) &&
@@ -437,69 +631,73 @@ static void block_delegations(struct knfsd_fh *fh)
 
        hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
 
+       spin_lock(&blocked_delegations_lock);
        __set_bit(hash&255, bd->set[bd->new]);
        __set_bit((hash>>8)&255, bd->set[bd->new]);
        __set_bit((hash>>16)&255, bd->set[bd->new]);
        if (bd->entries == 0)
                bd->swap_time = seconds_since_boot();
        bd->entries += 1;
+       spin_unlock(&blocked_delegations_lock);
 }
 
 static struct nfs4_delegation *
-alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
+alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
 {
        struct nfs4_delegation *dp;
+       long n;
 
        dprintk("NFSD alloc_init_deleg\n");
-       if (num_delegations > max_delegations)
-               return NULL;
+       n = atomic_long_inc_return(&num_delegations);
+       if (n < 0 || n > max_delegations)
+               goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
-               return NULL;
+               goto out_dec;
        dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
        if (dp == NULL)
-               return dp;
+               goto out_dec;
+
+       dp->dl_stid.sc_free = nfs4_free_deleg;
        /*
         * delegation seqid's are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
         * 0 anyway just for consistency and use 1:
         */
        dp->dl_stid.sc_stateid.si_generation = 1;
-       num_delegations++;
        INIT_LIST_HEAD(&dp->dl_perfile);
        INIT_LIST_HEAD(&dp->dl_perclnt);
        INIT_LIST_HEAD(&dp->dl_recall_lru);
-       dp->dl_file = NULL;
        dp->dl_type = NFS4_OPEN_DELEGATE_READ;
-       fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
-       dp->dl_time = 0;
-       atomic_set(&dp->dl_count, 1);
-       nfsd4_init_callback(&dp->dl_recall);
+       INIT_WORK(&dp->dl_recall.cb_work, nfsd4_run_cb_recall);
        return dp;
+out_dec:
+       atomic_long_dec(&num_delegations);
+       return NULL;
 }
 
-static void remove_stid(struct nfs4_stid *s)
+void
+nfs4_put_stid(struct nfs4_stid *s)
 {
-       struct idr *stateids = &s->sc_client->cl_stateids;
+       struct nfs4_file *fp = s->sc_file;
+       struct nfs4_client *clp = s->sc_client;
 
-       idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
-}
-
-static void nfs4_free_stid(struct kmem_cache *slab, struct nfs4_stid *s)
-{
-       kmem_cache_free(slab, s);
-}
+       might_lock(&clp->cl_lock);
 
-void
-nfs4_put_delegation(struct nfs4_delegation *dp)
-{
-       if (atomic_dec_and_test(&dp->dl_count)) {
-               nfs4_free_stid(deleg_slab, &dp->dl_stid);
-               num_delegations--;
+       if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
+               wake_up_all(&close_wq);
+               return;
        }
+       idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
+       spin_unlock(&clp->cl_lock);
+       s->sc_free(s);
+       if (fp)
+               put_nfs4_file(fp);
 }
 
 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
 {
+       lockdep_assert_held(&state_lock);
+
        if (!fp->fi_lease)
                return;
        if (atomic_dec_and_test(&fp->fi_delegees)) {
@@ -519,54 +717,54 @@ static void
 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 {
        lockdep_assert_held(&state_lock);
+       lockdep_assert_held(&fp->fi_lock);
 
+       atomic_inc(&dp->dl_stid.sc_count);
        dp->dl_stid.sc_type = NFS4_DELEG_STID;
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 }
 
-/* Called under the state lock. */
 static void
-unhash_delegation(struct nfs4_delegation *dp)
+unhash_delegation_locked(struct nfs4_delegation *dp)
 {
-       spin_lock(&state_lock);
-       list_del_init(&dp->dl_perclnt);
-       list_del_init(&dp->dl_perfile);
-       list_del_init(&dp->dl_recall_lru);
-       spin_unlock(&state_lock);
-       if (dp->dl_file) {
-               nfs4_put_deleg_lease(dp->dl_file);
-               put_nfs4_file(dp->dl_file);
-               dp->dl_file = NULL;
-       }
-}
-
+       struct nfs4_file *fp = dp->dl_stid.sc_file;
 
+       lockdep_assert_held(&state_lock);
 
-static void destroy_revoked_delegation(struct nfs4_delegation *dp)
-{
+       dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
+       /* Ensure that deleg break won't try to requeue it */
+       ++dp->dl_time;
+       spin_lock(&fp->fi_lock);
+       list_del_init(&dp->dl_perclnt);
        list_del_init(&dp->dl_recall_lru);
-       remove_stid(&dp->dl_stid);
-       nfs4_put_delegation(dp);
+       list_del_init(&dp->dl_perfile);
+       spin_unlock(&fp->fi_lock);
+       if (fp)
+               nfs4_put_deleg_lease(fp);
 }
 
 static void destroy_delegation(struct nfs4_delegation *dp)
 {
-       unhash_delegation(dp);
-       remove_stid(&dp->dl_stid);
-       nfs4_put_delegation(dp);
+       spin_lock(&state_lock);
+       unhash_delegation_locked(dp);
+       spin_unlock(&state_lock);
+       nfs4_put_stid(&dp->dl_stid);
 }
 
 static void revoke_delegation(struct nfs4_delegation *dp)
 {
        struct nfs4_client *clp = dp->dl_stid.sc_client;
 
+       WARN_ON(!list_empty(&dp->dl_recall_lru));
+
        if (clp->cl_minorversion == 0)
-               destroy_delegation(dp);
+               nfs4_put_stid(&dp->dl_stid);
        else {
-               unhash_delegation(dp);
                dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
+               spin_lock(&clp->cl_lock);
                list_add(&dp->dl_recall_lru, &clp->cl_revoked);
+               spin_unlock(&clp->cl_lock);
        }
 }
 
@@ -614,57 +812,62 @@ bmap_to_share_mode(unsigned long bmap) {
        return access;
 }
 
-static bool
-test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
-       unsigned int access, deny;
-
-       access = bmap_to_share_mode(stp->st_access_bmap);
-       deny = bmap_to_share_mode(stp->st_deny_bmap);
-       if ((access & open->op_share_deny) || (deny & open->op_share_access))
-               return false;
-       return true;
-}
-
 /* set share access for a given stateid */
 static inline void
 set_access(u32 access, struct nfs4_ol_stateid *stp)
 {
-       __set_bit(access, &stp->st_access_bmap);
+       unsigned char mask = 1 << access;
+
+       WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
+       stp->st_access_bmap |= mask;
 }
 
 /* clear share access for a given stateid */
 static inline void
 clear_access(u32 access, struct nfs4_ol_stateid *stp)
 {
-       __clear_bit(access, &stp->st_access_bmap);
+       unsigned char mask = 1 << access;
+
+       WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
+       stp->st_access_bmap &= ~mask;
 }
 
 /* test whether a given stateid has access */
 static inline bool
 test_access(u32 access, struct nfs4_ol_stateid *stp)
 {
-       return test_bit(access, &stp->st_access_bmap);
+       unsigned char mask = 1 << access;
+
+       return (bool)(stp->st_access_bmap & mask);
 }
 
 /* set share deny for a given stateid */
 static inline void
-set_deny(u32 access, struct nfs4_ol_stateid *stp)
+set_deny(u32 deny, struct nfs4_ol_stateid *stp)
 {
-       __set_bit(access, &stp->st_deny_bmap);
+       unsigned char mask = 1 << deny;
+
+       WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
+       stp->st_deny_bmap |= mask;
 }
 
 /* clear share deny for a given stateid */
 static inline void
-clear_deny(u32 access, struct nfs4_ol_stateid *stp)
+clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
 {
-       __clear_bit(access, &stp->st_deny_bmap);
+       unsigned char mask = 1 << deny;
+
+       WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
+       stp->st_deny_bmap &= ~mask;
 }
 
 /* test whether a given stateid is denying specific access */
 static inline bool
-test_deny(u32 access, struct nfs4_ol_stateid *stp)
+test_deny(u32 deny, struct nfs4_ol_stateid *stp)
 {
-       return test_bit(access, &stp->st_deny_bmap);
+       unsigned char mask = 1 << deny;
+
+       return (bool)(stp->st_deny_bmap & mask);
 }
 
 static int nfs4_access_to_omode(u32 access)
@@ -681,127 +884,240 @@ static int nfs4_access_to_omode(u32 access)
        return O_RDONLY;
 }
 
+/*
+ * A stateid that had a deny mode associated with it is being released
+ * or downgraded. Recalculate the deny mode on the file.
+ */
+static void
+recalculate_deny_mode(struct nfs4_file *fp)
+{
+       struct nfs4_ol_stateid *stp;
+
+       spin_lock(&fp->fi_lock);
+       fp->fi_share_deny = 0;
+       list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
+               fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
+       spin_unlock(&fp->fi_lock);
+}
+
+static void
+reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
+{
+       int i;
+       bool change = false;
+
+       for (i = 1; i < 4; i++) {
+               if ((i & deny) != i) {
+                       change = true;
+                       clear_deny(i, stp);
+               }
+       }
+
+       /* Recalculate per-file deny mode if there was a change */
+       if (change)
+               recalculate_deny_mode(stp->st_stid.sc_file);
+}
+
 /* release all access and file references for a given stateid */
 static void
 release_all_access(struct nfs4_ol_stateid *stp)
 {
        int i;
+       struct nfs4_file *fp = stp->st_stid.sc_file;
+
+       if (fp && stp->st_deny_bmap != 0)
+               recalculate_deny_mode(fp);
 
        for (i = 1; i < 4; i++) {
                if (test_access(i, stp))
-                       nfs4_file_put_access(stp->st_file,
-                                            nfs4_access_to_omode(i));
+                       nfs4_file_put_access(stp->st_stid.sc_file, i);
                clear_access(i, stp);
        }
 }
 
-static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
+static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
 {
-       list_del(&stp->st_perfile);
-       list_del(&stp->st_perstateowner);
+       struct nfs4_client *clp = sop->so_client;
+
+       might_lock(&clp->cl_lock);
+
+       if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
+               return;
+       sop->so_ops->so_unhash(sop);
+       spin_unlock(&clp->cl_lock);
+       kfree(sop->so_owner.data);
+       sop->so_ops->so_free(sop);
 }
 
-static void close_generic_stateid(struct nfs4_ol_stateid *stp)
+static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
 {
-       release_all_access(stp);
-       put_nfs4_file(stp->st_file);
-       stp->st_file = NULL;
+       struct nfs4_file *fp = stp->st_stid.sc_file;
+
+       lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
+
+       spin_lock(&fp->fi_lock);
+       list_del(&stp->st_perfile);
+       spin_unlock(&fp->fi_lock);
+       list_del(&stp->st_perstateowner);
 }
 
-static void free_generic_stateid(struct nfs4_ol_stateid *stp)
+static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
 {
-       remove_stid(&stp->st_stid);
-       nfs4_free_stid(stateid_slab, &stp->st_stid);
+       struct nfs4_ol_stateid *stp = openlockstateid(stid);
+
+       release_all_access(stp);
+       if (stp->st_stateowner)
+               nfs4_put_stateowner(stp->st_stateowner);
+       kmem_cache_free(stateid_slab, stid);
 }
 
-static void __release_lock_stateid(struct nfs4_ol_stateid *stp)
+static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
 {
+       struct nfs4_ol_stateid *stp = openlockstateid(stid);
+       struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
        struct file *file;
 
-       list_del(&stp->st_locks);
-       unhash_generic_stateid(stp);
-       unhash_stid(&stp->st_stid);
-       file = find_any_file(stp->st_file);
+       file = find_any_file(stp->st_stid.sc_file);
        if (file)
-               locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
-       close_generic_stateid(stp);
-       free_generic_stateid(stp);
+               filp_close(file, (fl_owner_t)lo);
+       nfs4_free_ol_stateid(stid);
 }
 
-static void unhash_lockowner(struct nfs4_lockowner *lo)
+/*
+ * Put the persistent reference to an already unhashed generic stateid, while
+ * holding the cl_lock. If it's the last reference, then put it onto the
+ * reaplist for later destruction.
+ */
+static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
+                                      struct list_head *reaplist)
 {
-       struct nfs4_ol_stateid *stp;
+       struct nfs4_stid *s = &stp->st_stid;
+       struct nfs4_client *clp = s->sc_client;
 
-       list_del(&lo->lo_owner.so_strhash);
-       while (!list_empty(&lo->lo_owner.so_stateids)) {
-               stp = list_first_entry(&lo->lo_owner.so_stateids,
-                               struct nfs4_ol_stateid, st_perstateowner);
-               __release_lock_stateid(stp);
+       lockdep_assert_held(&clp->cl_lock);
+
+       WARN_ON_ONCE(!list_empty(&stp->st_locks));
+
+       if (!atomic_dec_and_test(&s->sc_count)) {
+               wake_up_all(&close_wq);
+               return;
        }
+
+       idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
+       list_add(&stp->st_locks, reaplist);
 }
 
-static void nfs4_free_lockowner(struct nfs4_lockowner *lo)
+static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-       kfree(lo->lo_owner.so_owner.data);
-       kmem_cache_free(lockowner_slab, lo);
+       struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+
+       lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+
+       list_del_init(&stp->st_locks);
+       unhash_ol_stateid(stp);
+       unhash_stid(&stp->st_stid);
 }
 
-static void release_lockowner(struct nfs4_lockowner *lo)
+static void release_lock_stateid(struct nfs4_ol_stateid *stp)
+{
+       struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+
+       spin_lock(&oo->oo_owner.so_client->cl_lock);
+       unhash_lock_stateid(stp);
+       spin_unlock(&oo->oo_owner.so_client->cl_lock);
+       nfs4_put_stid(&stp->st_stid);
+}
+
+static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
 {
-       unhash_lockowner(lo);
-       nfs4_free_lockowner(lo);
+       struct nfs4_client *clp = lo->lo_owner.so_client;
+
+       lockdep_assert_held(&clp->cl_lock);
+
+       list_del_init(&lo->lo_owner.so_strhash);
 }
 
-static void release_lockowner_if_empty(struct nfs4_lockowner *lo)
+/*
+ * Free a list of generic stateids that were collected earlier after being
+ * fully unhashed.
+ */
+static void
+free_ol_stateid_reaplist(struct list_head *reaplist)
 {
-       if (list_empty(&lo->lo_owner.so_stateids))
-               release_lockowner(lo);
+       struct nfs4_ol_stateid *stp;
+
+       might_sleep();
+
+       while (!list_empty(reaplist)) {
+               stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
+                                      st_locks);
+               list_del(&stp->st_locks);
+               stp->st_stid.sc_free(&stp->st_stid);
+       }
 }
 
-static void release_lock_stateid(struct nfs4_ol_stateid *stp)
+static void release_lockowner(struct nfs4_lockowner *lo)
 {
-       struct nfs4_lockowner *lo;
+       struct nfs4_client *clp = lo->lo_owner.so_client;
+       struct nfs4_ol_stateid *stp;
+       struct list_head reaplist;
+
+       INIT_LIST_HEAD(&reaplist);
 
-       lo = lockowner(stp->st_stateowner);
-       __release_lock_stateid(stp);
-       release_lockowner_if_empty(lo);
+       spin_lock(&clp->cl_lock);
+       unhash_lockowner_locked(lo);
+       while (!list_empty(&lo->lo_owner.so_stateids)) {
+               stp = list_first_entry(&lo->lo_owner.so_stateids,
+                               struct nfs4_ol_stateid, st_perstateowner);
+               unhash_lock_stateid(stp);
+               put_ol_stateid_locked(stp, &reaplist);
+       }
+       spin_unlock(&clp->cl_lock);
+       free_ol_stateid_reaplist(&reaplist);
+       nfs4_put_stateowner(&lo->lo_owner);
 }
 
-static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp)
+static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
+                                      struct list_head *reaplist)
 {
        struct nfs4_ol_stateid *stp;
 
        while (!list_empty(&open_stp->st_locks)) {
                stp = list_entry(open_stp->st_locks.next,
                                struct nfs4_ol_stateid, st_locks);
-               release_lock_stateid(stp);
+               unhash_lock_stateid(stp);
+               put_ol_stateid_locked(stp, reaplist);
        }
 }
 
-static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
+static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
+                               struct list_head *reaplist)
 {
-       unhash_generic_stateid(stp);
-       release_open_stateid_locks(stp);
-       close_generic_stateid(stp);
+       lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+
+       unhash_ol_stateid(stp);
+       release_open_stateid_locks(stp, reaplist);
 }
 
 static void release_open_stateid(struct nfs4_ol_stateid *stp)
 {
-       unhash_open_stateid(stp);
-       free_generic_stateid(stp);
+       LIST_HEAD(reaplist);
+
+       spin_lock(&stp->st_stid.sc_client->cl_lock);
+       unhash_open_stateid(stp, &reaplist);
+       put_ol_stateid_locked(stp, &reaplist);
+       spin_unlock(&stp->st_stid.sc_client->cl_lock);
+       free_ol_stateid_reaplist(&reaplist);
 }
 
-static void unhash_openowner(struct nfs4_openowner *oo)
+static void unhash_openowner_locked(struct nfs4_openowner *oo)
 {
-       struct nfs4_ol_stateid *stp;
+       struct nfs4_client *clp = oo->oo_owner.so_client;
 
-       list_del(&oo->oo_owner.so_strhash);
-       list_del(&oo->oo_perclient);
-       while (!list_empty(&oo->oo_owner.so_stateids)) {
-               stp = list_first_entry(&oo->oo_owner.so_stateids,
-                               struct nfs4_ol_stateid, st_perstateowner);
-               release_open_stateid(stp);
-       }
+       lockdep_assert_held(&clp->cl_lock);
+
+       list_del_init(&oo->oo_owner.so_strhash);
+       list_del_init(&oo->oo_perclient);
 }
 
 static void release_last_closed_stateid(struct nfs4_openowner *oo)
@@ -809,23 +1125,32 @@ static void release_last_closed_stateid(struct nfs4_openowner *oo)
        struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
 
        if (s) {
-               free_generic_stateid(s);
+               list_del_init(&oo->oo_close_lru);
                oo->oo_last_closed_stid = NULL;
+               nfs4_put_stid(&s->st_stid);
        }
 }
 
-static void nfs4_free_openowner(struct nfs4_openowner *oo)
-{
-       kfree(oo->oo_owner.so_owner.data);
-       kmem_cache_free(openowner_slab, oo);
-}
-
 static void release_openowner(struct nfs4_openowner *oo)
 {
-       unhash_openowner(oo);
-       list_del(&oo->oo_close_lru);
+       struct nfs4_ol_stateid *stp;
+       struct nfs4_client *clp = oo->oo_owner.so_client;
+       struct list_head reaplist;
+
+       INIT_LIST_HEAD(&reaplist);
+
+       spin_lock(&clp->cl_lock);
+       unhash_openowner_locked(oo);
+       while (!list_empty(&oo->oo_owner.so_stateids)) {
+               stp = list_first_entry(&oo->oo_owner.so_stateids,
+                               struct nfs4_ol_stateid, st_perstateowner);
+               unhash_open_stateid(stp, &reaplist);
+               put_ol_stateid_locked(stp, &reaplist);
+       }
+       spin_unlock(&clp->cl_lock);
+       free_ol_stateid_reaplist(&reaplist);
        release_last_closed_stateid(oo);
-       nfs4_free_openowner(oo);
+       nfs4_put_stateowner(&oo->oo_owner);
 }
 
 static inline int
@@ -862,7 +1187,7 @@ void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
                return;
 
        if (!seqid_mutating_err(ntohl(nfserr))) {
-               cstate->replay_owner = NULL;
+               nfsd4_cstate_clear_replay(cstate);
                return;
        }
        if (!so)
@@ -1050,10 +1375,8 @@ static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, str
        if (ret)
                /* oops; xprt is already down: */
                nfsd4_conn_lost(&conn->cn_xpt_user);
-       if (conn->cn_flags & NFS4_CDFC4_BACK) {
-               /* callback channel may be back up */
-               nfsd4_probe_callback(ses->se_client);
-       }
+       /* We may have gained or lost a callback channel: */
+       nfsd4_probe_callback_sync(ses->se_client);
 }
 
 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
@@ -1093,9 +1416,6 @@ static void __free_session(struct nfsd4_session *ses)
 
 static void free_session(struct nfsd4_session *ses)
 {
-       struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);
-
-       lockdep_assert_held(&nn->client_lock);
        nfsd4_del_conns(ses);
        nfsd4_put_drc_mem(&ses->se_fchannel);
        __free_session(ses);
@@ -1207,15 +1527,20 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
 static struct nfs4_client *alloc_client(struct xdr_netobj name)
 {
        struct nfs4_client *clp;
+       int i;
 
        clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
        if (clp == NULL)
                return NULL;
        clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
-       if (clp->cl_name.data == NULL) {
-               kfree(clp);
-               return NULL;
-       }
+       if (clp->cl_name.data == NULL)
+               goto err_no_name;
+       clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
+                       OWNER_HASH_SIZE, GFP_KERNEL);
+       if (!clp->cl_ownerstr_hashtbl)
+               goto err_no_hashtbl;
+       for (i = 0; i < OWNER_HASH_SIZE; i++)
+               INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
        clp->cl_name.len = name.len;
        INIT_LIST_HEAD(&clp->cl_sessions);
        idr_init(&clp->cl_stateids);
@@ -1230,14 +1555,16 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
        spin_lock_init(&clp->cl_lock);
        rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        return clp;
+err_no_hashtbl:
+       kfree(clp->cl_name.data);
+err_no_name:
+       kfree(clp);
+       return NULL;
 }
 
 static void
 free_client(struct nfs4_client *clp)
 {
-       struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);
-
-       lockdep_assert_held(&nn->client_lock);
        while (!list_empty(&clp->cl_sessions)) {
                struct nfsd4_session *ses;
                ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
@@ -1248,18 +1575,30 @@ free_client(struct nfs4_client *clp)
        }
        rpc_destroy_wait_queue(&clp->cl_cb_waitq);
        free_svc_cred(&clp->cl_cred);
+       kfree(clp->cl_ownerstr_hashtbl);
        kfree(clp->cl_name.data);
        idr_destroy(&clp->cl_stateids);
        kfree(clp);
 }
 
 /* must be called under the client_lock */
-static inline void
+static void
 unhash_client_locked(struct nfs4_client *clp)
 {
+       struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
        struct nfsd4_session *ses;
 
-       list_del(&clp->cl_lru);
+       /* Mark the client as expired! */
+       clp->cl_time = 0;
+       /* Make it invisible */
+       if (!list_empty(&clp->cl_idhash)) {
+               list_del_init(&clp->cl_idhash);
+               if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
+                       rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
+               else
+                       rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
+       }
+       list_del_init(&clp->cl_lru);
        spin_lock(&clp->cl_lock);
        list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
                list_del_init(&ses->se_hash);
@@ -1267,53 +1606,63 @@ unhash_client_locked(struct nfs4_client *clp)
 }
 
 static void
-destroy_client(struct nfs4_client *clp)
+unhash_client(struct nfs4_client *clp)
+{
+       struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
+       spin_lock(&nn->client_lock);
+       unhash_client_locked(clp);
+       spin_unlock(&nn->client_lock);
+}
+
+static void
+__destroy_client(struct nfs4_client *clp)
 {
        struct nfs4_openowner *oo;
        struct nfs4_delegation *dp;
        struct list_head reaplist;
-       struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 
        INIT_LIST_HEAD(&reaplist);
        spin_lock(&state_lock);
        while (!list_empty(&clp->cl_delegations)) {
                dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
-               list_del_init(&dp->dl_perclnt);
-               list_move(&dp->dl_recall_lru, &reaplist);
+               unhash_delegation_locked(dp);
+               list_add(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&state_lock);
        while (!list_empty(&reaplist)) {
                dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
-               destroy_delegation(dp);
+               list_del_init(&dp->dl_recall_lru);
+               nfs4_put_stid(&dp->dl_stid);
        }
-       list_splice_init(&clp->cl_revoked, &reaplist);
-       while (!list_empty(&reaplist)) {
-               dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
-               destroy_revoked_delegation(dp);
+       while (!list_empty(&clp->cl_revoked)) {
+               dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
+               list_del_init(&dp->dl_recall_lru);
+               nfs4_put_stid(&dp->dl_stid);
        }
        while (!list_empty(&clp->cl_openowners)) {
                oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
+               atomic_inc(&oo->oo_owner.so_count);
                release_openowner(oo);
        }
        nfsd4_shutdown_callback(clp);
        if (clp->cl_cb_conn.cb_xprt)
                svc_xprt_put(clp->cl_cb_conn.cb_xprt);
-       list_del(&clp->cl_idhash);
-       if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
-               rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
-       else
-               rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
-       spin_lock(&nn->client_lock);
-       unhash_client_locked(clp);
-       WARN_ON_ONCE(atomic_read(&clp->cl_refcount));
        free_client(clp);
-       spin_unlock(&nn->client_lock);
+}
+
+static void
+destroy_client(struct nfs4_client *clp)
+{
+       unhash_client(clp);
+       __destroy_client(clp);
 }
 
 static void expire_client(struct nfs4_client *clp)
 {
+       unhash_client(clp);
        nfsd4_client_record_remove(clp);
-       destroy_client(clp);
+       __destroy_client(clp);
 }
 
 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
@@ -1468,7 +1817,8 @@ static void gen_confirm(struct nfs4_client *clp)
        memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
 }
 
-static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
+static struct nfs4_stid *
+find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
 {
        struct nfs4_stid *ret;
 
@@ -1478,16 +1828,21 @@ static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
        return ret;
 }
 
-static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
+static struct nfs4_stid *
+find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
 {
        struct nfs4_stid *s;
 
-       s = find_stateid(cl, t);
-       if (!s)
-               return NULL;
-       if (typemask & s->sc_type)
-               return s;
-       return NULL;
+       spin_lock(&cl->cl_lock);
+       s = find_stateid_locked(cl, t);
+       if (s != NULL) {
+               if (typemask & s->sc_type)
+                       atomic_inc(&s->sc_count);
+               else
+                       s = NULL;
+       }
+       spin_unlock(&cl->cl_lock);
+       return s;
 }
 
 static struct nfs4_client *create_client(struct xdr_netobj name,
@@ -1497,7 +1852,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
        struct sockaddr *sa = svc_addr(rqstp);
        int ret;
        struct net *net = SVC_NET(rqstp);
-       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        clp = alloc_client(name);
        if (clp == NULL)
@@ -1505,12 +1859,10 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
 
        ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
        if (ret) {
-               spin_lock(&nn->client_lock);
                free_client(clp);
-               spin_unlock(&nn->client_lock);
                return NULL;
        }
-       nfsd4_init_callback(&clp->cl_cb_null);
+       INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_run_cb_null);
        clp->cl_time = get_seconds();
        clear_bit(0, &clp->cl_cb_slot_busy);
        copy_verf(clp, verf);
@@ -2391,6 +2743,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
                        goto out_put_session;
                cstate->slot = slot;
                cstate->session = session;
+               cstate->clp = clp;
                /* Return the cached reply status and set cstate->status
                 * for nfsd4_proc_compound processing */
                status = nfsd4_replay_cache_entry(resp, seq);
@@ -2425,6 +2778,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
 
        cstate->slot = slot;
        cstate->session = session;
+       cstate->clp = clp;
 
 out:
        switch (clp->cl_cb_state) {
@@ -2461,7 +2815,8 @@ nfsd4_sequence_done(struct nfsd4_compoundres *resp)
                }
                /* Drop session reference that was taken in nfsd4_sequence() */
                nfsd4_put_session(cs->session);
-       }
+       } else if (cs->clp)
+               put_client_renew(cs->clp);
 }
 
 __be32
@@ -2653,19 +3008,20 @@ static struct nfs4_file *nfsd4_alloc_file(void)
 }
 
 /* OPEN Share state helper functions */
-static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
+static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
 {
-       unsigned int hashval = file_hashval(ino);
+       unsigned int hashval = file_hashval(fh);
 
        lockdep_assert_held(&state_lock);
 
        atomic_set(&fp->fi_ref, 1);
+       spin_lock_init(&fp->fi_lock);
        INIT_LIST_HEAD(&fp->fi_stateids);
        INIT_LIST_HEAD(&fp->fi_delegations);
-       ihold(ino);
-       fp->fi_inode = ino;
+       fh_copy_shallow(&fp->fi_fhandle, fh);
        fp->fi_had_conflict = false;
        fp->fi_lease = NULL;
+       fp->fi_share_deny = 0;
        memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
        memset(fp->fi_access, 0, sizeof(fp->fi_access));
        hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
@@ -2724,6 +3080,28 @@ static void init_nfs4_replay(struct nfs4_replay *rp)
        rp->rp_status = nfserr_serverfault;
        rp->rp_buflen = 0;
        rp->rp_buf = rp->rp_ibuf;
+       mutex_init(&rp->rp_mutex);
+}
+
+static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
+               struct nfs4_stateowner *so)
+{
+       if (!nfsd4_has_session(cstate)) {
+               mutex_lock(&so->so_replay.rp_mutex);
+               cstate->replay_owner = so;
+               atomic_inc(&so->so_count);
+       }
+}
+
+void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
+{
+       struct nfs4_stateowner *so = cstate->replay_owner;
+
+       if (so != NULL) {
+               cstate->replay_owner = NULL;
+               mutex_unlock(&so->so_replay.rp_mutex);
+               nfs4_put_stateowner(so);
+       }
 }
 
 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
@@ -2744,110 +3122,134 @@ static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj
        INIT_LIST_HEAD(&sop->so_stateids);
        sop->so_client = clp;
        init_nfs4_replay(&sop->so_replay);
+       atomic_set(&sop->so_count, 1);
        return sop;
 }
 
 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
 {
-       struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+       lockdep_assert_held(&clp->cl_lock);
 
-       list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
+       list_add(&oo->oo_owner.so_strhash,
+                &clp->cl_ownerstr_hashtbl[strhashval]);
        list_add(&oo->oo_perclient, &clp->cl_openowners);
 }
 
+static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
+{
+       unhash_openowner_locked(openowner(so));
+}
+
+static void nfs4_free_openowner(struct nfs4_stateowner *so)
+{
+       struct nfs4_openowner *oo = openowner(so);
+
+       kmem_cache_free(openowner_slab, oo);
+}
+
+static const struct nfs4_stateowner_operations openowner_ops = {
+       .so_unhash =    nfs4_unhash_openowner,
+       .so_free =      nfs4_free_openowner,
+};
+
 static struct nfs4_openowner *
-alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp,
-                          struct nfsd4_open *open,
+alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
                           struct nfsd4_compound_state *cstate)
 {
-       struct nfs4_openowner *oo;
+       struct nfs4_client *clp = cstate->clp;
+       struct nfs4_openowner *oo, *ret;
 
        oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
        if (!oo)
                return NULL;
+       oo->oo_owner.so_ops = &openowner_ops;
        oo->oo_owner.so_is_open_owner = 1;
        oo->oo_owner.so_seqid = open->op_seqid;
-       oo->oo_flags = NFS4_OO_NEW;
+       oo->oo_flags = 0;
        if (nfsd4_has_session(cstate))
                oo->oo_flags |= NFS4_OO_CONFIRMED;
        oo->oo_time = 0;
        oo->oo_last_closed_stid = NULL;
        INIT_LIST_HEAD(&oo->oo_close_lru);
-       hash_openowner(oo, clp, strhashval);
-       return oo;
+       spin_lock(&clp->cl_lock);
+       ret = find_openstateowner_str_locked(strhashval, open, clp);
+       if (ret == NULL) {
+               hash_openowner(oo, clp, strhashval);
+               ret = oo;
+       } else
+               nfs4_free_openowner(&oo->oo_owner);
+       spin_unlock(&clp->cl_lock);
+       return ret;
 }
 
 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
        struct nfs4_openowner *oo = open->op_openowner;
 
+       atomic_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_OPEN_STID;
        INIT_LIST_HEAD(&stp->st_locks);
-       list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
-       list_add(&stp->st_perfile, &fp->fi_stateids);
        stp->st_stateowner = &oo->oo_owner;
+       atomic_inc(&stp->st_stateowner->so_count);
        get_nfs4_file(fp);
-       stp->st_file = fp;
+       stp->st_stid.sc_file = fp;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = 0;
-       set_access(open->op_share_access, stp);
-       set_deny(open->op_share_deny, stp);
        stp->st_openstp = NULL;
+       spin_lock(&oo->oo_owner.so_client->cl_lock);
+       list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
+       spin_lock(&fp->fi_lock);
+       list_add(&stp->st_perfile, &fp->fi_stateids);
+       spin_unlock(&fp->fi_lock);
+       spin_unlock(&oo->oo_owner.so_client->cl_lock);
 }
 
+/*
+ * In the 4.0 case we need to keep the owners around a little while to handle
+ * CLOSE replay. We still do need to release any file access that is held by
+ * them before returning however.
+ */
 static void
-move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
+move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
 {
-       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+       struct nfs4_openowner *oo = openowner(s->st_stateowner);
+       struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
+                                               nfsd_net_id);
 
        dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
 
-       list_move_tail(&oo->oo_close_lru, &nn->close_lru);
-       oo->oo_time = get_seconds();
-}
-
-static int
-same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
-                                                       clientid_t *clid)
-{
-       return (sop->so_owner.len == owner->len) &&
-               0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
-               (sop->so_client->cl_clientid.cl_id == clid->cl_id);
-}
-
-static struct nfs4_openowner *
-find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
-                       bool sessions, struct nfsd_net *nn)
-{
-       struct nfs4_stateowner *so;
-       struct nfs4_openowner *oo;
-       struct nfs4_client *clp;
+       /*
+        * We know that we hold one reference via nfsd4_close, and another
+        * "persistent" reference for the client. If the refcount is higher
+        * than 2, then there are still calls in progress that are using this
+        * stateid. We can't put the sc_file reference until they are finished.
+        * Wait for the refcount to drop to 2. Since it has been unhashed,
+        * there should be no danger of the refcount going back up again at
+        * this point.
+        */
+       wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
 
-       list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
-               if (!so->so_is_open_owner)
-                       continue;
-               if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
-                       oo = openowner(so);
-                       clp = oo->oo_owner.so_client;
-                       if ((bool)clp->cl_minorversion != sessions)
-                               return NULL;
-                       renew_client(oo->oo_owner.so_client);
-                       return oo;
-               }
+       release_all_access(s);
+       if (s->st_stid.sc_file) {
+               put_nfs4_file(s->st_stid.sc_file);
+               s->st_stid.sc_file = NULL;
        }
-       return NULL;
+       release_last_closed_stateid(oo);
+       oo->oo_last_closed_stid = s;
+       list_move_tail(&oo->oo_close_lru, &nn->close_lru);
+       oo->oo_time = get_seconds();
 }
 
 /* search file_hashtbl[] for file */
 static struct nfs4_file *
-find_file_locked(struct inode *ino)
+find_file_locked(struct knfsd_fh *fh)
 {
-       unsigned int hashval = file_hashval(ino);
+       unsigned int hashval = file_hashval(fh);
        struct nfs4_file *fp;
 
        lockdep_assert_held(&state_lock);
 
        hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
-               if (fp->fi_inode == ino) {
+               if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
                        get_nfs4_file(fp);
                        return fp;
                }
@@ -2856,25 +3258,25 @@ find_file_locked(struct inode *ino)
 }
 
 static struct nfs4_file *
-find_file(struct inode *ino)
+find_file(struct knfsd_fh *fh)
 {
        struct nfs4_file *fp;
 
        spin_lock(&state_lock);
-       fp = find_file_locked(ino);
+       fp = find_file_locked(fh);
        spin_unlock(&state_lock);
        return fp;
 }
 
 static struct nfs4_file *
-find_or_add_file(struct inode *ino, struct nfs4_file *new)
+find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
 {
        struct nfs4_file *fp;
 
        spin_lock(&state_lock);
-       fp = find_file_locked(ino);
+       fp = find_file_locked(fh);
        if (fp == NULL) {
-               nfsd4_init_file(new, ino);
+               nfsd4_init_file(new, fh);
                fp = new;
        }
        spin_unlock(&state_lock);
@@ -2889,47 +3291,53 @@ find_or_add_file(struct inode *ino, struct nfs4_file *new)
 static __be32
 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
 {
-       struct inode *ino = current_fh->fh_dentry->d_inode;
        struct nfs4_file *fp;
-       struct nfs4_ol_stateid *stp;
-       __be32 ret;
+       __be32 ret = nfs_ok;
 
-       fp = find_file(ino);
+       fp = find_file(&current_fh->fh_handle);
        if (!fp)
-               return nfs_ok;
-       ret = nfserr_locked;
-       /* Search for conflicting share reservations */
-       list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
-               if (test_deny(deny_type, stp) ||
-                   test_deny(NFS4_SHARE_DENY_BOTH, stp))
-                       goto out;
-       }
-       ret = nfs_ok;
-out:
+               return ret;
+       /* Check for conflicting share reservations */
+       spin_lock(&fp->fi_lock);
+       if (fp->fi_share_deny & deny_type)
+               ret = nfserr_locked;
+       spin_unlock(&fp->fi_lock);
        put_nfs4_file(fp);
        return ret;
 }
 
-static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
+void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp)
 {
-       struct nfs4_client *clp = dp->dl_stid.sc_client;
-       struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+       struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
+                                         nfsd_net_id);
 
-       lockdep_assert_held(&state_lock);
-       /* We're assuming the state code never drops its reference
+       block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
+
+       /*
+        * We can't do this in nfsd_break_deleg_cb because it is
+        * already holding inode->i_lock.
+        *
+        * If the dl_time != 0, then we know that it has already been
+        * queued for a lease break. Don't queue it again.
+        */
+       spin_lock(&state_lock);
+       if (dp->dl_time == 0) {
+               dp->dl_time = get_seconds();
+               list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
+       }
+       spin_unlock(&state_lock);
+}
+
+static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
+{
+       /*
+        * We're assuming the state code never drops its reference
         * without first removing the lease.  Since we're in this lease
         * callback (and since the lease code is serialized by the kernel
         * lock) we know the server hasn't removed the lease yet, we know
-        * it's safe to take a reference: */
-       atomic_inc(&dp->dl_count);
-
-       list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
-
-       /* Only place dl_time is set; protected by i_lock: */
-       dp->dl_time = get_seconds();
-
-       block_delegations(&dp->dl_fh);
-
+        * it's safe to take a reference.
+        */
+       atomic_inc(&dp->dl_stid.sc_count);
        nfsd4_cb_recall(dp);
 }
 
@@ -2954,11 +3362,20 @@ static void nfsd_break_deleg_cb(struct file_lock *fl)
         */
        fl->fl_break_time = 0;
 
-       spin_lock(&state_lock);
+       spin_lock(&fp->fi_lock);
        fp->fi_had_conflict = true;
-       list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
-               nfsd_break_one_deleg(dp);
-       spin_unlock(&state_lock);
+       /*
+        * If there are no delegations on the list, then we can't count on this
+        * lease ever being cleaned up. Set the fl_break_time to jiffies so that
+        * time_out_leases will do it ASAP. The fact that fi_had_conflict is now
+        * true should keep any new delegations from being hashed.
+        */
+       if (list_empty(&fp->fi_delegations))
+               fl->fl_break_time = jiffies;
+       else
+               list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
+                       nfsd_break_one_deleg(dp);
+       spin_unlock(&fp->fi_lock);
 }
 
 static
@@ -2986,6 +3403,38 @@ static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4
        return nfserr_bad_seqid;
 }
 
+static __be32 lookup_clientid(clientid_t *clid,
+               struct nfsd4_compound_state *cstate,
+               struct nfsd_net *nn)
+{
+       struct nfs4_client *found;
+
+       if (cstate->clp) {
+               found = cstate->clp;
+               if (!same_clid(&found->cl_clientid, clid))
+                       return nfserr_stale_clientid;
+               return nfs_ok;
+       }
+
+       if (STALE_CLIENTID(clid, nn))
+               return nfserr_stale_clientid;
+
+       /*
+        * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
+        * cached already then we know this is for v4.0 and "sessions"
+        * will be false.
+        */
+       WARN_ON_ONCE(cstate->session);
+       found = find_confirmed_client(clid, false, nn);
+       if (!found)
+               return nfserr_expired;
+
+       /* Cache the nfs4_client in cstate! */
+       cstate->clp = found;
+       atomic_inc(&found->cl_refcount);
+       return nfs_ok;
+}
+
 __be32
 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
                    struct nfsd4_open *open, struct nfsd_net *nn)
@@ -3006,19 +3455,19 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
        if (open->op_file == NULL)
                return nfserr_jukebox;
 
-       strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
-       oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
+       status = lookup_clientid(clientid, cstate, nn);
+       if (status)
+               return status;
+       clp = cstate->clp;
+
+       strhashval = ownerstr_hashval(&open->op_owner);
+       oo = find_openstateowner_str(strhashval, open, clp);
        open->op_openowner = oo;
        if (!oo) {
-               clp = find_confirmed_client(clientid, cstate->minorversion,
-                                           nn);
-               if (clp == NULL)
-                       return nfserr_expired;
                goto new_owner;
        }
        if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
                /* Replace unconfirmed owners without checking for replay. */
-               clp = oo->oo_owner.so_client;
                release_openowner(oo);
                open->op_openowner = NULL;
                goto new_owner;
@@ -3026,15 +3475,14 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
        status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
        if (status)
                return status;
-       clp = oo->oo_owner.so_client;
        goto alloc_stateid;
 new_owner:
-       oo = alloc_init_open_stateowner(strhashval, clp, open, cstate);
+       oo = alloc_init_open_stateowner(strhashval, open, cstate);
        if (oo == NULL)
                return nfserr_jukebox;
        open->op_openowner = oo;
 alloc_stateid:
-       open->op_stp = nfs4_alloc_stateid(clp);
+       open->op_stp = nfs4_alloc_open_stateid(clp);
        if (!open->op_stp)
                return nfserr_jukebox;
        return nfs_ok;
@@ -3076,14 +3524,18 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
 {
        int flags;
        __be32 status = nfserr_bad_stateid;
+       struct nfs4_delegation *deleg;
 
-       *dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
-       if (*dp == NULL)
+       deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
+       if (deleg == NULL)
                goto out;
        flags = share_access_to_flags(open->op_share_access);
-       status = nfs4_check_delegmode(*dp, flags);
-       if (status)
-               *dp = NULL;
+       status = nfs4_check_delegmode(deleg, flags);
+       if (status) {
+               nfs4_put_stid(&deleg->dl_stid);
+               goto out;
+       }
+       *dp = deleg;
 out:
        if (!nfsd4_is_deleg_cur(open))
                return nfs_ok;
@@ -3093,24 +3545,25 @@ out:
        return nfs_ok;
 }
 
-static __be32
-nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
+static struct nfs4_ol_stateid *
+nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
 {
-       struct nfs4_ol_stateid *local;
+       struct nfs4_ol_stateid *local, *ret = NULL;
        struct nfs4_openowner *oo = open->op_openowner;
 
+       spin_lock(&fp->fi_lock);
        list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
                /* ignore lock owners */
                if (local->st_stateowner->so_is_open_owner == 0)
                        continue;
-               /* remember if we have seen this open owner */
-               if (local->st_stateowner == &oo->oo_owner)
-                       *stpp = local;
-               /* check for conflicting share reservations */
-               if (!test_share(local, open))
-                       return nfserr_share_denied;
+               if (local->st_stateowner == &oo->oo_owner) {
+                       ret = local;
+                       atomic_inc(&ret->st_stid.sc_count);
+                       break;
+               }
        }
-       return nfs_ok;
+       spin_unlock(&fp->fi_lock);
+       return ret;
 }
 
 static inline int nfs4_access_to_access(u32 nfs4_access)
@@ -3140,53 +3593,99 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
 }
 
 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
-               struct svc_fh *cur_fh, struct nfsd4_open *open)
+               struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
+               struct nfsd4_open *open)
 {
+       struct file *filp = NULL;
        __be32 status;
        int oflag = nfs4_access_to_omode(open->op_share_access);
        int access = nfs4_access_to_access(open->op_share_access);
+       unsigned char old_access_bmap, old_deny_bmap;
+
+       spin_lock(&fp->fi_lock);
+
+       /*
+        * Are we trying to set a deny mode that would conflict with
+        * current access?
+        */
+       status = nfs4_file_check_deny(fp, open->op_share_deny);
+       if (status != nfs_ok) {
+               spin_unlock(&fp->fi_lock);
+               goto out;
+       }
+
+       /* set access to the file */
+       status = nfs4_file_get_access(fp, open->op_share_access);
+       if (status != nfs_ok) {
+               spin_unlock(&fp->fi_lock);
+               goto out;
+       }
+
+       /* Set access bits in stateid */
+       old_access_bmap = stp->st_access_bmap;
+       set_access(open->op_share_access, stp);
+
+       /* Set new deny mask */
+       old_deny_bmap = stp->st_deny_bmap;
+       set_deny(open->op_share_deny, stp);
+       fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
 
        if (!fp->fi_fds[oflag]) {
-               status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
-                       &fp->fi_fds[oflag]);
+               spin_unlock(&fp->fi_lock);
+               status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
                if (status)
-                       goto out;
+                       goto out_put_access;
+               spin_lock(&fp->fi_lock);
+               if (!fp->fi_fds[oflag]) {
+                       fp->fi_fds[oflag] = filp;
+                       filp = NULL;
+               }
        }
-       nfs4_file_get_access(fp, oflag);
+       spin_unlock(&fp->fi_lock);
+       if (filp)
+               fput(filp);
 
        status = nfsd4_truncate(rqstp, cur_fh, open);
        if (status)
                goto out_put_access;
-
-       return nfs_ok;
-
-out_put_access:
-       nfs4_file_put_access(fp, oflag);
 out:
        return status;
+out_put_access:
+       stp->st_access_bmap = old_access_bmap;
+       nfs4_file_put_access(fp, open->op_share_access);
+       reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
+       goto out;
 }
 
 static __be32
 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
 {
-       u32 op_share_access = open->op_share_access;
        __be32 status;
+       unsigned char old_deny_bmap;
 
-       if (!test_access(op_share_access, stp))
-               status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
-       else
-               status = nfsd4_truncate(rqstp, cur_fh, open);
+       if (!test_access(open->op_share_access, stp))
+               return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
 
-       if (status)
+       /* test and set deny mode */
+       spin_lock(&fp->fi_lock);
+       status = nfs4_file_check_deny(fp, open->op_share_deny);
+       if (status == nfs_ok) {
+               old_deny_bmap = stp->st_deny_bmap;
+               set_deny(open->op_share_deny, stp);
+               fp->fi_share_deny |=
+                               (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
+       }
+       spin_unlock(&fp->fi_lock);
+
+       if (status != nfs_ok)
                return status;
 
-       /* remember the open */
-       set_access(op_share_access, stp);
-       set_deny(open->op_share_deny, stp);
-       return nfs_ok;
+       status = nfsd4_truncate(rqstp, cur_fh, open);
+       if (status != nfs_ok)
+               reset_union_bmap_deny(old_deny_bmap, stp);
+       return status;
 }
 
-
 static void
 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
 {
@@ -3206,7 +3705,7 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
        return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
 }
 
-static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
+static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
 {
        struct file_lock *fl;
 
@@ -3218,53 +3717,101 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int f
        fl->fl_flags = FL_DELEG;
        fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
        fl->fl_end = OFFSET_MAX;
-       fl->fl_owner = (fl_owner_t)(dp->dl_file);
+       fl->fl_owner = (fl_owner_t)fp;
        fl->fl_pid = current->tgid;
        return fl;
 }
 
 static int nfs4_setlease(struct nfs4_delegation *dp)
 {
-       struct nfs4_file *fp = dp->dl_file;
+       struct nfs4_file *fp = dp->dl_stid.sc_file;
        struct file_lock *fl;
-       int status;
+       struct file *filp;
+       int status = 0;
 
-       fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
+       fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
        if (!fl)
                return -ENOMEM;
-       fl->fl_file = find_readable_file(fp);
-       status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
-       if (status)
-               goto out_free;
+       filp = find_readable_file(fp);
+       if (!filp) {
+               /* We should always have a readable file here */
+               WARN_ON_ONCE(1);
+               return -EBADF;
+       }
+       fl->fl_file = filp;
+       status = vfs_setlease(filp, fl->fl_type, &fl);
+       if (status) {
+               locks_free_lock(fl);
+               goto out_fput;
+       }
+       spin_lock(&state_lock);
+       spin_lock(&fp->fi_lock);
+       /* Did the lease get broken before we took the lock? */
+       status = -EAGAIN;
+       if (fp->fi_had_conflict)
+               goto out_unlock;
+       /* Race breaker */
+       if (fp->fi_lease) {
+               status = 0;
+               atomic_inc(&fp->fi_delegees);
+               hash_delegation_locked(dp, fp);
+               goto out_unlock;
+       }
        fp->fi_lease = fl;
-       fp->fi_deleg_file = get_file(fl->fl_file);
+       fp->fi_deleg_file = filp;
        atomic_set(&fp->fi_delegees, 1);
-       spin_lock(&state_lock);
        hash_delegation_locked(dp, fp);
+       spin_unlock(&fp->fi_lock);
        spin_unlock(&state_lock);
        return 0;
-out_free:
-       locks_free_lock(fl);
+out_unlock:
+       spin_unlock(&fp->fi_lock);
+       spin_unlock(&state_lock);
+out_fput:
+       fput(filp);
        return status;
 }
 
-static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
+static struct nfs4_delegation *
+nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
+                   struct nfs4_file *fp)
 {
+       int status;
+       struct nfs4_delegation *dp;
+
        if (fp->fi_had_conflict)
-               return -EAGAIN;
+               return ERR_PTR(-EAGAIN);
+
+       dp = alloc_init_deleg(clp, fh);
+       if (!dp)
+               return ERR_PTR(-ENOMEM);
+
        get_nfs4_file(fp);
-       dp->dl_file = fp;
-       if (!fp->fi_lease)
-               return nfs4_setlease(dp);
        spin_lock(&state_lock);
+       spin_lock(&fp->fi_lock);
+       dp->dl_stid.sc_file = fp;
+       if (!fp->fi_lease) {
+               spin_unlock(&fp->fi_lock);
+               spin_unlock(&state_lock);
+               status = nfs4_setlease(dp);
+               goto out;
+       }
        atomic_inc(&fp->fi_delegees);
        if (fp->fi_had_conflict) {
-               spin_unlock(&state_lock);
-               return -EAGAIN;
+               status = -EAGAIN;
+               goto out_unlock;
        }
        hash_delegation_locked(dp, fp);
+       status = 0;
+out_unlock:
+       spin_unlock(&fp->fi_lock);
        spin_unlock(&state_lock);
-       return 0;
+out:
+       if (status) {
+               nfs4_put_stid(&dp->dl_stid);
+               return ERR_PTR(status);
+       }
+       return dp;
 }
 
 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
@@ -3295,11 +3842,12 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
  * proper support for them.
  */
 static void
-nfs4_open_delegation(struct net *net, struct svc_fh *fh,
-                    struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
+nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
+                       struct nfs4_ol_stateid *stp)
 {
        struct nfs4_delegation *dp;
-       struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
+       struct nfs4_openowner *oo = openowner(stp->st_stateowner);
+       struct nfs4_client *clp = stp->st_stid.sc_client;
        int cb_up;
        int status = 0;
 
@@ -3318,7 +3866,7 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh,
                         * Let's not give out any delegations till everyone's
                         * had the chance to reclaim theirs....
                         */
-                       if (locks_in_grace(net))
+                       if (locks_in_grace(clp->net))
                                goto out_no_deleg;
                        if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
                                goto out_no_deleg;
@@ -3337,21 +3885,17 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh,
                default:
                        goto out_no_deleg;
        }
-       dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh);
-       if (dp == NULL)
+       dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
+       if (IS_ERR(dp))
                goto out_no_deleg;
-       status = nfs4_set_delegation(dp, stp->st_file);
-       if (status)
-               goto out_free;
 
        memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
 
        dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
                STATEID_VAL(&dp->dl_stid.sc_stateid));
        open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
+       nfs4_put_stid(&dp->dl_stid);
        return;
-out_free:
-       destroy_delegation(dp);
 out_no_deleg:
        open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
        if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
@@ -3393,7 +3937,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        struct nfsd4_compoundres *resp = rqstp->rq_resp;
        struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
        struct nfs4_file *fp = NULL;
-       struct inode *ino = current_fh->fh_dentry->d_inode;
        struct nfs4_ol_stateid *stp = NULL;
        struct nfs4_delegation *dp = NULL;
        __be32 status;
@@ -3403,13 +3946,12 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
         * and check for delegations in the process of being recalled.
         * If not found, create the nfs4_file struct
         */
-       fp = find_or_add_file(ino, open->op_file);
+       fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
        if (fp != open->op_file) {
-               if ((status = nfs4_check_open(fp, open, &stp)))
-                       goto out;
                status = nfs4_check_deleg(cl, open, &dp);
                if (status)
                        goto out;
+               stp = nfsd4_find_existing_open(fp, open);
        } else {
                open->op_file = NULL;
                status = nfserr_bad_stateid;
@@ -3428,12 +3970,14 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                if (status)
                        goto out;
        } else {
-               status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
-               if (status)
-                       goto out;
                stp = open->op_stp;
                open->op_stp = NULL;
                init_open_stateid(stp, fp, open);
+               status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
+               if (status) {
+                       release_open_stateid(stp);
+                       goto out;
+               }
        }
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -3450,7 +3994,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        * Attempt to hand out a delegation. No error return, because the
        * OPEN succeeds even if we fail.
        */
-       nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
+       nfs4_open_delegation(current_fh, open, stp);
 nodeleg:
        status = nfs_ok;
 
@@ -3473,41 +4017,27 @@ out:
        if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
            !nfsd4_has_session(&resp->cstate))
                open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
+       if (dp)
+               nfs4_put_stid(&dp->dl_stid);
+       if (stp)
+               nfs4_put_stid(&stp->st_stid);
 
        return status;
 }
 
-void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
+void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
+                             struct nfsd4_open *open, __be32 status)
 {
        if (open->op_openowner) {
-               struct nfs4_openowner *oo = open->op_openowner;
-
-               if (!list_empty(&oo->oo_owner.so_stateids))
-                       list_del_init(&oo->oo_close_lru);
-               if (oo->oo_flags & NFS4_OO_NEW) {
-                       if (status) {
-                               release_openowner(oo);
-                               open->op_openowner = NULL;
-                       } else
-                               oo->oo_flags &= ~NFS4_OO_NEW;
-               }
+               struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
+
+               nfsd4_cstate_assign_replay(cstate, so);
+               nfs4_put_stateowner(so);
        }
        if (open->op_file)
                nfsd4_free_file(open->op_file);
        if (open->op_stp)
-               free_generic_stateid(open->op_stp);
-}
-
-static __be32 lookup_clientid(clientid_t *clid, bool session, struct nfsd_net *nn, struct nfs4_client **clp)
-{
-       struct nfs4_client *found;
-
-       if (STALE_CLIENTID(clid, nn))
-               return nfserr_stale_clientid;
-       found = find_confirmed_client(clid, session, nn);
-       if (clp)
-               *clp = found;
-       return found ? nfs_ok : nfserr_expired;
+               nfs4_put_stid(&open->op_stp->st_stid);
 }
 
 __be32
@@ -3521,9 +4051,10 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        nfs4_lock_state();
        dprintk("process_renew(%08x/%08x): starting\n", 
                        clid->cl_boot, clid->cl_id);
-       status = lookup_clientid(clid, cstate->minorversion, nn, &clp);
+       status = lookup_clientid(clid, cstate, nn);
        if (status)
                goto out;
+       clp = cstate->clp;
        status = nfserr_cb_path_down;
        if (!list_empty(&clp->cl_delegations)
                        && clp->cl_cb_state != NFSD4_CB_UP)
@@ -3581,13 +4112,15 @@ nfs4_laundromat(struct nfsd_net *nn)
                                clp->cl_clientid.cl_id);
                        continue;
                }
-               list_move(&clp->cl_lru, &reaplist);
+               unhash_client_locked(clp);
+               list_add(&clp->cl_lru, &reaplist);
        }
        spin_unlock(&nn->client_lock);
        list_for_each_safe(pos, next, &reaplist) {
                clp = list_entry(pos, struct nfs4_client, cl_lru);
                dprintk("NFSD: purging unused client (clientid %08x)\n",
                        clp->cl_clientid.cl_id);
+               list_del_init(&clp->cl_lru);
                expire_client(clp);
        }
        spin_lock(&state_lock);
@@ -3600,11 +4133,14 @@ nfs4_laundromat(struct nfsd_net *nn)
                        new_timeo = min(new_timeo, t);
                        break;
                }
-               list_move(&dp->dl_recall_lru, &reaplist);
+               unhash_delegation_locked(dp);
+               list_add(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&state_lock);
-       list_for_each_safe(pos, next, &reaplist) {
-               dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
+       while (!list_empty(&reaplist)) {
+               dp = list_first_entry(&reaplist, struct nfs4_delegation,
+                                       dl_recall_lru);
+               list_del_init(&dp->dl_recall_lru);
                revoke_delegation(dp);
        }
        list_for_each_safe(pos, next, &nn->close_lru) {
@@ -3614,7 +4150,7 @@ nfs4_laundromat(struct nfsd_net *nn)
                        new_timeo = min(new_timeo, t);
                        break;
                }
-               release_openowner(oo);
+               release_last_closed_stateid(oo);
        }
        new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
        nfs4_unlock_state();
@@ -3640,7 +4176,7 @@ laundromat_main(struct work_struct *laundry)
 
 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
 {
-       if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
+       if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
                return nfserr_bad_stateid;
        return nfs_ok;
 }
@@ -3742,10 +4278,10 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
 {
        struct nfs4_stid *s;
        struct nfs4_ol_stateid *ols;
-       __be32 status;
+       __be32 status = nfserr_bad_stateid;
 
        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
-               return nfserr_bad_stateid;
+               return status;
        /* Client debugging aid. */
        if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
                char addr_str[INET6_ADDRSTRLEN];
@@ -3753,32 +4289,42 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
                                 sizeof(addr_str));
                pr_warn_ratelimited("NFSD: client %s testing state ID "
                                        "with incorrect client ID\n", addr_str);
-               return nfserr_bad_stateid;
+               return status;
        }
-       s = find_stateid(cl, stateid);
+       spin_lock(&cl->cl_lock);
+       s = find_stateid_locked(cl, stateid);
        if (!s)
-               return nfserr_bad_stateid;
+               goto out_unlock;
        status = check_stateid_generation(stateid, &s->sc_stateid, 1);
        if (status)
-               return status;
+               goto out_unlock;
        switch (s->sc_type) {
        case NFS4_DELEG_STID:
-               return nfs_ok;
+               status = nfs_ok;
+               break;
        case NFS4_REVOKED_DELEG_STID:
-               return nfserr_deleg_revoked;
+               status = nfserr_deleg_revoked;
+               break;
        case NFS4_OPEN_STID:
        case NFS4_LOCK_STID:
                ols = openlockstateid(s);
                if (ols->st_stateowner->so_is_open_owner
                                && !(openowner(ols->st_stateowner)->oo_flags
                                                & NFS4_OO_CONFIRMED))
-                       return nfserr_bad_stateid;
-               return nfs_ok;
+                       status = nfserr_bad_stateid;
+               else
+                       status = nfs_ok;
+               break;
        default:
                printk("unknown stateid type %x\n", s->sc_type);
+               /* Fallthrough */
        case NFS4_CLOSED_STID:
-               return nfserr_bad_stateid;
+       case NFS4_CLOSED_DELEG_STID:
+               status = nfserr_bad_stateid;
        }
+out_unlock:
+       spin_unlock(&cl->cl_lock);
+       return status;
 }
 
 static __be32
@@ -3786,22 +4332,19 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
                     stateid_t *stateid, unsigned char typemask,
                     struct nfs4_stid **s, struct nfsd_net *nn)
 {
-       struct nfs4_client *cl;
        __be32 status;
-       bool sessions = cstate->minorversion != 0;
 
        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
                return nfserr_bad_stateid;
-       status = lookup_clientid(&stateid->si_opaque.so_clid, sessions,
-                                                       nn, &cl);
+       status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
        if (status == nfserr_stale_clientid) {
-               if (sessions)
+               if (cstate->session)
                        return nfserr_bad_stateid;
                return nfserr_stale_stateid;
        }
        if (status)
                return status;
-       *s = find_stateid_by_type(cl, stateid, typemask);
+       *s = find_stateid_by_type(cstate->clp, stateid, typemask);
        if (!*s)
                return nfserr_bad_stateid;
        return nfs_ok;
@@ -3838,7 +4381,7 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
                                NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
                                &s, nn);
        if (status)
-               goto out;
+               goto unlock_state;
        status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
        if (status)
                goto out;
@@ -3849,12 +4392,13 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
                if (status)
                        goto out;
                if (filpp) {
-                       file = dp->dl_file->fi_deleg_file;
+                       file = dp->dl_stid.sc_file->fi_deleg_file;
                        if (!file) {
                                WARN_ON_ONCE(1);
                                status = nfserr_serverfault;
                                goto out;
                        }
+                       get_file(file);
                }
                break;
        case NFS4_OPEN_STID:
@@ -3870,10 +4414,12 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
                if (status)
                        goto out;
                if (filpp) {
+                       struct nfs4_file *fp = stp->st_stid.sc_file;
+
                        if (flags & RD_STATE)
-                               file = find_readable_file(stp->st_file);
+                               file = find_readable_file(fp);
                        else
-                               file = find_writeable_file(stp->st_file);
+                               file = find_writeable_file(fp);
                }
                break;
        default:
@@ -3882,23 +4428,14 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
        }
        status = nfs_ok;
        if (file)
-               *filpp = get_file(file);
+               *filpp = file;
 out:
+       nfs4_put_stid(s);
+unlock_state:
        nfs4_unlock_state();
        return status;
 }
 
-static __be32
-nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
-{
-       struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
-
-       if (check_for_locks(stp->st_file, lo))
-               return nfserr_locks_held;
-       release_lockowner_if_empty(lo);
-       return nfs_ok;
-}
-
 /*
  * Test if the stateid is valid
  */
@@ -3925,35 +4462,50 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        stateid_t *stateid = &free_stateid->fr_stateid;
        struct nfs4_stid *s;
        struct nfs4_delegation *dp;
+       struct nfs4_ol_stateid *stp;
        struct nfs4_client *cl = cstate->session->se_client;
        __be32 ret = nfserr_bad_stateid;
 
        nfs4_lock_state();
-       s = find_stateid(cl, stateid);
+       spin_lock(&cl->cl_lock);
+       s = find_stateid_locked(cl, stateid);
        if (!s)
-               goto out;
+               goto out_unlock;
        switch (s->sc_type) {
        case NFS4_DELEG_STID:
                ret = nfserr_locks_held;
-               goto out;
+               break;
        case NFS4_OPEN_STID:
-       case NFS4_LOCK_STID:
                ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
                if (ret)
-                       goto out;
-               if (s->sc_type == NFS4_LOCK_STID)
-                       ret = nfsd4_free_lock_stateid(openlockstateid(s));
-               else
-                       ret = nfserr_locks_held;
+                       break;
+               ret = nfserr_locks_held;
                break;
+       case NFS4_LOCK_STID:
+               ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
+               if (ret)
+                       break;
+               stp = openlockstateid(s);
+               ret = nfserr_locks_held;
+               if (check_for_locks(stp->st_stid.sc_file,
+                                   lockowner(stp->st_stateowner)))
+                       break;
+               unhash_lock_stateid(stp);
+               spin_unlock(&cl->cl_lock);
+               nfs4_put_stid(s);
+               ret = nfs_ok;
+               goto out;
        case NFS4_REVOKED_DELEG_STID:
                dp = delegstateid(s);
-               destroy_revoked_delegation(dp);
+               list_del_init(&dp->dl_recall_lru);
+               spin_unlock(&cl->cl_lock);
+               nfs4_put_stid(s);
                ret = nfs_ok;
-               break;
-       default:
-               ret = nfserr_bad_stateid;
+               goto out;
+       /* Default falls through and returns nfserr_bad_stateid */
        }
+out_unlock:
+       spin_unlock(&cl->cl_lock);
 out:
        nfs4_unlock_state();
        return ret;
@@ -4010,12 +4562,13 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
        if (status)
                return status;
        stp = openlockstateid(s);
-       if (!nfsd4_has_session(cstate))
-               cstate->replay_owner = stp->st_stateowner;
+       nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
 
        status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
        if (!status)
                *stpp = stp;
+       else
+               nfs4_put_stid(&stp->st_stid);
        return status;
 }
 
@@ -4024,14 +4577,18 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
 {
        __be32 status;
        struct nfs4_openowner *oo;
+       struct nfs4_ol_stateid *stp;
 
        status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
-                                               NFS4_OPEN_STID, stpp, nn);
+                                               NFS4_OPEN_STID, &stp, nn);
        if (status)
                return status;
-       oo = openowner((*stpp)->st_stateowner);
-       if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
+       oo = openowner(stp->st_stateowner);
+       if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
+               nfs4_put_stid(&stp->st_stid);
                return nfserr_bad_stateid;
+       }
+       *stpp = stp;
        return nfs_ok;
 }
 
@@ -4061,7 +4618,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        oo = openowner(stp->st_stateowner);
        status = nfserr_bad_stateid;
        if (oo->oo_flags & NFS4_OO_CONFIRMED)
-               goto out;
+               goto put_stateid;
        oo->oo_flags |= NFS4_OO_CONFIRMED;
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -4070,10 +4627,11 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 
        nfsd4_client_record_create(oo->oo_owner.so_client);
        status = nfs_ok;
+put_stateid:
+       nfs4_put_stid(&stp->st_stid);
 out:
        nfsd4_bump_seqid(cstate, status);
-       if (!cstate->replay_owner)
-               nfs4_unlock_state();
+       nfs4_unlock_state();
        return status;
 }
 
@@ -4081,7 +4639,7 @@ static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 a
 {
        if (!test_access(access, stp))
                return;
-       nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
+       nfs4_file_put_access(stp->st_stid.sc_file, access);
        clear_access(access, stp);
 }
 
@@ -4103,16 +4661,6 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
        }
 }
 
-static void
-reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
-{
-       int i;
-       for (i = 0; i < 4; i++) {
-               if ((i & deny) != i)
-                       clear_deny(i, stp);
-       }
-}
-
 __be32
 nfsd4_open_downgrade(struct svc_rqst *rqstp,
                     struct nfsd4_compound_state *cstate,
@@ -4137,14 +4685,14 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
                goto out; 
        status = nfserr_inval;
        if (!test_access(od->od_share_access, stp)) {
-               dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
+               dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
                        stp->st_access_bmap, od->od_share_access);
-               goto out;
+               goto put_stateid;
        }
        if (!test_deny(od->od_share_deny, stp)) {
-               dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
+               dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
                        stp->st_deny_bmap, od->od_share_deny);
-               goto out;
+               goto put_stateid;
        }
        nfs4_stateid_downgrade(stp, od->od_share_access);
 
@@ -4153,33 +4701,31 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
        status = nfs_ok;
+put_stateid:
+       nfs4_put_stid(&stp->st_stid);
 out:
        nfsd4_bump_seqid(cstate, status);
-       if (!cstate->replay_owner)
-               nfs4_unlock_state();
+       nfs4_unlock_state();
        return status;
 }
 
 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
 {
        struct nfs4_client *clp = s->st_stid.sc_client;
-       struct nfs4_openowner *oo = openowner(s->st_stateowner);
+       LIST_HEAD(reaplist);
 
        s->st_stid.sc_type = NFS4_CLOSED_STID;
-       unhash_open_stateid(s);
+       spin_lock(&clp->cl_lock);
+       unhash_open_stateid(s, &reaplist);
 
        if (clp->cl_minorversion) {
-               free_generic_stateid(s);
-               if (list_empty(&oo->oo_owner.so_stateids))
-                       release_openowner(oo);
+               put_ol_stateid_locked(s, &reaplist);
+               spin_unlock(&clp->cl_lock);
+               free_ol_stateid_reaplist(&reaplist);
        } else {
-               oo->oo_last_closed_stid = s;
-               /*
-                * In the 4.0 case we need to keep the owners around a
-                * little while to handle CLOSE replay.
-                */
-               if (list_empty(&oo->oo_owner.so_stateids))
-                       move_to_close_lru(oo, clp->net);
+               spin_unlock(&clp->cl_lock);
+               free_ol_stateid_reaplist(&reaplist);
+               move_to_close_lru(s, clp->net);
        }
 }
 
@@ -4210,9 +4756,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
 
        nfsd4_close_open_stateid(stp);
+
+       /* put reference from nfs4_preprocess_seqid_op */
+       nfs4_put_stid(&stp->st_stid);
 out:
-       if (!cstate->replay_owner)
-               nfs4_unlock_state();
+       nfs4_unlock_state();
        return status;
 }
 
@@ -4236,9 +4784,11 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        dp = delegstateid(s);
        status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status)
-               goto out;
+               goto put_stateid;
 
        destroy_delegation(dp);
+put_stateid:
+       nfs4_put_stid(&dp->dl_stid);
 out:
        nfs4_unlock_state();
 
@@ -4321,22 +4871,53 @@ nevermind:
 }
 
 static struct nfs4_lockowner *
-find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
-               struct nfsd_net *nn)
+find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
+               struct nfs4_client *clp)
 {
-       unsigned int strhashval = ownerstr_hashval(clid->cl_id, owner);
+       unsigned int strhashval = ownerstr_hashval(owner);
        struct nfs4_stateowner *so;
 
-       list_for_each_entry(so, &nn->ownerstr_hashtbl[strhashval], so_strhash) {
+       list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
+                           so_strhash) {
                if (so->so_is_open_owner)
                        continue;
-               if (!same_owner_str(so, owner, clid))
+               if (!same_owner_str(so, owner))
                        continue;
+               atomic_inc(&so->so_count);
                return lockowner(so);
        }
        return NULL;
 }
 
+static struct nfs4_lockowner *
+find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
+               struct nfs4_client *clp)
+{
+       struct nfs4_lockowner *lo;
+
+       spin_lock(&clp->cl_lock);
+       lo = find_lockowner_str_locked(clid, owner, clp);
+       spin_unlock(&clp->cl_lock);
+       return lo;
+}
+
+static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
+{
+       unhash_lockowner_locked(lockowner(sop));
+}
+
+static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
+{
+       struct nfs4_lockowner *lo = lockowner(sop);
+
+       kmem_cache_free(lockowner_slab, lo);
+}
+
+static const struct nfs4_stateowner_operations lockowner_ops = {
+       .so_unhash =    nfs4_unhash_lockowner,
+       .so_free =      nfs4_free_lockowner,
+};
+
 /*
  * Alloc a lock owner structure.
  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 
@@ -4345,56 +4926,107 @@ find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
  * strhashval = ownerstr_hashval
  */
 static struct nfs4_lockowner *
-alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
-       struct nfs4_lockowner *lo;
-       struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
+                          struct nfs4_ol_stateid *open_stp,
+                          struct nfsd4_lock *lock)
+{
+       struct nfs4_lockowner *lo, *ret;
 
        lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
        if (!lo)
                return NULL;
        INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
        lo->lo_owner.so_is_open_owner = 0;
-       /* It is the openowner seqid that will be incremented in encode in the
-        * case of new lockowners; so increment the lock seqid manually: */
-       lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
-       list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
+       lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
+       lo->lo_owner.so_ops = &lockowner_ops;
+       spin_lock(&clp->cl_lock);
+       ret = find_lockowner_str_locked(&clp->cl_clientid,
+                       &lock->lk_new_owner, clp);
+       if (ret == NULL) {
+               list_add(&lo->lo_owner.so_strhash,
+                        &clp->cl_ownerstr_hashtbl[strhashval]);
+               ret = lo;
+       } else
+               nfs4_free_lockowner(&lo->lo_owner);
+       spin_unlock(&clp->cl_lock);
-       return lo;
+       return ret;
 }
 
-static struct nfs4_ol_stateid *
-alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
+static void
+init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+                 struct nfs4_file *fp, struct inode *inode,
+                 struct nfs4_ol_stateid *open_stp)
 {
-       struct nfs4_ol_stateid *stp;
        struct nfs4_client *clp = lo->lo_owner.so_client;
 
-       stp = nfs4_alloc_stateid(clp);
-       if (stp == NULL)
-               return NULL;
+       lockdep_assert_held(&clp->cl_lock);
+
+       atomic_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_LOCK_STID;
-       list_add(&stp->st_perfile, &fp->fi_stateids);
-       list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
        stp->st_stateowner = &lo->lo_owner;
+       atomic_inc(&lo->lo_owner.so_count);
        get_nfs4_file(fp);
-       stp->st_file = fp;
+       stp->st_stid.sc_file = fp;
+       stp->st_stid.sc_free = nfs4_free_lock_stateid;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
        list_add(&stp->st_locks, &open_stp->st_locks);
-       return stp;
+       list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
+       spin_lock(&fp->fi_lock);
+       list_add(&stp->st_perfile, &fp->fi_stateids);
+       spin_unlock(&fp->fi_lock);
 }
 
 static struct nfs4_ol_stateid *
 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
 {
        struct nfs4_ol_stateid *lst;
+       struct nfs4_client *clp = lo->lo_owner.so_client;
+
+       lockdep_assert_held(&clp->cl_lock);
 
        list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
-               if (lst->st_file == fp)
+               if (lst->st_stid.sc_file == fp) {
+                       atomic_inc(&lst->st_stid.sc_count);
                        return lst;
+               }
        }
        return NULL;
 }
 
+static struct nfs4_ol_stateid *
+find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
+                           struct inode *inode, struct nfs4_ol_stateid *ost,
+                           bool *new)
+{
+       struct nfs4_stid *ns = NULL;
+       struct nfs4_ol_stateid *lst;
+       struct nfs4_openowner *oo = openowner(ost->st_stateowner);
+       struct nfs4_client *clp = oo->oo_owner.so_client;
+
+       spin_lock(&clp->cl_lock);
+       lst = find_lock_stateid(lo, fi);
+       if (lst == NULL) {
+               spin_unlock(&clp->cl_lock);
+               ns = nfs4_alloc_stid(clp, stateid_slab);
+               if (ns == NULL)
+                       return NULL;
+
+               spin_lock(&clp->cl_lock);
+               lst = find_lock_stateid(lo, fi);
+               if (likely(!lst)) {
+                       lst = openlockstateid(ns);
+                       init_lock_stateid(lst, lo, fi, inode, ost);
+                       ns = NULL;
+                       *new = true;
+               }
+       }
+       spin_unlock(&clp->cl_lock);
+       if (ns)
+               nfs4_put_stid(ns);
+       return lst;
+}
 
 static int
 check_lock_length(u64 offset, u64 length)
@@ -4405,48 +5037,53 @@ check_lock_length(u64 offset, u64 length)
 
 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
 {
-       struct nfs4_file *fp = lock_stp->st_file;
-       int oflag = nfs4_access_to_omode(access);
+       struct nfs4_file *fp = lock_stp->st_stid.sc_file;
+
+       lockdep_assert_held(&fp->fi_lock);
 
        if (test_access(access, lock_stp))
                return;
-       nfs4_file_get_access(fp, oflag);
+       __nfs4_file_get_access(fp, access);
        set_access(access, lock_stp);
 }
 
-static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
+static __be32
+lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
+                           struct nfs4_ol_stateid *ost,
+                           struct nfsd4_lock *lock,
+                           struct nfs4_ol_stateid **lst, bool *new)
 {
-       struct nfs4_file *fi = ost->st_file;
+       __be32 status;
+       struct nfs4_file *fi = ost->st_stid.sc_file;
        struct nfs4_openowner *oo = openowner(ost->st_stateowner);
        struct nfs4_client *cl = oo->oo_owner.so_client;
+       struct inode *inode = cstate->current_fh.fh_dentry->d_inode;
        struct nfs4_lockowner *lo;
        unsigned int strhashval;
-       struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id);
 
-       lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, nn);
+       lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
        if (!lo) {
-               strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
-                               &lock->v.new.owner);
+               strhashval = ownerstr_hashval(&lock->v.new.owner);
                lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
                if (lo == NULL)
                        return nfserr_jukebox;
        } else {
                /* with an existing lockowner, seqids must be the same */
+               status = nfserr_bad_seqid;
                if (!cstate->minorversion &&
                    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
-                       return nfserr_bad_seqid;
+                       goto out;
        }
 
-       *lst = find_lock_stateid(lo, fi);
+       *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
        if (*lst == NULL) {
-               *lst = alloc_init_lock_stateid(lo, fi, ost);
-               if (*lst == NULL) {
-                       release_lockowner_if_empty(lo);
-                       return nfserr_jukebox;
-               }
-               *new = true;
+               status = nfserr_jukebox;
+               goto out;
        }
-       return nfs_ok;
+       status = nfs_ok;
+out:
+       nfs4_put_stateowner(&lo->lo_owner);
+       return status;
 }
 
 /*
@@ -4458,14 +5095,16 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 {
        struct nfs4_openowner *open_sop = NULL;
        struct nfs4_lockowner *lock_sop = NULL;
-       struct nfs4_ol_stateid *lock_stp;
+       struct nfs4_ol_stateid *lock_stp = NULL;
+       struct nfs4_ol_stateid *open_stp = NULL;
+       struct nfs4_file *fp;
        struct file *filp = NULL;
        struct file_lock *file_lock = NULL;
        struct file_lock *conflock = NULL;
        __be32 status = 0;
-       bool new_state = false;
        int lkflg;
        int err;
+       bool new = false;
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
@@ -4485,8 +5124,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        nfs4_lock_state();
 
        if (lock->lk_is_new) {
-               struct nfs4_ol_stateid *open_stp = NULL;
-
                if (nfsd4_has_session(cstate))
                        /* See rfc 5661 18.10.3: given clientid is ignored: */
                        memcpy(&lock->v.new.clientid,
@@ -4510,12 +5147,13 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                                                &lock->v.new.clientid))
                        goto out;
                status = lookup_or_create_lock_state(cstate, open_stp, lock,
-                                                       &lock_stp, &new_state);
-       } else
+                                                       &lock_stp, &new);
+       } else {
                status = nfs4_preprocess_seqid_op(cstate,
                                       lock->lk_old_lock_seqid,
                                       &lock->lk_old_lock_stateid,
                                       NFS4_LOCK_STID, &lock_stp, nn);
+       }
        if (status)
                goto out;
        lock_sop = lockowner(lock_stp->st_stateowner);
@@ -4539,20 +5177,25 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                goto out;
        }
 
+       fp = lock_stp->st_stid.sc_file;
        locks_init_lock(file_lock);
        switch (lock->lk_type) {
                case NFS4_READ_LT:
                case NFS4_READW_LT:
-                       filp = find_readable_file(lock_stp->st_file);
+                       spin_lock(&fp->fi_lock);
+                       filp = find_readable_file_locked(fp);
                        if (filp)
                                get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
+                       spin_unlock(&fp->fi_lock);
                        file_lock->fl_type = F_RDLCK;
                        break;
                case NFS4_WRITE_LT:
                case NFS4_WRITEW_LT:
-                       filp = find_writeable_file(lock_stp->st_file);
+                       spin_lock(&fp->fi_lock);
+                       filp = find_writeable_file_locked(fp);
                        if (filp)
                                get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
+                       spin_unlock(&fp->fi_lock);
                        file_lock->fl_type = F_WRLCK;
                        break;
                default:
@@ -4601,11 +5244,28 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                break;
        }
 out:
-       if (status && new_state)
-               release_lock_stateid(lock_stp);
+       if (filp)
+               fput(filp);
+       if (lock_stp) {
+               /* Bump seqid manually if the 4.0 replay owner is openowner */
+               if (cstate->replay_owner &&
+                   cstate->replay_owner != &lock_sop->lo_owner &&
+                   seqid_mutating_err(ntohl(status)))
+                       lock_sop->lo_owner.so_seqid++;
+
+               /*
+                * If this is a new, never-before-used stateid, and we are
+                * returning an error, then just go ahead and release it.
+                */
+               if (status && new)
+                       release_lock_stateid(lock_stp);
+
+               nfs4_put_stid(&lock_stp->st_stid);
+       }
+       if (open_stp)
+               nfs4_put_stid(&open_stp->st_stid);
        nfsd4_bump_seqid(cstate, status);
-       if (!cstate->replay_owner)
-               nfs4_unlock_state();
+       nfs4_unlock_state();
        if (file_lock)
                locks_free_lock(file_lock);
        if (conflock)
@@ -4638,7 +5298,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
            struct nfsd4_lockt *lockt)
 {
        struct file_lock *file_lock = NULL;
-       struct nfs4_lockowner *lo;
+       struct nfs4_lockowner *lo = NULL;
        __be32 status;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
@@ -4651,7 +5311,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        nfs4_lock_state();
 
        if (!nfsd4_has_session(cstate)) {
-               status = lookup_clientid(&lockt->lt_clientid, false, nn, NULL);
+               status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
                if (status)
                        goto out;
        }
@@ -4681,7 +5341,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                goto out;
        }
 
-       lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner, nn);
+       lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
+                               cstate->clp);
        if (lo)
                file_lock->fl_owner = (fl_owner_t)lo;
        file_lock->fl_pid = current->tgid;
@@ -4701,6 +5362,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
        }
 out:
+       if (lo)
+               nfs4_put_stateowner(&lo->lo_owner);
        nfs4_unlock_state();
        if (file_lock)
                locks_free_lock(file_lock);
@@ -4732,16 +5395,16 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                                        &stp, nn);
        if (status)
                goto out;
-       filp = find_any_file(stp->st_file);
+       filp = find_any_file(stp->st_stid.sc_file);
        if (!filp) {
                status = nfserr_lock_range;
-               goto out;
+               goto put_stateid;
        }
        file_lock = locks_alloc_lock();
        if (!file_lock) {
                dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
                status = nfserr_jukebox;
-               goto out;
+               goto fput;
        }
        locks_init_lock(file_lock);
        file_lock->fl_type = F_UNLCK;
@@ -4763,41 +5426,52 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        }
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
-
+fput:
+       fput(filp);
+put_stateid:
+       nfs4_put_stid(&stp->st_stid);
 out:
        nfsd4_bump_seqid(cstate, status);
-       if (!cstate->replay_owner)
-               nfs4_unlock_state();
+       nfs4_unlock_state();
        if (file_lock)
                locks_free_lock(file_lock);
        return status;
 
 out_nfserr:
        status = nfserrno(err);
-       goto out;
+       goto fput;
 }
 
 /*
  * returns
- *     1: locks held by lockowner
- *     0: no locks held by lockowner
+ *     true:  locks held by lockowner
+ *     false: no locks held by lockowner
  */
-static int
-check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
+static bool
+check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
 {
        struct file_lock **flpp;
-       struct inode *inode = filp->fi_inode;
-       int status = 0;
+       int status = false;
+       struct file *filp = find_any_file(fp);
+       struct inode *inode;
+
+       if (!filp) {
+               /* Any valid lock stateid should have some sort of access */
+               WARN_ON_ONCE(1);
+               return status;
+       }
+
+       inode = file_inode(filp);
 
        spin_lock(&inode->i_lock);
        for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
                if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
-                       status = 1;
-                       goto out;
+                       status = true;
+                       break;
                }
        }
-out:
        spin_unlock(&inode->i_lock);
+       fput(filp);
        return status;
 }
 
@@ -4807,50 +5481,49 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
                        struct nfsd4_release_lockowner *rlockowner)
 {
        clientid_t *clid = &rlockowner->rl_clientid;
-       struct nfs4_stateowner *sop = NULL, *tmp;
-       struct nfs4_lockowner *lo;
+       struct nfs4_stateowner *sop;
+       struct nfs4_lockowner *lo = NULL;
        struct nfs4_ol_stateid *stp;
        struct xdr_netobj *owner = &rlockowner->rl_owner;
-       unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
+       unsigned int hashval = ownerstr_hashval(owner);
        __be32 status;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+       struct nfs4_client *clp;
 
        dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
                clid->cl_boot, clid->cl_id);
 
        nfs4_lock_state();
 
-       status = lookup_clientid(clid, cstate->minorversion, nn, NULL);
+       status = lookup_clientid(clid, cstate, nn);
        if (status)
                goto out;
 
-       status = nfserr_locks_held;
-
+       clp = cstate->clp;
        /* Find the matching lock stateowner */
-       list_for_each_entry(tmp, &nn->ownerstr_hashtbl[hashval], so_strhash) {
-               if (tmp->so_is_open_owner)
+       spin_lock(&clp->cl_lock);
+       list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
+                           so_strhash) {
+
+               if (sop->so_is_open_owner || !same_owner_str(sop, owner))
                        continue;
-               if (same_owner_str(tmp, owner, clid)) {
-                       sop = tmp;
-                       break;
-               }
-       }
 
-       /* No matching owner found, maybe a replay? Just declare victory... */
-       if (!sop) {
-               status = nfs_ok;
-               goto out;
-       }
+               /* see if there are still any locks associated with it */
+               lo = lockowner(sop);
+               list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
+                       if (check_for_locks(stp->st_stid.sc_file, lo)) {
+                               status = nfserr_locks_held;
+                               spin_unlock(&clp->cl_lock);
+                               goto out;
+                       }
+               }
 
-       lo = lockowner(sop);
-       /* see if there are still any locks associated with it */
-       list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
-               if (check_for_locks(stp->st_file, lo))
-                       goto out;
+               atomic_inc(&sop->so_count);
+               break;
        }
-
-       status = nfs_ok;
-       release_lockowner(lo);
+       spin_unlock(&clp->cl_lock);
+       if (lo)
+               release_lockowner(lo);
 out:
        nfs4_unlock_state();
        return status;
@@ -4940,16 +5613,21 @@ nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
 * Called from OPEN. Look for clientid in reclaim list.
 */
 __be32
-nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn)
+nfs4_check_open_reclaim(clientid_t *clid,
+               struct nfsd4_compound_state *cstate,
+               struct nfsd_net *nn)
 {
-       struct nfs4_client *clp;
+       __be32 status;
 
        /* find clientid in conf_id_hashtbl */
-       clp = find_confirmed_client(clid, sessions, nn);
-       if (clp == NULL)
+       status = lookup_clientid(clid, cstate, nn);
+       if (status)
+               return nfserr_reclaim_bad;
+
+       if (nfsd4_client_record_check(cstate->clp))
                return nfserr_reclaim_bad;
 
-       return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok;
+       return nfs_ok;
 }
 
 #ifdef CONFIG_NFSD_FAULT_INJECTION
@@ -5049,8 +5727,19 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
 
        lockdep_assert_held(&state_lock);
        list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
-               if (victims)
-                       list_move(&dp->dl_recall_lru, victims);
+               if (victims) {
+                       /*
+                        * It's not safe to mess with delegations that have a
+                        * non-zero dl_time. They might have already been broken
+                        * and could be processed by the laundromat outside of
+                        * the state_lock. Just leave them be.
+                        */
+                       if (dp->dl_time != 0)
+                               continue;
+
+                       unhash_delegation_locked(dp);
+                       list_add(&dp->dl_recall_lru, victims);
+               }
                if (++count == max)
                        break;
        }
@@ -5067,22 +5756,29 @@ u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
        count = nfsd_find_all_delegations(clp, max, &victims);
        spin_unlock(&state_lock);
 
-       list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
+       list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) {
+               list_del_init(&dp->dl_recall_lru);
                revoke_delegation(dp);
+       }
 
        return count;
 }
 
 u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max)
 {
-       struct nfs4_delegation *dp, *next;
+       struct nfs4_delegation *dp;
        LIST_HEAD(victims);
        u64 count;
 
        spin_lock(&state_lock);
        count = nfsd_find_all_delegations(clp, max, &victims);
-       list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
+       while (!list_empty(&victims)) {
+               dp = list_first_entry(&victims, struct nfs4_delegation,
+                                       dl_recall_lru);
+               list_del_init(&dp->dl_recall_lru);
+               dp->dl_time = 0;
                nfsd_break_one_deleg(dp);
+       }
        spin_unlock(&state_lock);
 
        return count;
@@ -5169,10 +5865,6 @@ static int nfs4_state_create_net(struct net *net)
                        CLIENT_HASH_SIZE, GFP_KERNEL);
        if (!nn->unconf_id_hashtbl)
                goto err_unconf_id;
-       nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
-                       OWNER_HASH_SIZE, GFP_KERNEL);
-       if (!nn->ownerstr_hashtbl)
-               goto err_ownerstr;
        nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
                        SESSION_HASH_SIZE, GFP_KERNEL);
        if (!nn->sessionid_hashtbl)
@@ -5182,8 +5874,6 @@ static int nfs4_state_create_net(struct net *net)
                INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
                INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
        }
-       for (i = 0; i < OWNER_HASH_SIZE; i++)
-               INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
        for (i = 0; i < SESSION_HASH_SIZE; i++)
                INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
        nn->conf_name_tree = RB_ROOT;
@@ -5199,8 +5889,6 @@ static int nfs4_state_create_net(struct net *net)
        return 0;
 
 err_sessionid:
-       kfree(nn->ownerstr_hashtbl);
-err_ownerstr:
        kfree(nn->unconf_id_hashtbl);
 err_unconf_id:
        kfree(nn->conf_id_hashtbl);
@@ -5230,7 +5918,6 @@ nfs4_state_destroy_net(struct net *net)
        }
 
        kfree(nn->sessionid_hashtbl);
-       kfree(nn->ownerstr_hashtbl);
        kfree(nn->unconf_id_hashtbl);
        kfree(nn->conf_id_hashtbl);
        put_net(net);
@@ -5299,12 +5986,14 @@ nfs4_state_shutdown_net(struct net *net)
        spin_lock(&state_lock);
        list_for_each_safe(pos, next, &nn->del_recall_lru) {
                dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-               list_move(&dp->dl_recall_lru, &reaplist);
+               unhash_delegation_locked(dp);
+               list_add(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&state_lock);
        list_for_each_safe(pos, next, &reaplist) {
                dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-               destroy_delegation(dp);
+               list_del_init(&dp->dl_recall_lru);
+               nfs4_put_stid(&dp->dl_stid);
        }
 
        nfsd4_client_tracking_exit(net);