[PATCH] NFS: cleanup: shrink struct nfs_open_context
[pandora-kernel.git] fs/nfs/nfs4state.c
/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);

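/*
 * Per-server NFSv4 state: init_nfsv4_state() clears the server's client
 * pointer and sibling list; destroy_nfsv4_state() releases the mount
 * path and drops the server's reference to its nfs4_client.
 */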
void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
	if (server->mnt_path) {
		kfree(server->mnt_path);
		server->mnt_path = NULL;
	}
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memset(clp, 0, sizeof(*clp));
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	init_waitqueue_head(&clp->cl_waitq);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_OK;
	return clp;
}

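/*
 * Free a client once the last reference is gone: empty the pool of
 * unused state owners, release the cached credential and the idmapper,
 * shut down the rpc client and bring down the callback service.
 */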
static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (clp->cl_rpcclient)
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}

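/*
 * Look up an nfs4_client by server address and take a reference on it.
 * The caller must hold state_spinlock; nfs4_find_client() below is the
 * locked wrapper.
 */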
static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}

struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}

void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}

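/*
 * Establish the client's lease: SETCLIENTID followed by
 * SETCLIENTID_CONFIRM, then start lease renewal on success.
 */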
static int __nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
	return nfs4_map_errors(__nfs4_init_client(clp));
}

u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}

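/*
 * Try to recycle a state_owner from the per-client pool of unused
 * entries. Called with clp->cl_lock held.
 */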
static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

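/*
 * Find an existing state_owner matching this credential and move it to
 * the head of the list. Called with clp->cl_lock held.
 */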
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kmalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	init_MUTEX(&sp->so_sema);
	sp->so_seqid = 0;		/* arbitrary */
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	atomic_set(&sp->so_count, 1);
	return sp;
}

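/*
 * Unhash a state_owner from the client's list, e.g. after the server
 * has returned NFS4ERR_BAD_SEQID for it. The final reference is still
 * dropped via nfs4_put_state_owner().
 */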
void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	if (new)
		kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}

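/*
 * Allocate a blank open state (nfs4_state) holding a single reference
 * and the zero stateid.
 */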
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->state = 0;
	state->nreaders = 0;
	state->nwriters = 0;
	state->flags = 0;
	memset(state->stateid.data, 0, sizeof(state->stateid.data));
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	init_MUTEX(&state->lock_sema);
	rwlock_init(&state->state_lock);
	return state;
}

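/*
 * Find an open state on this inode that matches the credential and can
 * satisfy the requested open mode, and account an extra reader and/or
 * writer against it. The caller holds inode->i_lock.
 */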
static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	mode &= (FMODE_READ|FMODE_WRITE);
	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner->so_cred != cred)
			continue;
		if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
			continue;
		if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
			continue;
		if ((state->state & mode) != mode)
			continue;
		atomic_inc(&state->count);
		if (mode & FMODE_READ)
			state->nreaders++;
		if (mode & FMODE_WRITE)
			state->nwriters++;
		return state;
	}
	return NULL;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->nreaders == 0 && state->nwriters == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs4_state *state;

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state(inode, cred, mode);
	spin_unlock(&inode->i_lock);
	return state;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

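/*
 * Return the open state for this (inode, state_owner) pair, allocating
 * and hashing a new one if none exists yet.
 */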
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		/* Caller *must* be holding owner->so_sema */
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem and owner->so_sema!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
		return;
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	spin_unlock(&inode->i_lock);
	list_del(&state->open_states);
	iput(inode);
	BUG_ON(state->state != 0);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Beware! The caller must not be holding clp->cl_sem or
 * owner->so_sema; both are taken below.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	struct nfs4_client *clp = owner->so_client;
	int newstate;

	atomic_inc(&owner->so_count);
	down_read(&clp->cl_sem);
	down(&owner->so_sema);
	/* Protect against nfs4_find_state() */
	spin_lock(&inode->i_lock);
	if (mode & FMODE_READ)
		state->nreaders--;
	if (mode & FMODE_WRITE)
		state->nwriters--;
	if (state->nwriters == 0) {
		if (state->nreaders == 0)
			list_del_init(&state->inode_states);
		/* See reclaim code */
		list_move_tail(&state->open_states, &owner->so_states);
	}
	spin_unlock(&inode->i_lock);
	newstate = 0;
	if (state->state != 0) {
		if (state->nreaders)
			newstate |= FMODE_READ;
		if (state->nwriters)
			newstate |= FMODE_WRITE;
		if (state->state == newstate)
			goto out;
		if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
			return;
	}
out:
	nfs4_put_open_state(state);
	up(&owner->so_sema);
	nfs4_put_state_owner(owner);
	up_read(&clp->cl_sem);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

struct nfs4_lock_state *
nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	read_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	read_unlock(&state->state_lock);
	return lsp;
}

/*
 * Allocate a fresh lock_state for the given lock owner: the stateid is
 * zeroed and a new lock-owner id is assigned.
 *
 * The caller must be holding state->lock_sema
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_flags = 0;
	lsp->ls_seqid = 0;	/* arbitrary */
	lsp->ls_id = -1;
	memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	INIT_LIST_HEAD(&lsp->ls_locks);
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding state->lock_sema and clp->cl_sem
 */
struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp;

	lsp = nfs4_find_lock_state(state, owner);
	if (lsp == NULL)
		lsp = nfs4_alloc_lock_state(state, owner);
	return lsp;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void
nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	if (test_bit(LK_STATE_IN_USE, &state->flags)) {
		struct nfs4_lock_state *lsp;

		lsp = nfs4_find_lock_state(state, fl_owner);
		if (lsp) {
			memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
			nfs4_put_lock_state(lsp);
			return;
		}
	}
	memcpy(dst, &state->stateid, sizeof(*dst));
}

/*
 * Called with state->lock_sema and clp->cl_sem held.
 */
void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
{
	if (status == NFS_OK || seqid_mutating_err(-status))
		lsp->ls_seqid++;
}

/*
 * Check whether the request lock (type F_UNLCK) affects the fl lock.
 *
 * fl and request must have the same POSIX owner.
 *
 * return:
 * 0 -> fl not affected by request
 * 1 -> fl consumed by request
 */

static int
nfs4_check_unlock(struct file_lock *fl, struct file_lock *request)
{
	if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end)
		return 1;
	return 0;
}

/*
 * Post an initialized lock_state on the state->lock_states list.
 */
void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
{
	if (!list_empty(&lsp->ls_locks))
		return;
	atomic_inc(&lsp->ls_count);
	write_lock(&state->state_lock);
	list_add(&lsp->ls_locks, &state->lock_states);
	set_bit(LK_STATE_IN_USE, &state->flags);
	write_unlock(&state->state_lock);
}

/*
 * To decide whether to 'reap' the lock state:
 * 1) search i_flock for file_locks whose owner matches the lock_state.
 * 2) determine whether the unlock will consume every matching lock;
 *    if so, reap the lock state, otherwise leave it alone.
 */
void
nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (fl->fl_owner != lsp->ls_owner)
			continue;
		/* Exit if we find at least one lock which is not consumed */
		if (nfs4_check_unlock(fl, request) == 0)
			return;
	}

	write_lock(&state->state_lock);
	list_del_init(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	write_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void
nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	if (!atomic_dec_and_test(&lsp->ls_count))
		return;
	BUG_ON(!list_empty(&lsp->ls_locks));
	kfree(lsp);
}

/*
 * Called with sp->so_sema and clp->cl_sem held.
 *
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
{
	if (status == NFS_OK || seqid_mutating_err(-status))
		sp->so_seqid++;
	/* If the server returns BAD_SEQID, unhash state_owner here */
	if (status == -NFS4ERR_BAD_SEQID)
		nfs4_drop_state_owner(sp);
}

static int reclaimer(void *);
struct reclaimer_args {
	struct nfs4_client *clp;
	struct completion complete;
};

/*
 * State recovery routine
 */
void
nfs4_recover_state(void *data)
{
	struct nfs4_client *clp = (struct nfs4_client *)data;
	struct reclaimer_args args = {
		.clp = clp,
	};
	might_sleep();

	init_completion(&args.complete);

	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
		goto out_failed_clear;
	wait_for_completion(&args.complete);
	return;
out_failed_clear:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
		schedule_work(&clp->cl_recoverd);
}

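/*
 * Reclaim the POSIX byte-range locks held on this open state by
 * replaying them through the recovery ops.
 */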
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_owner, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

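/*
 * Reclaim every open state belonging to this state_owner, then the
 * byte-range locks hanging off each one.
 */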
static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		list_for_each_entry(lock, &state->lock_states, ls_locks)
			lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -ENOENT:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

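/*
 * The reclaimer thread runs the actual state recovery with exclusive
 * access to the client's state (cl_sem held for writing), choosing
 * reboot or network-partition recovery based on the RENEW result.
 */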
static int reclaimer(void *ptr)
{
	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
	struct nfs4_client *clp = args->clp;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	int status = 0;

	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
	allow_signal(SIGKILL);

	atomic_inc(&clp->cl_count);
	complete(&args->complete);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	status = nfs4_proc_renew(clp);
	switch (status) {
		case 0:
		case -NFS4ERR_CB_PATH_DOWN:
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			ops = &nfs4_reboot_recovery_ops;
			break;
		default:
			ops = &nfs4_network_partition_recovery_ops;
	};
	status = __nfs4_init_client(clp);
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on clp->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	up_write(&clp->cl_sem);
	unlock_kernel();
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_put_client(clp);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
				NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */