NFS: Revert pnfs ugliness from the generic NFS read code path
fs/nfs/pnfs.c (pandora-kernel.git)
1 /*
2  *  pNFS functions to call and manage layout drivers.
3  *
4  *  Copyright (c) 2002 [year of first publication]
5  *  The Regents of the University of Michigan
6  *  All Rights Reserved
7  *
8  *  Dean Hildebrand <dhildebz@umich.edu>
9  *
10  *  Permission is granted to use, copy, create derivative works, and
11  *  redistribute this software and such derivative works for any purpose,
12  *  so long as the name of the University of Michigan is not used in
13  *  any advertising or publicity pertaining to the use or distribution
14  *  of this software without specific, written prior authorization. If
15  *  the above copyright notice or any other identification of the
16  *  University of Michigan is included in any copy of any portion of
17  *  this software, then the disclaimer below must also be included.
18  *
19  *  This software is provided as is, without representation or warranty
20  *  of any kind either express or implied, including without limitation
21  *  the implied warranties of merchantability, fitness for a particular
22  *  purpose, or noninfringement.  The Regents of the University of
23  *  Michigan shall not be liable for any damages, including special,
24  *  indirect, incidental, or consequential damages, with respect to any
25  *  claim arising out of or in connection with the use of the software,
26  *  even if it has been or is hereafter advised of the possibility of
27  *  such damages.
28  */
29
30 #include <linux/nfs_fs.h>
31 #include <linux/nfs_page.h>
32 #include "internal.h"
33 #include "pnfs.h"
34 #include "iostat.h"
35
36 #define NFSDBG_FACILITY         NFSDBG_PNFS
37
38 /* Locking:
39  *
40  * pnfs_spinlock:
41  *      protects pnfs_modules_tbl.
42  */
43 static DEFINE_SPINLOCK(pnfs_spinlock);
44
45 /*
46  * pnfs_modules_tbl holds all pnfs modules
47  */
48 static LIST_HEAD(pnfs_modules_tbl);
49
50 /* Return the registered pnfs layout driver module matching given id */
51 static struct pnfs_layoutdriver_type *
52 find_pnfs_driver_locked(u32 id)
53 {
54         struct pnfs_layoutdriver_type *local;
55
56         list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
57                 if (local->id == id)
58                         goto out;
59         local = NULL;
60 out:
61         dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
62         return local;
63 }
64
65 static struct pnfs_layoutdriver_type *
66 find_pnfs_driver(u32 id)
67 {
68         struct pnfs_layoutdriver_type *local;
69
70         spin_lock(&pnfs_spinlock);
71         local = find_pnfs_driver_locked(id);
72         spin_unlock(&pnfs_spinlock);
73         return local;
74 }
75
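/* Drop the layout driver previously bound by set_pnfs_layoutdriver():
 * gives the driver a chance to clean up via ->clear_layoutdriver and
 * releases the module reference taken when the driver was set.
 */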
76 void
77 unset_pnfs_layoutdriver(struct nfs_server *nfss)
78 {
79         if (nfss->pnfs_curr_ld) {
80                 if (nfss->pnfs_curr_ld->clear_layoutdriver)
81                         nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
82                 module_put(nfss->pnfs_curr_ld->owner);
83         }
84         nfss->pnfs_curr_ld = NULL;
85 }
86
87 /*
88  * Try to set the server's pnfs module to the pnfs layout type specified by id.
89  * Currently only one pNFS layout driver per filesystem is supported.
90  *
91  * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
92  */
93 void
94 set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
95                       u32 id)
96 {
97         struct pnfs_layoutdriver_type *ld_type = NULL;
98
99         if (id == 0)
100                 goto out_no_driver;
101         if (!(server->nfs_client->cl_exchange_flags &
102                  (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
103                 printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
104                        id, server->nfs_client->cl_exchange_flags);
105                 goto out_no_driver;
106         }
107         ld_type = find_pnfs_driver(id);
108         if (!ld_type) {
109                 request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
110                 ld_type = find_pnfs_driver(id);
111                 if (!ld_type) {
112                         dprintk("%s: No pNFS module found for %u.\n",
113                                 __func__, id);
114                         goto out_no_driver;
115                 }
116         }
117         if (!try_module_get(ld_type->owner)) {
118                 dprintk("%s: Could not grab reference on module\n", __func__);
119                 goto out_no_driver;
120         }
121         server->pnfs_curr_ld = ld_type;
122         if (ld_type->set_layoutdriver
123             && ld_type->set_layoutdriver(server, mntfh)) {
124                 printk(KERN_ERR "%s: Error initializing pNFS layout driver %u.\n",
125                                 __func__, id);
126                 module_put(ld_type->owner);
127                 goto out_no_driver;
128         }
129
130         dprintk("%s: pNFS module for %u set\n", __func__, id);
131         return;
132
133 out_no_driver:
134         dprintk("%s: Using NFSv4 I/O\n", __func__);
135         server->pnfs_curr_ld = NULL;
136 }
137
138 int
139 pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
140 {
141         int status = -EINVAL;
142         struct pnfs_layoutdriver_type *tmp;
143
144         if (ld_type->id == 0) {
145                 printk(KERN_ERR "%s id 0 is reserved\n", __func__);
146                 return status;
147         }
148         if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
149                 printk(KERN_ERR "%s Layout driver must provide "
150                        "alloc_lseg and free_lseg.\n", __func__);
151                 return status;
152         }
153
154         spin_lock(&pnfs_spinlock);
155         tmp = find_pnfs_driver_locked(ld_type->id);
156         if (!tmp) {
157                 list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
158                 status = 0;
159                 dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
160                         ld_type->name);
161         } else {
162                 printk(KERN_ERR "%s Module with id %d already loaded!\n",
163                         __func__, ld_type->id);
164         }
165         spin_unlock(&pnfs_spinlock);
166
167         return status;
168 }
169 EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
170
171 void
172 pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
173 {
174         dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
175         spin_lock(&pnfs_spinlock);
176         list_del(&ld_type->pnfs_tblid);
177         spin_unlock(&pnfs_spinlock);
178 }
179 EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
180
181 /*
182  * pNFS client layout cache
183  */
184
185 /* Need to hold i_lock if caller does not already hold reference */
186 void
187 get_layout_hdr(struct pnfs_layout_hdr *lo)
188 {
189         atomic_inc(&lo->plh_refcount);
190 }
191
192 static struct pnfs_layout_hdr *
193 pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
194 {
195         struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
196         return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
197                 kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
198 }
199
200 static void
201 pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
202 {
203         struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
204         put_rpccred(lo->plh_lc_cred);
205         return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
206 }
207
208 static void
209 destroy_layout_hdr(struct pnfs_layout_hdr *lo)
210 {
211         dprintk("%s: freeing layout cache %p\n", __func__, lo);
212         BUG_ON(!list_empty(&lo->plh_layouts));
213         NFS_I(lo->plh_inode)->layout = NULL;
214         pnfs_free_layout_hdr(lo);
215 }
216
217 static void
218 put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
219 {
220         if (atomic_dec_and_test(&lo->plh_refcount))
221                 destroy_layout_hdr(lo);
222 }
223
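/* Release a reference on the layout header.  The inode's i_lock is only
 * taken when the refcount actually drops to zero, so that
 * destroy_layout_hdr() runs with the lock held.
 */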
224 void
225 put_layout_hdr(struct pnfs_layout_hdr *lo)
226 {
227         struct inode *inode = lo->plh_inode;
228
229         if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
230                 destroy_layout_hdr(lo);
231                 spin_unlock(&inode->i_lock);
232         }
233 }
234
235 static void
236 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
237 {
238         INIT_LIST_HEAD(&lseg->pls_list);
239         INIT_LIST_HEAD(&lseg->pls_lc_list);
240         atomic_set(&lseg->pls_refcount, 1);
241         smp_mb();
242         set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
243         lseg->pls_layout = lo;
244 }
245
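/* Hand a layout segment back to the layout driver and drop the layout
 * header reference it pinned.  Must be called without i_lock held, since
 * put_layout_hdr() may need to take it.
 */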
246 static void free_lseg(struct pnfs_layout_segment *lseg)
247 {
248         struct inode *ino = lseg->pls_layout->plh_inode;
249
250         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
251         /* Matched by get_layout_hdr in pnfs_insert_layout */
252         put_layout_hdr(NFS_I(ino)->layout);
253 }
254
255 static void
256 put_lseg_common(struct pnfs_layout_segment *lseg)
257 {
258         struct inode *inode = lseg->pls_layout->plh_inode;
259
260         WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
261         list_del_init(&lseg->pls_list);
262         if (list_empty(&lseg->pls_layout->plh_segs)) {
263                 set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
264                 /* Matched by initial refcount set in alloc_init_layout_hdr */
265                 put_layout_hdr_locked(lseg->pls_layout);
266         }
267         rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
268 }
269
270 void
271 put_lseg(struct pnfs_layout_segment *lseg)
272 {
273         struct inode *inode;
274
275         if (!lseg)
276                 return;
277
278         dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
279                 atomic_read(&lseg->pls_refcount),
280                 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
281         inode = lseg->pls_layout->plh_inode;
282         if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
283                 LIST_HEAD(free_me);
284
285                 put_lseg_common(lseg);
286                 list_add(&lseg->pls_list, &free_me);
287                 spin_unlock(&inode->i_lock);
288                 pnfs_free_lseg_list(&free_me);
289         }
290 }
291 EXPORT_SYMBOL_GPL(put_lseg);
292
293 static inline u64
294 end_offset(u64 start, u64 len)
295 {
296         u64 end;
297
298         end = start + len;
299         return end >= start ? end : NFS4_MAX_UINT64;
300 }
301
302 /* last octet in a range */
303 static inline u64
304 last_byte_offset(u64 start, u64 len)
305 {
306         u64 end;
307
308         BUG_ON(!len);
309         end = start + len;
310         return end > start ? end - 1 : NFS4_MAX_UINT64;
311 }
312
313 /*
314  * is l2 fully contained in l1?
315  *   start1                             end1
316  *   [----------------------------------)
317  *           start2           end2
318  *           [----------------)
319  */
320 static inline int
321 lo_seg_contained(struct pnfs_layout_range *l1,
322                  struct pnfs_layout_range *l2)
323 {
324         u64 start1 = l1->offset;
325         u64 end1 = end_offset(start1, l1->length);
326         u64 start2 = l2->offset;
327         u64 end2 = end_offset(start2, l2->length);
328
329         return (start1 <= start2) && (end1 >= end2);
330 }
331
332 /*
333  * are l1 and l2 intersecting?
334  *   start1                             end1
335  *   [----------------------------------)
336  *                              start2           end2
337  *                              [----------------)
338  */
339 static inline int
340 lo_seg_intersecting(struct pnfs_layout_range *l1,
341                     struct pnfs_layout_range *l2)
342 {
343         u64 start1 = l1->offset;
344         u64 end1 = end_offset(start1, l1->length);
345         u64 start2 = l2->offset;
346         u64 end2 = end_offset(start2, l2->length);
347
348         return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
349                (end2 == NFS4_MAX_UINT64 || end2 > start1);
350 }
351
352 static bool
353 should_free_lseg(struct pnfs_layout_range *lseg_range,
354                  struct pnfs_layout_range *recall_range)
355 {
356         return (recall_range->iomode == IOMODE_ANY ||
357                 lseg_range->iomode == recall_range->iomode) &&
358                lo_seg_intersecting(lseg_range, recall_range);
359 }
360
361 /* Returns 1 if lseg is removed from list, 0 otherwise */
362 static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
363                              struct list_head *tmp_list)
364 {
365         int rv = 0;
366
367         if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
368                 /* Remove the reference keeping the lseg in the
369                  * list.  It will now be removed when all
370                  * outstanding io is finished.
371                  */
372                 dprintk("%s: lseg %p ref %d\n", __func__, lseg,
373                         atomic_read(&lseg->pls_refcount));
374                 if (atomic_dec_and_test(&lseg->pls_refcount)) {
375                         put_lseg_common(lseg);
376                         list_add(&lseg->pls_list, tmp_list);
377                         rv = 1;
378                 }
379         }
380         return rv;
381 }
382
383 /* Returns the number of matching invalid lsegs remaining in the list
384  * after the call.
385  */
386 int
387 mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
388                             struct list_head *tmp_list,
389                             struct pnfs_layout_range *recall_range)
390 {
391         struct pnfs_layout_segment *lseg, *next;
392         int invalid = 0, removed = 0;
393
394         dprintk("%s:Begin lo %p\n", __func__, lo);
395
396         if (list_empty(&lo->plh_segs)) {
397                 if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
398                         put_layout_hdr_locked(lo);
399                 return 0;
400         }
401         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
402                 if (!recall_range ||
403                     should_free_lseg(&lseg->pls_range, recall_range)) {
404                         dprintk("%s: freeing lseg %p iomode %d "
405                                 "offset %llu length %llu\n", __func__,
406                                 lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
407                                 lseg->pls_range.length);
408                         invalid++;
409                         removed += mark_lseg_invalid(lseg, tmp_list);
410                 }
411         dprintk("%s:Return %i\n", __func__, invalid - removed);
412         return invalid - removed;
413 }
414
415 /* note free_me must contain lsegs from a single layout_hdr */
416 void
417 pnfs_free_lseg_list(struct list_head *free_me)
418 {
419         struct pnfs_layout_segment *lseg, *tmp;
420         struct pnfs_layout_hdr *lo;
421
422         if (list_empty(free_me))
423                 return;
424
425         lo = list_first_entry(free_me, struct pnfs_layout_segment,
426                               pls_list)->pls_layout;
427
428         if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
429                 struct nfs_client *clp;
430
431                 clp = NFS_SERVER(lo->plh_inode)->nfs_client;
432                 spin_lock(&clp->cl_lock);
433                 list_del_init(&lo->plh_layouts);
434                 spin_unlock(&clp->cl_lock);
435         }
436         list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
437                 list_del(&lseg->pls_list);
438                 free_lseg(lseg);
439         }
440 }
441
442 void
443 pnfs_destroy_layout(struct nfs_inode *nfsi)
444 {
445         struct pnfs_layout_hdr *lo;
446         LIST_HEAD(tmp_list);
447
448         spin_lock(&nfsi->vfs_inode.i_lock);
449         lo = nfsi->layout;
450         if (lo) {
451                 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
452                 mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
453         }
454         spin_unlock(&nfsi->vfs_inode.i_lock);
455         pnfs_free_lseg_list(&tmp_list);
456 }
457
458 /*
459  * Called by the state manager to remove all layouts established under an
460  * expired lease.
461  */
462 void
463 pnfs_destroy_all_layouts(struct nfs_client *clp)
464 {
465         struct nfs_server *server;
466         struct pnfs_layout_hdr *lo;
467         LIST_HEAD(tmp_list);
468
469         nfs4_deviceid_mark_client_invalid(clp);
470         nfs4_deviceid_purge_client(clp);
471
472         spin_lock(&clp->cl_lock);
473         rcu_read_lock();
474         list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
475                 if (!list_empty(&server->layouts))
476                         list_splice_init(&server->layouts, &tmp_list);
477         }
478         rcu_read_unlock();
479         spin_unlock(&clp->cl_lock);
480
481         while (!list_empty(&tmp_list)) {
482                 lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
483                                 plh_layouts);
484                 dprintk("%s freeing layout for inode %lu\n", __func__,
485                         lo->plh_inode->i_ino);
486                 list_del_init(&lo->plh_layouts);
487                 pnfs_destroy_layout(NFS_I(lo->plh_inode));
488         }
489 }
490
491 /* update lo->plh_stateid with new if it is more recent */
492 void
493 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
494                         bool update_barrier)
495 {
496         u32 oldseq, newseq;
497
498         oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
499         newseq = be32_to_cpu(new->stateid.seqid);
500         if ((int)(newseq - oldseq) > 0) {
501                 memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
502                 if (update_barrier) {
503                         u32 new_barrier = be32_to_cpu(new->stateid.seqid);
504
505                         if ((int)(new_barrier - lo->plh_barrier) > 0)
506                                 lo->plh_barrier = new_barrier;
507                 } else {
508                         /* Because of wraparound, we want to keep the barrier
509                          * "close" to the current seqids.  It needs to be
510                          * within 2**31 to count as "behind", so if it
511                          * gets too near that limit, give us a little leeway
512                          * and bring it to within 2**30.
513                          * NOTE - and yes, this is all unsigned arithmetic.
514                          */
515                         if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
516                                 lo->plh_barrier = newseq - (1 << 30);
517                 }
518         }
519 }
520
521 /* lget is set to 1 if called from inside send_layoutget call chain */
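/* A layoutget is blocked if the given stateid is at or behind the current
 * barrier, if LAYOUTGETs are explicitly blocked (plh_block_lgets), if the
 * layout is being destroyed or bulk-recalled, or if this would be the
 * first lseg while other LAYOUTGETs are still outstanding.
 */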
522 static bool
523 pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
524                         int lget)
525 {
526         if ((stateid) &&
527             (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
528                 return true;
529         return lo->plh_block_lgets ||
530                 test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
531                 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
532                 (list_empty(&lo->plh_segs) &&
533                  (atomic_read(&lo->plh_outstanding) > lget));
534 }
535
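/* Pick the stateid to send in a LAYOUTGET: the open stateid when no
 * segments are cached yet (first LAYOUTGET for this file), otherwise the
 * current layout stateid.  Returns -EAGAIN if layoutgets are blocked.
 */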
536 int
537 pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
538                               struct nfs4_state *open_state)
539 {
540         int status = 0;
541
542         dprintk("--> %s\n", __func__);
543         spin_lock(&lo->plh_inode->i_lock);
544         if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
545                 status = -EAGAIN;
546         } else if (list_empty(&lo->plh_segs)) {
547                 int seq;
548
549                 do {
550                         seq = read_seqbegin(&open_state->seqlock);
551                         memcpy(dst->data, open_state->stateid.data,
552                                sizeof(open_state->stateid.data));
553                 } while (read_seqretry(&open_state->seqlock, seq));
554         } else
555                 memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
556         spin_unlock(&lo->plh_inode->i_lock);
557         dprintk("<-- %s\n", __func__);
558         return status;
559 }
560
561 /*
562  * Get layout from server.
563  *    for now, assume that whole file layouts are requested.
564  *    arg->offset: 0
565  *    arg->length: all ones
566  */
567 static struct pnfs_layout_segment *
568 send_layoutget(struct pnfs_layout_hdr *lo,
569            struct nfs_open_context *ctx,
570            struct pnfs_layout_range *range,
571            gfp_t gfp_flags)
572 {
573         struct inode *ino = lo->plh_inode;
574         struct nfs_server *server = NFS_SERVER(ino);
575         struct nfs4_layoutget *lgp;
576         struct pnfs_layout_segment *lseg = NULL;
577         struct page **pages = NULL;
578         int i;
579         u32 max_resp_sz, max_pages;
580
581         dprintk("--> %s\n", __func__);
582
583         BUG_ON(ctx == NULL);
584         lgp = kzalloc(sizeof(*lgp), gfp_flags);
585         if (lgp == NULL)
586                 return NULL;
587
588         /* allocate pages for xdr post processing */
589         max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
590         max_pages = max_resp_sz >> PAGE_SHIFT;
591
592         pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
593         if (!pages)
594                 goto out_err_free;
595
596         for (i = 0; i < max_pages; i++) {
597                 pages[i] = alloc_page(gfp_flags);
598                 if (!pages[i])
599                         goto out_err_free;
600         }
601
602         lgp->args.minlength = PAGE_CACHE_SIZE;
603         if (lgp->args.minlength > range->length)
604                 lgp->args.minlength = range->length;
605         lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
606         lgp->args.range = *range;
607         lgp->args.type = server->pnfs_curr_ld->id;
608         lgp->args.inode = ino;
609         lgp->args.ctx = get_nfs_open_context(ctx);
610         lgp->args.layout.pages = pages;
611         lgp->args.layout.pglen = max_pages * PAGE_SIZE;
612         lgp->lsegpp = &lseg;
613         lgp->gfp_flags = gfp_flags;
614
615         /* Synchronously retrieve layout information from server and
616          * store in lseg.
617          */
618         nfs4_proc_layoutget(lgp);
619         if (!lseg) {
620                 /* remember that LAYOUTGET failed and suspend trying */
621                 set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
622         }
623
624         /* free xdr pages */
625         for (i = 0; i < max_pages; i++)
626                 __free_page(pages[i]);
627         kfree(pages);
628
629         return lseg;
630
631 out_err_free:
632         /* free any allocated xdr pages and lgp, as it is not used */
633         if (pages) {
634                 for (i = 0; i < max_pages; i++) {
635                         if (!pages[i])
636                                 break;
637                         __free_page(pages[i]);
638                 }
639                 kfree(pages);
640         }
641         kfree(lgp);
642         return NULL;
643 }
644
645 /* Initiates a LAYOUTRETURN(FILE) */
646 int
647 _pnfs_return_layout(struct inode *ino)
648 {
649         struct pnfs_layout_hdr *lo = NULL;
650         struct nfs_inode *nfsi = NFS_I(ino);
651         LIST_HEAD(tmp_list);
652         struct nfs4_layoutreturn *lrp;
653         nfs4_stateid stateid;
654         int status = 0;
655
656         dprintk("--> %s\n", __func__);
657
658         spin_lock(&ino->i_lock);
659         lo = nfsi->layout;
660         if (!lo) {
661                 spin_unlock(&ino->i_lock);
662                 dprintk("%s: no layout to return\n", __func__);
663                 return status;
664         }
665         stateid = nfsi->layout->plh_stateid;
666         /* Reference matched in nfs4_layoutreturn_release */
667         get_layout_hdr(lo);
668         mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
669         lo->plh_block_lgets++;
670         spin_unlock(&ino->i_lock);
671         pnfs_free_lseg_list(&tmp_list);
672
673         WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
674
675         lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
676         if (unlikely(lrp == NULL)) {
677                 status = -ENOMEM;
678                 set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
679                 set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
680                 put_layout_hdr(lo);
681                 goto out;
682         }
683
684         lrp->args.stateid = stateid;
685         lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
686         lrp->args.inode = ino;
687         lrp->args.layout = lo;
688         lrp->clp = NFS_SERVER(ino)->nfs_client;
689
690         status = nfs4_proc_layoutreturn(lrp);
691 out:
692         dprintk("<-- %s status: %d\n", __func__, status);
693         return status;
694 }
695
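/* Return-on-close: invalidate all NFS_LSEG_ROC segments so the layout can
 * be returned as part of CLOSE.  On success the header is pinned and
 * further LAYOUTGETs are blocked until pnfs_roc_release().
 */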
696 bool pnfs_roc(struct inode *ino)
697 {
698         struct pnfs_layout_hdr *lo;
699         struct pnfs_layout_segment *lseg, *tmp;
700         LIST_HEAD(tmp_list);
701         bool found = false;
702
703         spin_lock(&ino->i_lock);
704         lo = NFS_I(ino)->layout;
705         if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
706             test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
707                 goto out_nolayout;
708         list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
709                 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
710                         mark_lseg_invalid(lseg, &tmp_list);
711                         found = true;
712                 }
713         if (!found)
714                 goto out_nolayout;
715         lo->plh_block_lgets++;
716         get_layout_hdr(lo); /* matched in pnfs_roc_release */
717         spin_unlock(&ino->i_lock);
718         pnfs_free_lseg_list(&tmp_list);
719         return true;
720
721 out_nolayout:
722         spin_unlock(&ino->i_lock);
723         return false;
724 }
725
726 void pnfs_roc_release(struct inode *ino)
727 {
728         struct pnfs_layout_hdr *lo;
729
730         spin_lock(&ino->i_lock);
731         lo = NFS_I(ino)->layout;
732         lo->plh_block_lgets--;
733         put_layout_hdr_locked(lo);
734         spin_unlock(&ino->i_lock);
735 }
736
737 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
738 {
739         struct pnfs_layout_hdr *lo;
740
741         spin_lock(&ino->i_lock);
742         lo = NFS_I(ino)->layout;
743         if ((int)(barrier - lo->plh_barrier) > 0)
744                 lo->plh_barrier = barrier;
745         spin_unlock(&ino->i_lock);
746 }
747
748 bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
749 {
750         struct nfs_inode *nfsi = NFS_I(ino);
751         struct pnfs_layout_segment *lseg;
752         bool found = false;
753
754         spin_lock(&ino->i_lock);
755         list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
756                 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
757                         found = true;
758                         break;
759                 }
760         if (!found) {
761                 struct pnfs_layout_hdr *lo = nfsi->layout;
762                 u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);
763
764                 /* Since close does not return a layout stateid for use as
765                  * a barrier, we choose the worst-case barrier.
766                  */
767                 *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
768         }
769         spin_unlock(&ino->i_lock);
770         return found;
771 }
772
773 /*
774  * Compare two layout segments for sorting into layout cache.
775  * We want to preferentially return RW over RO layouts, so ensure those
776  * are seen first.
777  */
778 static s64
779 cmp_layout(struct pnfs_layout_range *l1,
780            struct pnfs_layout_range *l2)
781 {
782         s64 d;
783
784         /* high offset > low offset */
785         d = l1->offset - l2->offset;
786         if (d)
787                 return d;
788
789         /* short length > long length */
790         d = l2->length - l1->length;
791         if (d)
792                 return d;
793
794         /* read > read/write */
795         return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
796 }
797
798 static void
799 pnfs_insert_layout(struct pnfs_layout_hdr *lo,
800                    struct pnfs_layout_segment *lseg)
801 {
802         struct pnfs_layout_segment *lp;
803
804         dprintk("%s:Begin\n", __func__);
805
806         assert_spin_locked(&lo->plh_inode->i_lock);
807         list_for_each_entry(lp, &lo->plh_segs, pls_list) {
808                 if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
809                         continue;
810                 list_add_tail(&lseg->pls_list, &lp->pls_list);
811                 dprintk("%s: inserted lseg %p "
812                         "iomode %d offset %llu length %llu before "
813                         "lp %p iomode %d offset %llu length %llu\n",
814                         __func__, lseg, lseg->pls_range.iomode,
815                         lseg->pls_range.offset, lseg->pls_range.length,
816                         lp, lp->pls_range.iomode, lp->pls_range.offset,
817                         lp->pls_range.length);
818                 goto out;
819         }
820         list_add_tail(&lseg->pls_list, &lo->plh_segs);
821         dprintk("%s: inserted lseg %p "
822                 "iomode %d offset %llu length %llu at tail\n",
823                 __func__, lseg, lseg->pls_range.iomode,
824                 lseg->pls_range.offset, lseg->pls_range.length);
825 out:
826         get_layout_hdr(lo);
827
828         dprintk("%s:Return\n", __func__);
829 }
830
831 static struct pnfs_layout_hdr *
832 alloc_init_layout_hdr(struct inode *ino,
833                       struct nfs_open_context *ctx,
834                       gfp_t gfp_flags)
835 {
836         struct pnfs_layout_hdr *lo;
837
838         lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
839         if (!lo)
840                 return NULL;
841         atomic_set(&lo->plh_refcount, 1);
842         INIT_LIST_HEAD(&lo->plh_layouts);
843         INIT_LIST_HEAD(&lo->plh_segs);
844         INIT_LIST_HEAD(&lo->plh_bulk_recall);
845         lo->plh_inode = ino;
846         lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
847         return lo;
848 }
849
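/* Find the inode's layout header, allocating one if none exists yet.
 * Called with i_lock held; the lock is dropped around the allocation, and
 * losing the race to a concurrent allocator is handled by freeing the new
 * header and returning the existing one.
 */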
850 static struct pnfs_layout_hdr *
851 pnfs_find_alloc_layout(struct inode *ino,
852                        struct nfs_open_context *ctx,
853                        gfp_t gfp_flags)
854 {
855         struct nfs_inode *nfsi = NFS_I(ino);
856         struct pnfs_layout_hdr *new = NULL;
857
858         dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
859
860         assert_spin_locked(&ino->i_lock);
861         if (nfsi->layout) {
862                 if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
863                         return NULL;
864                 else
865                         return nfsi->layout;
866         }
867         spin_unlock(&ino->i_lock);
868         new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
869         spin_lock(&ino->i_lock);
870
871         if (likely(nfsi->layout == NULL))       /* Won the race? */
872                 nfsi->layout = new;
873         else
874                 pnfs_free_layout_hdr(new);
875         return nfsi->layout;
876 }
877
878 /*
879  * iomode matching rules:
880  * iomode       lseg    match
881  * -----        -----   -----
882  * ANY          READ    true
883  * ANY          RW      true
884  * RW           READ    false
885  * RW           RW      true
886  * READ         READ    true
887  * READ         RW      true
888  */
889 static int
890 is_matching_lseg(struct pnfs_layout_range *ls_range,
891                  struct pnfs_layout_range *range)
892 {
893         struct pnfs_layout_range range1;
894
895         if ((range->iomode == IOMODE_RW &&
896              ls_range->iomode != IOMODE_RW) ||
897             !lo_seg_intersecting(ls_range, range))
898                 return 0;
899
900         /* range1 covers only the first byte in the range */
901         range1 = *range;
902         range1.length = 1;
903         return lo_seg_contained(ls_range, &range1);
904 }
905
906 /*
907  * lookup range in layout
908  */
909 static struct pnfs_layout_segment *
910 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
911                 struct pnfs_layout_range *range)
912 {
913         struct pnfs_layout_segment *lseg, *ret = NULL;
914
915         dprintk("%s:Begin\n", __func__);
916
917         assert_spin_locked(&lo->plh_inode->i_lock);
918         list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
919                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
920                     is_matching_lseg(&lseg->pls_range, range)) {
921                         ret = get_lseg(lseg);
922                         break;
923                 }
924                 if (lseg->pls_range.offset > range->offset)
925                         break;
926         }
927
928         dprintk("%s:Return lseg %p ref %d\n",
929                 __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
930         return ret;
931 }
932
933 /*
934  * Layout segment is retrieved from the server if not cached.
935  * The appropriate layout segment is referenced and returned to the caller.
936  */
937 struct pnfs_layout_segment *
938 pnfs_update_layout(struct inode *ino,
939                    struct nfs_open_context *ctx,
940                    loff_t pos,
941                    u64 count,
942                    enum pnfs_iomode iomode,
943                    gfp_t gfp_flags)
944 {
945         struct pnfs_layout_range arg = {
946                 .iomode = iomode,
947                 .offset = pos,
948                 .length = count,
949         };
950         unsigned pg_offset;
951         struct nfs_inode *nfsi = NFS_I(ino);
952         struct nfs_server *server = NFS_SERVER(ino);
953         struct nfs_client *clp = server->nfs_client;
954         struct pnfs_layout_hdr *lo;
955         struct pnfs_layout_segment *lseg = NULL;
956         bool first = false;
957
958         if (!pnfs_enabled_sb(NFS_SERVER(ino)))
959                 return NULL;
960         spin_lock(&ino->i_lock);
961         lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
962         if (lo == NULL) {
963                 dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
964                 goto out_unlock;
965         }
966
967         /* Do we even need to bother with this? */
968         if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
969             test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
970                 dprintk("%s matches recall, use MDS\n", __func__);
971                 goto out_unlock;
972         }
973
974         /* if LAYOUTGET already failed once we don't try again */
975         if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
976                 goto out_unlock;
977
978         /* Check to see if the layout for the given range already exists */
979         lseg = pnfs_find_lseg(lo, &arg);
980         if (lseg)
981                 goto out_unlock;
982
983         if (pnfs_layoutgets_blocked(lo, NULL, 0))
984                 goto out_unlock;
985         atomic_inc(&lo->plh_outstanding);
986
987         get_layout_hdr(lo);
988         if (list_empty(&lo->plh_segs))
989                 first = true;
990         spin_unlock(&ino->i_lock);
991         if (first) {
992                 /* The lo must be on the clp list if there is any
993                  * chance of a CB_LAYOUTRECALL(FILE) coming in.
994                  */
995                 spin_lock(&clp->cl_lock);
996                 BUG_ON(!list_empty(&lo->plh_layouts));
997                 list_add_tail(&lo->plh_layouts, &server->layouts);
998                 spin_unlock(&clp->cl_lock);
999         }
1000
1001         pg_offset = arg.offset & ~PAGE_CACHE_MASK;
1002         if (pg_offset) {
1003                 arg.offset -= pg_offset;
1004                 arg.length += pg_offset;
1005         }
1006         if (arg.length != NFS4_MAX_UINT64)
1007                 arg.length = PAGE_CACHE_ALIGN(arg.length);
1008
1009         lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
1010         if (!lseg && first) {
1011                 spin_lock(&clp->cl_lock);
1012                 list_del_init(&lo->plh_layouts);
1013                 spin_unlock(&clp->cl_lock);
1014         }
1015         atomic_dec(&lo->plh_outstanding);
1016         put_layout_hdr(lo);
1017 out:
1018         dprintk("%s end, state 0x%lx lseg %p\n", __func__,
1019                 nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
1020         return lseg;
1021 out_unlock:
1022         spin_unlock(&ino->i_lock);
1023         goto out;
1024 }
1025 EXPORT_SYMBOL_GPL(pnfs_update_layout);
1026
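/* Process a LAYOUTGET reply: have the layout driver decode the opaque
 * layout into an lseg, then insert it into the layout cache and update the
 * layout stateid.  If a recall arrived in the meantime, the reply is
 * forgotten and the lseg is freed instead.
 */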
1027 int
1028 pnfs_layout_process(struct nfs4_layoutget *lgp)
1029 {
1030         struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
1031         struct nfs4_layoutget_res *res = &lgp->res;
1032         struct pnfs_layout_segment *lseg;
1033         struct inode *ino = lo->plh_inode;
1034         struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
1035         int status = 0;
1036
1037         /* Inject layout blob into I/O device driver */
1038         lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
1039         if (!lseg || IS_ERR(lseg)) {
1040                 if (!lseg)
1041                         status = -ENOMEM;
1042                 else
1043                         status = PTR_ERR(lseg);
1044                 dprintk("%s: Could not allocate layout: error %d\n",
1045                        __func__, status);
1046                 goto out;
1047         }
1048
1049         spin_lock(&ino->i_lock);
1050         if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
1051             test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1052                 dprintk("%s forget reply due to recall\n", __func__);
1053                 goto out_forget_reply;
1054         }
1055
1056         if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
1057                 dprintk("%s forget reply due to state\n", __func__);
1058                 goto out_forget_reply;
1059         }
1060         init_lseg(lo, lseg);
1061         lseg->pls_range = res->range;
1062         *lgp->lsegpp = get_lseg(lseg);
1063         pnfs_insert_layout(lo, lseg);
1064
1065         if (res->return_on_close) {
1066                 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
1067                 set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
1068         }
1069
1070         /* Done processing layoutget. Set the layout stateid */
1071         pnfs_set_layout_stateid(lo, &res->stateid, false);
1072         spin_unlock(&ino->i_lock);
1073 out:
1074         return status;
1075
1076 out_forget_reply:
1077         spin_unlock(&ino->i_lock);
1078         lseg->pls_layout = lo;
1079         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
1080         goto out;
1081 }
1082
1083 void
1084 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1085 {
1086         BUG_ON(pgio->pg_lseg != NULL);
1087
1088         pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1089                                            req->wb_context,
1090                                            req_offset(req),
1091                                            req->wb_bytes,
1092                                            IOMODE_READ,
1093                                            GFP_KERNEL);
1094         /* If no lseg, fall back to read through mds */
1095         if (pgio->pg_lseg == NULL)
1096                 nfs_pageio_reset_read_mds(pgio);
1097
1098 }
1099 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
1100
1101 void
1102 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1103 {
1104         BUG_ON(pgio->pg_lseg != NULL);
1105
1106         pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1107                                            req->wb_context,
1108                                            req_offset(req),
1109                                            req->wb_bytes,
1110                                            IOMODE_RW,
1111                                            GFP_NOFS);
1112         /* If no lseg, fall back to write through mds */
1113         if (pgio->pg_lseg == NULL)
1114                 nfs_pageio_reset_write_mds(pgio);
1115 }
1116 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
1117
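/* Initialize the pageio descriptor with the layout driver's read ops.
 * Returns false when no pNFS driver is set so the caller can fall back to
 * the regular MDS descriptor; pnfs_pageio_init_write() below is analogous.
 */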
1118 bool
1119 pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
1120 {
1121         struct nfs_server *server = NFS_SERVER(inode);
1122         struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1123
1124         if (ld == NULL)
1125                 return false;
1126         nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
1127         return true;
1128 }
1129
1130 bool
1131 pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
1132 {
1133         struct nfs_server *server = NFS_SERVER(inode);
1134         struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1135
1136         if (ld == NULL)
1137                 return false;
1138         nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
1139         return true;
1140 }
1141
1142 bool
1143 pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1144                      struct nfs_page *req)
1145 {
1146         if (pgio->pg_lseg == NULL)
1147                 return nfs_generic_pg_test(pgio, prev, req);
1148
1149         /*
1150          * Test if a nfs_page is fully contained in the pnfs_layout_range.
1151          * Note that this test makes several assumptions:
1152          * - that the previous nfs_page in the struct nfs_pageio_descriptor
1153          *   is known to lie within the range.
1154          *   - that the nfs_page being tested is known to be contiguous with the
1155          *   previous nfs_page.
1156          *   - Layout ranges are page aligned, so we only have to test the
1157          *   start offset of the request.
1158          *
1159          * Please also note that 'end_offset' is actually the offset of the
1160          * first byte that lies outside the pnfs_layout_range. FIXME?
1161          *
1162          */
1163         return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
1164                                          pgio->pg_lseg->pls_range.length);
1165 }
1166 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1167
1168 /*
1169  * Called by non rpc-based layout drivers
1170  */
1171 void pnfs_ld_write_done(struct nfs_write_data *data)
1172 {
1173         if (likely(!data->pnfs_error)) {
1174                 pnfs_set_layoutcommit(data);
1175                 data->mds_ops->rpc_call_done(&data->task, data);
1176         } else {
1177                 put_lseg(data->lseg);
1178                 data->lseg = NULL;
1179                 dprintk("pnfs write error = %d\n", data->pnfs_error);
1180         }
1181         data->mds_ops->rpc_release(data);
1182 }
1183 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1184
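/* The layout driver declined the write: put the requests back on the
 * descriptor's page list, reset the descriptor to go through the MDS and
 * force a re-coalesce of the I/O.
 */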
1185 static void
1186 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1187                 struct nfs_write_data *data)
1188 {
1189         list_splice_tail_init(&data->pages, &desc->pg_list);
1190         if (data->req && list_empty(&data->req->wb_list))
1191                 nfs_list_add_request(data->req, &desc->pg_list);
1192         nfs_pageio_reset_write_mds(desc);
1193         desc->pg_recoalesce = 1;
1194         nfs_writedata_release(data);
1195 }
1196
1197 static enum pnfs_try_status
1198 pnfs_try_to_write_data(struct nfs_write_data *wdata,
1199                         const struct rpc_call_ops *call_ops,
1200                         struct pnfs_layout_segment *lseg,
1201                         int how)
1202 {
1203         struct inode *inode = wdata->inode;
1204         enum pnfs_try_status trypnfs;
1205         struct nfs_server *nfss = NFS_SERVER(inode);
1206
1207         wdata->mds_ops = call_ops;
1208         wdata->lseg = get_lseg(lseg);
1209
1210         dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1211                 inode->i_ino, wdata->args.count, wdata->args.offset, how);
1212
1213         trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
1214         if (trypnfs == PNFS_NOT_ATTEMPTED) {
1215                 put_lseg(wdata->lseg);
1216                 wdata->lseg = NULL;
1217         } else
1218                 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1219
1220         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1221         return trypnfs;
1222 }
1223
1224 static void
1225 pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
1226 {
1227         struct nfs_write_data *data;
1228         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1229         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1230
1231         desc->pg_lseg = NULL;
1232         while (!list_empty(head)) {
1233                 enum pnfs_try_status trypnfs;
1234
1235                 data = list_entry(head->next, struct nfs_write_data, list);
1236                 list_del_init(&data->list);
1237
1238                 trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
1239                 if (trypnfs == PNFS_NOT_ATTEMPTED)
1240                         pnfs_write_through_mds(desc, data);
1241         }
1242         put_lseg(lseg);
1243 }
1244
1245 int
1246 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1247 {
1248         LIST_HEAD(head);
1249         int ret;
1250
1251         ret = nfs_generic_flush(desc, &head);
1252         if (ret != 0) {
1253                 put_lseg(desc->pg_lseg);
1254                 desc->pg_lseg = NULL;
1255                 return ret;
1256         }
1257         pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
1258         return 0;
1259 }
1260 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1261
1262 static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
1263 {
1264         struct nfs_pageio_descriptor pgio;
1265
1266         put_lseg(data->lseg);
1267         data->lseg = NULL;
1268         dprintk("pnfs read error = %d\n", data->pnfs_error);
1269
1270         nfs_pageio_init_read_mds(&pgio, data->inode);
1271
1272         while (!list_empty(&data->pages)) {
1273                 struct nfs_page *req = nfs_list_entry(data->pages.next);
1274
1275                 nfs_list_remove_request(req);
1276                 nfs_pageio_add_request(&pgio, req);
1277         }
1278         nfs_pageio_complete(&pgio);
1279 }
1280
1281 /*
1282  * Called by non rpc-based layout drivers
1283  */
1284 void pnfs_ld_read_done(struct nfs_read_data *data)
1285 {
1286         if (likely(!data->pnfs_error)) {
1287                 __nfs4_read_done_cb(data);
1288                 data->mds_ops->rpc_call_done(&data->task, data);
1289         } else
1290                 pnfs_ld_handle_read_error(data);
1291         data->mds_ops->rpc_release(data);
1292 }
1293 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
1294
1295 static void
1296 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
1297                 struct nfs_read_data *data)
1298 {
1299         list_splice_tail_init(&data->pages, &desc->pg_list);
1300         if (data->req && list_empty(&data->req->wb_list))
1301                 nfs_list_add_request(data->req, &desc->pg_list);
1302         nfs_pageio_reset_read_mds(desc);
1303         desc->pg_recoalesce = 1;
1304         nfs_readdata_release(data);
1305 }
1306
1307 /*
1308  * Call the appropriate parallel I/O subsystem read function.
1309  */
1310 static enum pnfs_try_status
1311 pnfs_try_to_read_data(struct nfs_read_data *rdata,
1312                        const struct rpc_call_ops *call_ops,
1313                        struct pnfs_layout_segment *lseg)
1314 {
1315         struct inode *inode = rdata->inode;
1316         struct nfs_server *nfss = NFS_SERVER(inode);
1317         enum pnfs_try_status trypnfs;
1318
1319         rdata->mds_ops = call_ops;
1320         rdata->lseg = get_lseg(lseg);
1321
1322         dprintk("%s: Reading ino:%lu %u@%llu\n",
1323                 __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
1324
1325         trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
1326         if (trypnfs == PNFS_NOT_ATTEMPTED) {
1327                 put_lseg(rdata->lseg);
1328                 rdata->lseg = NULL;
1329         } else {
1330                 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
1331         }
1332         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1333         return trypnfs;
1334 }
1335
1336 static void
1337 pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
1338 {
1339         struct nfs_read_data *data;
1340         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1341         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1342
1343         desc->pg_lseg = NULL;
1344         while (!list_empty(head)) {
1345                 enum pnfs_try_status trypnfs;
1346
1347                 data = list_entry(head->next, struct nfs_read_data, list);
1348                 list_del_init(&data->list);
1349
1350                 trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
1351                 if (trypnfs == PNFS_NOT_ATTEMPTED)
1352                         pnfs_read_through_mds(desc, data);
1353         }
1354         put_lseg(lseg);
1355 }
1356
1357 int
1358 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
1359 {
1360         LIST_HEAD(head);
1361         int ret;
1362
1363         ret = nfs_generic_pagein(desc, &head);
1364         if (ret != 0) {
1365                 put_lseg(desc->pg_lseg);
1366                 desc->pg_lseg = NULL;
1367                 return ret;
1368         }
1369         pnfs_do_multiple_reads(desc, &head);
1370         return 0;
1371 }
1372 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
1373
1374 /*
1375  * There can be multiple RW segments.
1376  */
1377 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
1378 {
1379         struct pnfs_layout_segment *lseg;
1380
1381         list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
1382                 if (lseg->pls_range.iomode == IOMODE_RW &&
1383                     test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
1384                         list_add(&lseg->pls_lc_list, listp);
1385         }
1386 }
1387
1388 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
1389 {
1390         if (lseg->pls_range.iomode == IOMODE_RW) {
1391                 dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
1392                 set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
1393         } else {
1394                 dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
1395                 set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
1396         }
1397 }
1398 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
1399
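/* Record that data was written through the layout so a LAYOUTCOMMIT is
 * sent later: sets NFS_INO_LAYOUTCOMMIT, pins the lseg (matched in
 * nfs4_layoutcommit_release) and tracks the last written byte in plh_lwb.
 */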
1400 void
1401 pnfs_set_layoutcommit(struct nfs_write_data *wdata)
1402 {
1403         struct nfs_inode *nfsi = NFS_I(wdata->inode);
1404         loff_t end_pos = wdata->mds_offset + wdata->res.count;
1405         bool mark_as_dirty = false;
1406
1407         spin_lock(&nfsi->vfs_inode.i_lock);
1408         if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1409                 mark_as_dirty = true;
1410                 dprintk("%s: Set layoutcommit for inode %lu ",
1411                         __func__, wdata->inode->i_ino);
1412         }
1413         if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
1414                 /* references matched in nfs4_layoutcommit_release */
1415                 get_lseg(wdata->lseg);
1416         }
1417         if (end_pos > nfsi->layout->plh_lwb)
1418                 nfsi->layout->plh_lwb = end_pos;
1419         spin_unlock(&nfsi->vfs_inode.i_lock);
1420         dprintk("%s: lseg %p end_pos %llu\n",
1421                 __func__, wdata->lseg, nfsi->layout->plh_lwb);
1422
1423         /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
1424          * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
1425         if (mark_as_dirty)
1426                 mark_inode_dirty_sync(wdata->inode);
1427 }
1428 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
1429
1430 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
1431 {
1432         struct nfs_server *nfss = NFS_SERVER(data->args.inode);
1433
1434         if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
1435                 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
1436 }
1437
1438 /*
1439  * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
1440  * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
1441  * data to disk to allow the server to recover the data if it crashes.
1442  * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
1443  * is off, and a COMMIT is sent to a data server, or
1444  * if WRITEs to a data server return NFS_DATA_SYNC.
1445  */
1446 int
1447 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
1448 {
1449         struct nfs4_layoutcommit_data *data;
1450         struct nfs_inode *nfsi = NFS_I(inode);
1451         loff_t end_pos;
1452         int status = 0;
1453
1454         dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
1455
1456         if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1457                 return 0;
1458
1459         /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
1460         data = kzalloc(sizeof(*data), GFP_NOFS);
1461         if (!data) {
1462                 status = -ENOMEM;
1463                 goto out;
1464         }
1465
1466         if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1467                 goto out_free;
1468
1469         if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
1470                 if (!sync) {
1471                         status = -EAGAIN;
1472                         goto out_free;
1473                 }
1474                 status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
1475                                         nfs_wait_bit_killable, TASK_KILLABLE);
1476                 if (status)
1477                         goto out_free;
1478         }
1479
1480         INIT_LIST_HEAD(&data->lseg_list);
1481         spin_lock(&inode->i_lock);
1482         if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1483                 clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
1484                 spin_unlock(&inode->i_lock);
1485                 wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
1486                 goto out_free;
1487         }
1488
1489         pnfs_list_write_lseg(inode, &data->lseg_list);
1490
1491         end_pos = nfsi->layout->plh_lwb;
1492         nfsi->layout->plh_lwb = 0;
1493
1494         memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
1495                 sizeof(nfsi->layout->plh_stateid.data));
1496         spin_unlock(&inode->i_lock);
1497
1498         data->args.inode = inode;
1499         data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
1500         nfs_fattr_init(&data->fattr);
1501         data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
1502         data->res.fattr = &data->fattr;
1503         data->args.lastbytewritten = end_pos - 1;
1504         data->res.server = NFS_SERVER(inode);
1505
1506         status = nfs4_proc_layoutcommit(data, sync);
1507 out:
1508         if (status)
1509                 mark_inode_dirty_sync(inode);
1510         dprintk("<-- %s status %d\n", __func__, status);
1511         return status;
1512 out_free:
1513         kfree(data);
1514         goto out;
1515 }