NFSv4.1: Free the pnfs_layout_hdr outside the inode->i_lock
[pandora-kernel.git] / fs / nfs / pnfs.c
/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
                if (local->id == id)
                        goto out;
        local = NULL;
out:
        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
        return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
        if (local != NULL && !try_module_get(local->owner)) {
                dprintk("%s: Could not grab reference on module\n", __func__);
                local = NULL;
        }
        spin_unlock(&pnfs_spinlock);
        return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                /* Decrement the MDS count. Purge the deviceid cache if zero */
                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
                        nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                      u32 id)
{
        struct pnfs_layoutdriver_type *ld_type = NULL;

        if (id == 0)
                goto out_no_driver;
        if (!(server->nfs_client->cl_exchange_flags &
                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
                printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
                        __func__, id, server->nfs_client->cl_exchange_flags);
                goto out_no_driver;
        }
        ld_type = find_pnfs_driver(id);
        if (!ld_type) {
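                /* Driver not loaded yet; assuming the usual
                 * "nfs-layouttype4" prefix, the file layout type (id 1)
                 * asks modprobe for "nfs-layouttype4-1", the alias the
                 * corresponding layout driver module declares. */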
                request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
                ld_type = find_pnfs_driver(id);
                if (!ld_type) {
                        dprintk("%s: No pNFS module found for %u.\n",
                                __func__, id);
                        goto out_no_driver;
                }
        }
        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
                        "driver %u.\n", __func__, id);
                module_put(ld_type->owner);
                goto out_no_driver;
        }
        /* Bump the MDS count */
        atomic_inc(&server->nfs_client->cl_mds_count);

        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;

out_no_driver:
        dprintk("%s: Using NFSv4 I/O\n", __func__);
        server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        int status = -EINVAL;
        struct pnfs_layoutdriver_type *tmp;

        if (ld_type->id == 0) {
                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
                return status;
        }
        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
                printk(KERN_ERR "NFS: %s Layout driver must provide "
                       "alloc_lseg and free_lseg.\n", __func__);
                return status;
        }

        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
        if (!tmp) {
                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
                status = 0;
                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
                        ld_type->name);
        } else {
                printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
                        __func__, ld_type->id);
        }
        spin_unlock(&pnfs_spinlock);

        return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
        spin_lock(&pnfs_spinlock);
        list_del(&ld_type->pnfs_tblid);
        spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
        atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
                kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
        put_rpccred(lo->plh_lc_cred);
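        /* A driver that provides ->alloc_layout_hdr is expected to pair it
         * with ->free_layout_hdr, which is why the alloc hook is tested
         * before calling the free hook here. */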
        return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
        dprintk("%s: freeing layout cache %p\n", __func__, lo);
        BUG_ON(!list_empty(&lo->plh_layouts));
        nfsi->layout = NULL;
        /* Reset MDS Threshold I/O counters */
        nfsi->write_io = 0;
        nfsi->read_io = 0;
}

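/*
 * Drop a reference to the layout header.  This is the point of the
 * "free the pnfs_layout_hdr outside the inode->i_lock" change:
 * atomic_dec_and_lock() takes i_lock only when the refcount hits zero,
 * the header is detached from the inode while the lock is held, and the
 * actual free (which may call into the layout driver) runs after the
 * lock has been dropped.
 */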
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = lo->plh_inode;

        if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                pnfs_detach_layout_hdr(lo);
                spin_unlock(&inode->i_lock);
                pnfs_free_layout_hdr(lo);
        }
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
        return iomode == IOMODE_RW ?
                NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        lo->plh_retry_timestamp = jiffies;
        if (test_and_set_bit(fail_bit, &lo->plh_flags))
                atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        if (test_and_clear_bit(fail_bit, &lo->plh_flags))
                atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        struct inode *inode = lo->plh_inode;
        struct pnfs_layout_range range = {
                .iomode = iomode,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        LIST_HEAD(head);

        spin_lock(&inode->i_lock);
        pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
        pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&head);
        dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
                        iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        unsigned long start, end;
        int fail_bit = pnfs_iomode_to_fail_bit(iomode);

        if (test_bit(fail_bit, &lo->plh_flags) == 0)
                return false;
        end = jiffies;
        start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
        if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
                /* It is time to retry the failed layoutgets */
                pnfs_layout_clear_fail_bit(lo, fail_bit);
                return false;
        }
        return true;
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
        INIT_LIST_HEAD(&lseg->pls_list);
        INIT_LIST_HEAD(&lseg->pls_lc_list);
        atomic_set(&lseg->pls_refcount, 1);
        smp_mb();
        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
        lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
        struct inode *ino = lseg->pls_layout->plh_inode;

        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        pnfs_put_layout_hdr(NFS_I(ino)->layout);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_segment *lseg)
{
        struct inode *inode = lo->plh_inode;

        WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        list_del_init(&lseg->pls_list);
        if (list_empty(&lo->plh_segs))
                set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags);
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;

        if (!lseg)
                return;

        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
                atomic_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        lo = lseg->pls_layout;
        inode = lo->plh_inode;
        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
                LIST_HEAD(free_me);

                pnfs_layout_remove_lseg(lo, lseg);
                spin_unlock(&inode->i_lock);
                list_add(&lseg->pls_list, &free_me);
                pnfs_free_lseg_list(&free_me);
        }
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static inline u64
end_offset(u64 start, u64 len)
{
        u64 end;

        end = start + len;
        return end >= start ? end : NFS4_MAX_UINT64;
}
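
/*
 * Example: end_offset(10, NFS4_MAX_UINT64) overflows the u64 addition
 * (end == 9 < start), so the result is clamped to NFS4_MAX_UINT64 and an
 * open-ended "to end of file" range stays well-formed.
 */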

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
        u64 end;

        BUG_ON(!len);
        end = start + len;
        return end > start ? end - 1 : NFS4_MAX_UINT64;
}

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static inline int
lo_seg_contained(struct pnfs_layout_range *l1,
                 struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = end_offset(start2, l2->length);

        return (start1 <= start2) && (end1 >= end2);
}

/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static inline int
lo_seg_intersecting(struct pnfs_layout_range *l1,
                    struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = end_offset(start2, l2->length);

        return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
               (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
                 struct pnfs_layout_range *recall_range)
{
        return (recall_range->iomode == IOMODE_ANY ||
                lseg_range->iomode == recall_range->iomode) &&
               lo_seg_intersecting(lseg_range, recall_range);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
{
        int rv = 0;

        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                /* Remove the reference keeping the lseg in the
                 * list.  It will now be removed when all
                 * outstanding io is finished.
                 */
                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
                        atomic_read(&lseg->pls_refcount));
                if (atomic_dec_and_test(&lseg->pls_refcount)) {
                        pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
                        list_add(&lseg->pls_list, tmp_list);
                        rv = 1;
                }
        }
        return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
                            struct list_head *tmp_list,
                            struct pnfs_layout_range *recall_range)
{
        struct pnfs_layout_segment *lseg, *next;
        int invalid = 0, removed = 0;

        dprintk("%s:Begin lo %p\n", __func__, lo);

        if (list_empty(&lo->plh_segs)) {
                set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags);
                return 0;
        }
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                if (!recall_range ||
                    should_free_lseg(&lseg->pls_range, recall_range)) {
                        dprintk("%s: freeing lseg %p iomode %d "
                                "offset %llu length %llu\n", __func__,
                                lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
                                lseg->pls_range.length);
                        invalid++;
                        removed += mark_lseg_invalid(lseg, tmp_list);
                }
        dprintk("%s:Return %i\n", __func__, invalid - removed);
        return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
        struct pnfs_layout_segment *lseg, *tmp;
        struct pnfs_layout_hdr *lo;

        if (list_empty(free_me))
                return;

        lo = list_first_entry(free_me, struct pnfs_layout_segment,
                              pls_list)->pls_layout;

        if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
                struct nfs_client *clp;

                clp = NFS_SERVER(lo->plh_inode)->nfs_client;
                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
                list_del(&lseg->pls_list);
                free_lseg(lseg);
        }
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        spin_lock(&nfsi->vfs_inode.i_lock);
        lo = nfsi->layout;
        if (lo) {
                lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
                pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
                pnfs_get_layout_hdr(lo);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
                spin_unlock(&nfsi->vfs_inode.i_lock);
                pnfs_free_lseg_list(&tmp_list);
                pnfs_put_layout_hdr(lo);
        } else
                spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
        struct nfs_server *server;
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        nfs4_deviceid_mark_client_invalid(clp);
        nfs4_deviceid_purge_client(clp);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (!list_empty(&server->layouts))
                        list_splice_init(&server->layouts, &tmp_list);
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        while (!list_empty(&tmp_list)) {
                lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
                                plh_layouts);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
                list_del_init(&lo->plh_layouts);
                pnfs_destroy_layout(NFS_I(lo->plh_inode));
        }
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
{
        u32 oldseq, newseq;

        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);
        if ((int)(newseq - oldseq) > 0) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                if (update_barrier) {
                        u32 new_barrier = be32_to_cpu(new->seqid);

                        if ((int)(new_barrier - lo->plh_barrier) > 0)
                                lo->plh_barrier = new_barrier;
                } else {
                        /* Because of wraparound, we want to keep the barrier
                         * "close" to the current seqids.  It needs to be
                         * within 2**31 to count as "behind", so if it
                         * gets too near that limit, give us a little leeway
                         * and bring it to within 2**30.
                         * NOTE - and yes, this is all unsigned arithmetic.
                         */
                        if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
                                lo->plh_barrier = newseq - (1 << 30);
                }
        }
}
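
/*
 * Worked example of the barrier clamp above (all u32 arithmetic): with
 * newseq = 0x80000000 and plh_barrier = 0x10000000, the difference is
 * 0x70000000 > (3 << 29), i.e. the barrier has drifted close to the 2**31
 * "behind" limit, so it is pulled forward to newseq - (1 << 30) =
 * 0x40000000, exactly 2**30 behind the current seqid.
 */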

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
                        int lget)
{
        if ((stateid) &&
            (int)(lo->plh_barrier - be32_to_cpu(stateid->seqid)) >= 0)
                return true;
        return lo->plh_block_lgets ||
                test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
                (list_empty(&lo->plh_segs) &&
                 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
                              struct nfs4_state *open_state)
{
        int status = 0;

        dprintk("--> %s\n", __func__);
        spin_lock(&lo->plh_inode->i_lock);
        if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
                status = -EAGAIN;
        } else if (list_empty(&lo->plh_segs)) {
                int seq;

                do {
                        seq = read_seqbegin(&open_state->seqlock);
                        nfs4_stateid_copy(dst, &open_state->stateid);
                } while (read_seqretry(&open_state->seqlock, seq));
        } else
                nfs4_stateid_copy(dst, &lo->plh_stateid);
        spin_unlock(&lo->plh_inode->i_lock);
        dprintk("<-- %s\n", __func__);
        return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
           struct nfs_open_context *ctx,
           struct pnfs_layout_range *range,
           gfp_t gfp_flags)
{
        struct inode *ino = lo->plh_inode;
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs4_layoutget *lgp;
        struct pnfs_layout_segment *lseg;

        dprintk("--> %s\n", __func__);

        BUG_ON(ctx == NULL);
        lgp = kzalloc(sizeof(*lgp), gfp_flags);
        if (lgp == NULL)
                return NULL;

        lgp->args.minlength = PAGE_CACHE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;
        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
        lgp->args.range = *range;
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
        lgp->gfp_flags = gfp_flags;

        /* Synchronously retrieve layout information from server and
         * store in lseg.
         */
        lseg = nfs4_proc_layoutget(lgp, gfp_flags);
        if (IS_ERR(lseg)) {
                switch (PTR_ERR(lseg)) {
                case -ENOMEM:
                case -ERESTARTSYS:
                        break;
                default:
                        /* remember that LAYOUTGET failed and suspend trying */
                        pnfs_layout_io_set_failed(lo, range->iomode);
                }
                return NULL;
        }

        return lseg;
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
        struct pnfs_layout_hdr *lo = NULL;
        struct nfs_inode *nfsi = NFS_I(ino);
        LIST_HEAD(tmp_list);
        struct nfs4_layoutreturn *lrp;
        nfs4_stateid stateid;
        int status = 0, empty;

        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo || pnfs_test_layout_returned(lo)) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout to return\n", __func__);
                goto out;
        }
        stateid = nfsi->layout->plh_stateid;
        /* Reference matched in nfs4_layoutreturn_release */
        pnfs_get_layout_hdr(lo);
        empty = list_empty(&lo->plh_segs);
        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (empty) {
                spin_unlock(&ino->i_lock);
                pnfs_put_layout_hdr(lo);
                dprintk("NFS: %s no layout segments to return\n", __func__);
                goto out;
        }
        lo->plh_block_lgets++;
        pnfs_mark_layout_returned(lo);
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);

        WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

        lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
                pnfs_layout_io_set_failed(lo, IOMODE_RW);
                pnfs_layout_io_set_failed(lo, IOMODE_READ);
                pnfs_clear_layout_returned(lo);
                pnfs_put_layout_hdr(lo);
                goto out;
        }

        lrp->args.stateid = stateid;
        lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
        lrp->args.inode = ino;
        lrp->args.layout = lo;
        lrp->clp = NFS_SERVER(ino)->nfs_client;

        status = nfs4_proc_layoutreturn(lrp);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

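/*
 * Return-on-close handling: when the server granted the layout with
 * "return on close" set, invalidate all NFS_LSEG_ROC segments here so the
 * subsequent CLOSE can act as an implicit layoutreturn.
 */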
bool pnfs_roc(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg, *tmp;
        LIST_HEAD(tmp_list);
        bool found = false;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
                goto out_nolayout;
        list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        mark_lseg_invalid(lseg, &tmp_list);
                        found = true;
                }
        if (!found)
                goto out_nolayout;
        lo->plh_block_lgets++;
        pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
        return true;

out_nolayout:
        spin_unlock(&ino->i_lock);
        return false;
}

void pnfs_roc_release(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        lo->plh_block_lgets--;
        if (atomic_dec_and_test(&lo->plh_refcount)) {
                pnfs_detach_layout_hdr(lo);
                spin_unlock(&ino->i_lock);
                pnfs_free_layout_hdr(lo);
        } else
                spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if ((int)(barrier - lo->plh_barrier) > 0)
                lo->plh_barrier = barrier;
        spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg;
        u32 current_seqid;
        bool found = false;

        spin_lock(&ino->i_lock);
        list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
                        found = true;
                        goto out;
                }
        lo = nfsi->layout;
        current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

        /* Since close does not return a layout stateid for use as
         * a barrier, we choose the worst-case barrier.
         */
        *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
        spin_unlock(&ino->i_lock);
        return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
           struct pnfs_layout_range *l2)
{
        s64 d;

        /* high offset > low offset */
        d = l1->offset - l2->offset;
        if (d)
                return d;

        /* short length > long length */
        d = l2->length - l1->length;
        if (d)
                return d;

        /* read > read/write */
        return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
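
/*
 * Example: for two segments with equal offset and length, an IOMODE_RW
 * segment compares less than an IOMODE_READ one (0 - 1 < 0), so RW
 * segments sort earlier in plh_segs and are found first by
 * pnfs_find_lseg().
 */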

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
                   struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_segment *lp;

        dprintk("%s:Begin\n", __func__);

        assert_spin_locked(&lo->plh_inode->i_lock);
        list_for_each_entry(lp, &lo->plh_segs, pls_list) {
                if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
                        continue;
                list_add_tail(&lseg->pls_list, &lp->pls_list);
                dprintk("%s: inserted lseg %p "
                        "iomode %d offset %llu length %llu before "
                        "lp %p iomode %d offset %llu length %llu\n",
                        __func__, lseg, lseg->pls_range.iomode,
                        lseg->pls_range.offset, lseg->pls_range.length,
                        lp, lp->pls_range.iomode, lp->pls_range.offset,
                        lp->pls_range.length);
                goto out;
        }
        list_add_tail(&lseg->pls_list, &lo->plh_segs);
        dprintk("%s: inserted lseg %p "
                "iomode %d offset %llu length %llu at tail\n",
                __func__, lseg, lseg->pls_range.iomode,
                lseg->pls_range.offset, lseg->pls_range.length);
out:
        pnfs_get_layout_hdr(lo);

        dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
                      struct nfs_open_context *ctx,
                      gfp_t gfp_flags)
{
        struct pnfs_layout_hdr *lo;

        lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
        if (!lo)
                return NULL;
        atomic_set(&lo->plh_refcount, 1);
        INIT_LIST_HEAD(&lo->plh_layouts);
        INIT_LIST_HEAD(&lo->plh_segs);
        INIT_LIST_HEAD(&lo->plh_bulk_recall);
        lo->plh_inode = ino;
        lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
        return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
                       struct nfs_open_context *ctx,
                       gfp_t gfp_flags)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *new = NULL;

        dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

        assert_spin_locked(&ino->i_lock);
        if (nfsi->layout) {
                if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
                        return NULL;
                pnfs_get_layout_hdr(nfsi->layout);
                return nfsi->layout;
        }
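        /* i_lock must be dropped because the allocation may sleep
         * (depending on gfp_flags); re-check nfsi->layout after retaking
         * the lock in case another thread installed a header meanwhile. */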
        spin_unlock(&ino->i_lock);
        new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
        spin_lock(&ino->i_lock);

        if (likely(nfsi->layout == NULL))       /* Won the race? */
                nfsi->layout = new;
        else
                pnfs_free_layout_hdr(new);
        return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode       lseg    match
 * -----        -----   -----
 * ANY          READ    true
 * ANY          RW      true
 * RW           READ    false
 * RW           RW      true
 * READ         READ    true
 * READ         RW      true
 */
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
                 struct pnfs_layout_range *range)
{
        struct pnfs_layout_range range1;

        if ((range->iomode == IOMODE_RW &&
             ls_range->iomode != IOMODE_RW) ||
            !lo_seg_intersecting(ls_range, range))
                return 0;

        /* range1 covers only the first byte in the range */
        range1 = *range;
        range1.length = 1;
        return lo_seg_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_range *range)
{
        struct pnfs_layout_segment *lseg, *ret = NULL;

        dprintk("%s:Begin\n", __func__);

        assert_spin_locked(&lo->plh_inode->i_lock);
        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
                    is_matching_lseg(&lseg->pls_range, range)) {
                        ret = pnfs_get_lseg(lseg);
                        break;
                }
                if (lseg->pls_range.offset > range->offset)
                        break;
        }

        dprintk("%s:Return lseg %p ref %d\n",
                __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
        return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
                                     struct inode *ino, int iomode)
{
        struct nfs4_threshold *t = ctx->mdsthreshold;
        struct nfs_inode *nfsi = NFS_I(ino);
        loff_t fsize = i_size_read(ino);
        bool size = false, size_set = false, io = false, io_set = false, ret = false;

        if (t == NULL)
                return ret;

        dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
                __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

        switch (iomode) {
        case IOMODE_READ:
                if (t->bm & THRESHOLD_RD) {
                        dprintk("%s fsize %llu\n", __func__, fsize);
                        size_set = true;
                        if (fsize < t->rd_sz)
                                size = true;
                }
                if (t->bm & THRESHOLD_RD_IO) {
                        dprintk("%s nfsi->read_io %llu\n", __func__,
                                nfsi->read_io);
                        io_set = true;
                        if (nfsi->read_io < t->rd_io_sz)
                                io = true;
                }
                break;
        case IOMODE_RW:
                if (t->bm & THRESHOLD_WR) {
                        dprintk("%s fsize %llu\n", __func__, fsize);
                        size_set = true;
                        if (fsize < t->wr_sz)
                                size = true;
                }
                if (t->bm & THRESHOLD_WR_IO) {
                        dprintk("%s nfsi->write_io %llu\n", __func__,
                                nfsi->write_io);
                        io_set = true;
                        if (nfsi->write_io < t->wr_io_sz)
                                io = true;
                }
                break;
        }
        if (size_set && io_set) {
                if (size && io)
                        ret = true;
        } else if (size || io)
                ret = true;

        dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
        return ret;
}
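
/*
 * Example: with THRESHOLD_RD (rd_sz = 1 MB) and THRESHOLD_RD_IO
 * (rd_io_sz = 64 KB) both set by the server, a READ against a 4 KB file
 * with no accumulated read_io makes both "size" and "io" true above, so
 * pnfs_within_mdsthreshold() returns true and the I/O goes to the MDS.
 */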

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
                   struct nfs_open_context *ctx,
                   loff_t pos,
                   u64 count,
                   enum pnfs_iomode iomode,
                   gfp_t gfp_flags)
{
        struct pnfs_layout_range arg = {
                .iomode = iomode,
                .offset = pos,
                .length = count,
        };
        unsigned pg_offset;
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs_client *clp = server->nfs_client;
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg = NULL;
        bool first = false;

        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
                goto out;

        if (pnfs_within_mdsthreshold(ctx, ino, iomode))
                goto out;

        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
        if (lo == NULL) {
                spin_unlock(&ino->i_lock);
                goto out;
        }

        /* Do we even need to bother with this? */
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s matches recall, use MDS\n", __func__);
                goto out_unlock;
        }

        /* if LAYOUTGET already failed once we don't try again */
        if (pnfs_layout_io_test_failed(lo, iomode))
                goto out_unlock;

        /* Check to see if the layout for the given range already exists */
        lseg = pnfs_find_lseg(lo, &arg);
        if (lseg)
                goto out_unlock;

        if (pnfs_layoutgets_blocked(lo, NULL, 0))
                goto out_unlock;
        atomic_inc(&lo->plh_outstanding);

        if (list_empty(&lo->plh_segs))
                first = true;

        /* Enable LAYOUTRETURNs */
        pnfs_clear_layout_returned(lo);

        spin_unlock(&ino->i_lock);
        if (first) {
                /* The lo must be on the clp list if there is any
                 * chance of a CB_LAYOUTRECALL(FILE) coming in.
                 */
                spin_lock(&clp->cl_lock);
                BUG_ON(!list_empty(&lo->plh_layouts));
                list_add_tail(&lo->plh_layouts, &server->layouts);
                spin_unlock(&clp->cl_lock);
        }

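        /* Widen the request to whole pages: e.g. pos = 5000, count = 100
         * on 4 KB pages gives pg_offset = 904, so we ask for offset 4096,
         * length 1004, rounded up to 4096 below. */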
        pg_offset = arg.offset & ~PAGE_CACHE_MASK;
        if (pg_offset) {
                arg.offset -= pg_offset;
                arg.length += pg_offset;
        }
        if (arg.length != NFS4_MAX_UINT64)
                arg.length = PAGE_CACHE_ALIGN(arg.length);

        lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
        if (!lseg && first) {
                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
        pnfs_put_layout_hdr(lo);
out:
        dprintk("%s: inode %s/%llu pNFS layout segment %s for "
                        "(%s, offset: %llu, length: %llu)\n",
                        __func__, ino->i_sb->s_id,
                        (unsigned long long)NFS_FILEID(ino),
                        lseg == NULL ? "not found" : "found",
                        iomode == IOMODE_RW ? "read/write" : "read-only",
                        (unsigned long long)pos,
                        (unsigned long long)count);
        return lseg;
out_unlock:
        spin_unlock(&ino->i_lock);
        goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
        struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
        struct nfs4_layoutget_res *res = &lgp->res;
        struct pnfs_layout_segment *lseg;
        struct inode *ino = lo->plh_inode;
        int status = 0;

        /* Inject layout blob into I/O device driver */
        lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
        if (!lseg || IS_ERR(lseg)) {
                if (!lseg)
                        status = -ENOMEM;
                else
                        status = PTR_ERR(lseg);
                dprintk("%s: Could not allocate layout: error %d\n",
                       __func__, status);
                goto out;
        }

        spin_lock(&ino->i_lock);
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s forget reply due to recall\n", __func__);
                goto out_forget_reply;
        }

        if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
                dprintk("%s forget reply due to state\n", __func__);
                goto out_forget_reply;
        }
        init_lseg(lo, lseg);
        lseg->pls_range = res->range;
        pnfs_get_lseg(lseg);
        pnfs_layout_insert_lseg(lo, lseg);

        if (res->return_on_close) {
                set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
                set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
        }

        /* Done processing layoutget. Set the layout stateid */
        pnfs_set_layout_stateid(lo, &res->stateid, false);
        spin_unlock(&ino->i_lock);
        return lseg;
out:
        return ERR_PTR(status);

out_forget_reply:
        spin_unlock(&ino->i_lock);
        lseg->pls_layout = lo;
        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
        goto out;
}

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        BUG_ON(pgio->pg_lseg != NULL);

        if (req->wb_offset != req->wb_pgbase) {
                nfs_pageio_reset_read_mds(pgio);
                return;
        }
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
                                           req->wb_bytes,
                                           IOMODE_READ,
                                           GFP_KERNEL);
        /* If no lseg, fall back to read through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        BUG_ON(pgio->pg_lseg != NULL);

        if (req->wb_offset != req->wb_pgbase) {
                nfs_pageio_reset_write_mds(pgio);
                return;
        }
        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
                                           req->wb_bytes,
                                           IOMODE_RW,
                                           GFP_NOFS);
        /* If no lseg, fall back to write through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
                      const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (ld == NULL)
                nfs_pageio_init_read(pgio, inode, compl_ops);
        else
                nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
                       int ioflags,
                       const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (ld == NULL)
                nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
        else
                nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
}

bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                     struct nfs_page *req)
{
        if (pgio->pg_lseg == NULL)
                return nfs_generic_pg_test(pgio, prev, req);

        /*
         * Test if a nfs_page is fully contained in the pnfs_layout_range.
         * Note that this test makes several assumptions:
         * - that the previous nfs_page in the struct nfs_pageio_descriptor
         *   is known to lie within the range.
         * - that the nfs_page being tested is known to be contiguous with
         *   the previous nfs_page.
         * - Layout ranges are page aligned, so we only have to test the
         *   start offset of the request.
         *
         * Please also note that 'end_offset' is actually the offset of the
         * first byte that lies outside the pnfs_layout_range. FIXME?
         *
         */
        return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
                                         pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

int pnfs_write_done_resend_to_mds(struct inode *inode,
                                struct list_head *head,
                                const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_pageio_descriptor pgio;
        LIST_HEAD(failed);

        /* Resend all requests through the MDS */
        nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);

                nfs_list_remove_request(req);
                if (!nfs_pageio_add_request(&pgio, req))
                        nfs_list_add_request(req, &failed);
        }
        nfs_pageio_complete(&pgio);

        if (!list_empty(&failed)) {
                /* For some reason our attempt to resend pages failed.
                 * Mark the overall send request as having failed, and let
                 * nfs_writeback_release_full deal with the error.
                 */
1349                 list_move(&failed, head);
1350                 return -EIO;
1351         }
1352         return 0;
1353 }
1354 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
1355
1356 static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
1357 {
1358         struct nfs_pgio_header *hdr = data->header;
1359
1360         dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1361         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1362             PNFS_LAYOUTRET_ON_ERROR) {
1363                 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
1364                 pnfs_return_layout(hdr->inode);
1365         }
1366         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1367                 data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
1368                                                         &hdr->pages,
1369                                                         hdr->completion_ops);
1370 }
1371
1372 /*
1373  * Called by non rpc-based layout drivers
1374  */
1375 void pnfs_ld_write_done(struct nfs_write_data *data)
1376 {
1377         struct nfs_pgio_header *hdr = data->header;
1378
1379         if (!hdr->pnfs_error) {
1380                 pnfs_set_layoutcommit(data);
1381                 hdr->mds_ops->rpc_call_done(&data->task, data);
1382         } else
1383                 pnfs_ld_handle_write_error(data);
1384         hdr->mds_ops->rpc_release(data);
1385 }
1386 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1387
1388 static void
1389 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1390                 struct nfs_write_data *data)
1391 {
1392         struct nfs_pgio_header *hdr = data->header;
1393
1394         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1395                 list_splice_tail_init(&hdr->pages, &desc->pg_list);
1396                 nfs_pageio_reset_write_mds(desc);
1397                 desc->pg_recoalesce = 1;
1398         }
1399         nfs_writedata_release(data);
1400 }
1401
1402 static enum pnfs_try_status
1403 pnfs_try_to_write_data(struct nfs_write_data *wdata,
1404                         const struct rpc_call_ops *call_ops,
1405                         struct pnfs_layout_segment *lseg,
1406                         int how)
1407 {
1408         struct nfs_pgio_header *hdr = wdata->header;
1409         struct inode *inode = hdr->inode;
1410         enum pnfs_try_status trypnfs;
1411         struct nfs_server *nfss = NFS_SERVER(inode);
1412
1413         hdr->mds_ops = call_ops;
1414
1415         dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1416                 inode->i_ino, wdata->args.count, wdata->args.offset, how);
1417         trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
1418         if (trypnfs != PNFS_NOT_ATTEMPTED)
1419                 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1420         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1421         return trypnfs;
1422 }
1423
1424 static void
1425 pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
1426 {
1427         struct nfs_write_data *data;
1428         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1429         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1430
1431         desc->pg_lseg = NULL;
1432         while (!list_empty(head)) {
1433                 enum pnfs_try_status trypnfs;
1434
1435                 data = list_first_entry(head, struct nfs_write_data, list);
1436                 list_del_init(&data->list);
1437
1438                 trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
1439                 if (trypnfs == PNFS_NOT_ATTEMPTED)
1440                         pnfs_write_through_mds(desc, data);
1441         }
1442         pnfs_put_lseg(lseg);
1443 }
1444
1445 static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
1446 {
1447         pnfs_put_lseg(hdr->lseg);
1448         nfs_writehdr_free(hdr);
1449 }
1450 EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
1451
1452 int
1453 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1454 {
1455         struct nfs_write_header *whdr;
1456         struct nfs_pgio_header *hdr;
1457         int ret;
1458
1459         whdr = nfs_writehdr_alloc();
1460         if (!whdr) {
1461                 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1462                 pnfs_put_lseg(desc->pg_lseg);
1463                 desc->pg_lseg = NULL;
1464                 return -ENOMEM;
1465         }
1466         hdr = &whdr->header;
1467         nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
1468         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
1469         atomic_inc(&hdr->refcnt);
1470         ret = nfs_generic_flush(desc, hdr);
1471         if (ret != 0) {
1472                 pnfs_put_lseg(desc->pg_lseg);
1473                 desc->pg_lseg = NULL;
1474         } else
1475                 pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
1476         if (atomic_dec_and_test(&hdr->refcnt))
1477                 hdr->completion_ops->completion(hdr);
1478         return ret;
1479 }
1480 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1481
int pnfs_read_done_resend_to_mds(struct inode *inode,
                                struct list_head *head,
                                const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_pageio_descriptor pgio;
        LIST_HEAD(failed);

        /* Resend all requests through the MDS */
        nfs_pageio_init_read(&pgio, inode, compl_ops);
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);

                nfs_list_remove_request(req);
                if (!nfs_pageio_add_request(&pgio, req))
                        nfs_list_add_request(req, &failed);
        }
        nfs_pageio_complete(&pgio);

        if (!list_empty(&failed)) {
                /* list_move() would transfer only the on-stack list head
                 * and orphan the failed requests; splice the whole list
                 * back onto @head instead. */
                list_splice(&failed, head);
                return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

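/*
 * Handle a fatal read error reported by the layout driver: optionally
 * return the layout (PNFS_LAYOUTRET_ON_ERROR), then redirect the I/O
 * through the MDS unless a redo is already pending.
 */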
static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        dprintk("pnfs read error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
            PNFS_LAYOUTRET_ON_ERROR) {
                clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
                data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
                                                        hdr->completion_ops);
}

/*
 * Called by non-RPC-based layout drivers to signal read completion.
 */
void pnfs_ld_read_done(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        if (likely(!hdr->pnfs_error)) {
                __nfs4_read_done_cb(data);
                hdr->mds_ops->rpc_call_done(&data->task, data);
        } else
                pnfs_ld_handle_read_error(data);
        hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

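/*
 * Put the pages behind this read back on the descriptor, reset it to
 * read through the MDS, and release the layout-driver read data.
 */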
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
                struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                list_splice_tail_init(&hdr->pages, &desc->pg_list);
                nfs_pageio_reset_read_mds(desc);
                desc->pg_recoalesce = 1;
        }
        nfs_readdata_release(data);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
                       const struct rpc_call_ops *call_ops,
                       struct pnfs_layout_segment *lseg)
{
        struct nfs_pgio_header *hdr = rdata->header;
        struct inode *inode = hdr->inode;
        struct nfs_server *nfss = NFS_SERVER(inode);
        enum pnfs_try_status trypnfs;

        hdr->mds_ops = call_ops;

        dprintk("%s: Reading ino:%lu %u@%llu\n",
                __func__, inode->i_ino, rdata->args.count, rdata->args.offset);

        trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
        if (trypnfs != PNFS_NOT_ATTEMPTED)
                nfs_inc_stats(inode, NFSIOS_PNFS_READ);
        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
        return trypnfs;
}

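/*
 * Send each nfs_read_data on @head through the layout driver, routing
 * anything the driver refuses back through the MDS.  Consumes the
 * descriptor's layout segment reference.
 */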
static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
        struct nfs_read_data *data;
        const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
        struct pnfs_layout_segment *lseg = desc->pg_lseg;

        desc->pg_lseg = NULL;
        while (!list_empty(head)) {
                enum pnfs_try_status trypnfs;

                data = list_first_entry(head, struct nfs_read_data, list);
                list_del_init(&data->list);

                trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
                if (trypnfs == PNFS_NOT_ATTEMPTED)
                        pnfs_read_through_mds(desc, data);
        }
        pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
        pnfs_put_lseg(hdr->lseg);
        nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

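/*
 * pg_doio() hook for pNFS reads: allocate the read header, pin the
 * descriptor's layout segment, generate the read requests and send
 * them through the layout driver.
 */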
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
        struct nfs_read_header *rhdr;
        struct nfs_pgio_header *hdr;
        int ret;

        rhdr = nfs_readhdr_alloc();
        if (!rhdr) {
                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                ret = -ENOMEM;
                pnfs_put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
                return ret;
        }
        hdr = &rhdr->header;
        nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
        hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
        atomic_inc(&hdr->refcnt);
        ret = nfs_generic_pagein(desc, hdr);
        if (ret != 0) {
                pnfs_put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
        } else
                pnfs_do_multiple_reads(desc, &hdr->rpc_list);
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

/*
 * There can be multiple RW segments; gather those flagged
 * NFS_LSEG_LAYOUTCOMMIT onto @listp for the pending LAYOUTCOMMIT.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
        struct pnfs_layout_segment *lseg;

        list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
                if (lseg->pls_range.iomode == IOMODE_RW &&
                    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        list_add(&lseg->pls_lc_list, listp);
        }
}

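/*
 * Mark I/O in this segment's iomode as failed on the layout, so that
 * later I/O in that mode falls back to the MDS.
 */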
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
        pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

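/*
 * Note that a LAYOUTCOMMIT is needed for this inode: mark the inode and
 * the layout segment, and track the last write byte so a later
 * LAYOUTCOMMIT can report how much was written.
 */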
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
        struct nfs_pgio_header *hdr = wdata->header;
        struct inode *inode = hdr->inode;
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos = wdata->mds_offset + wdata->res.count;
        bool mark_as_dirty = false;

        spin_lock(&inode->i_lock);
        if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
                mark_as_dirty = true;
                dprintk("%s: Set layoutcommit for inode %lu ",
                        __func__, inode->i_ino);
        }
        if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
                /* references matched in nfs4_layoutcommit_release */
                pnfs_get_lseg(hdr->lseg);
        }
        if (end_pos > nfsi->layout->plh_lwb)
                nfsi->layout->plh_lwb = end_pos;
        spin_unlock(&inode->i_lock);
        dprintk("%s: lseg %p end_pos %llu\n",
                __func__, hdr->lseg, nfsi->layout->plh_lwb);

        /* If pnfs_layoutcommit_inode() runs after the i_lock is dropped
         * above, the layoutcommit triggered by the dirty flag below will
         * be a no-op, because NFS_INO_LAYOUTCOMMIT will have been cleared. */
        if (mark_as_dirty)
                mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

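/*
 * Give the layout driver a chance to clean up after a LAYOUTCOMMIT,
 * via its optional cleanup_layoutcommit hook.
 */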
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
        struct nfs_server *nfss = NFS_SERVER(data->args.inode);

        if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
                nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off and either a COMMIT is sent to a data server, or WRITEs to a
 * data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
        struct nfs4_layoutcommit_data *data;
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos;
        int status = 0;

        dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

        if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                return 0;

        /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
        data = kzalloc(sizeof(*data), GFP_NOFS);
        if (!data) {
                status = -ENOMEM;
                goto out;
        }

        if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                goto out_free;

        if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
                if (!sync) {
                        status = -EAGAIN;
                        goto out_free;
                }
                status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
                                        nfs_wait_bit_killable, TASK_KILLABLE);
                if (status)
                        goto out_free;
        }

        INIT_LIST_HEAD(&data->lseg_list);
        spin_lock(&inode->i_lock);
        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
                clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
                spin_unlock(&inode->i_lock);
                wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
                goto out_free;
        }

        pnfs_list_write_lseg(inode, &data->lseg_list);

        end_pos = nfsi->layout->plh_lwb;
        nfsi->layout->plh_lwb = 0;

        nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
        spin_unlock(&inode->i_lock);

        data->args.inode = inode;
        data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
        nfs_fattr_init(&data->fattr);
        data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
        data->res.fattr = &data->fattr;
        data->args.lastbytewritten = end_pos - 1;
        data->res.server = NFS_SERVER(inode);

        status = nfs4_proc_layoutcommit(data, sync);
out:
        if (status)
                mark_inode_dirty_sync(inode);
        dprintk("<-- %s status %d\n", __func__, status);
        return status;
out_free:
        kfree(data);
        goto out;
}

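/*
 * Allocate the nfs4_threshold used to cache the server's mdsthreshold
 * hints (RFC 5661), which suggest when I/O should go through the MDS
 * rather than through the layout.
 */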
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
        struct nfs4_threshold *thp;

        thp = kzalloc(sizeof(*thp), GFP_NOFS);
        if (!thp) {
                dprintk("%s mdsthreshold allocation failed\n", __func__);
                return NULL;
        }
        return thp;
}