/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

struct dentry *bl_device_pipe;
wait_queue_head_t bl_wq;

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk(" PagePrivate %d\n", PagePrivate(page));
	dprintk(" PageUptodate %d\n", PageUptodate(page));
	dprintk(" PageError %d\n", PageError(page));
	dprintk(" PageDirty %d\n", PageDirty(page));
	dprintk(" PageReferenced %d\n", PageReferenced(page));
	dprintk(" PageLocked %d\n", PageLocked(page));
	dprintk(" PageWriteback %d\n", PageWriteback(page));
	dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page));
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios. We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	struct rpc_call_ops call_ops;
	void (*pnfs_callback) (void *data);
	void *data;
};

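/* Allocate the refcounted tracking structure shared by all bios issued
 * for a single read or write request.  Returns NULL on allocation failure.
 */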
static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

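/* Submit the current bio, if any, taking a reference on the parallel_io so
 * the request is not completed before this bio's end_io runs.  Always
 * returns NULL so the caller starts over with a fresh bio.
 */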
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write",
			bio->bi_size, (unsigned long long)bio->bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

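/* Allocate a bio with room for npg pages and point it at the device and
 * device offset described by the extent covering isect.
 */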
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio)
		return NULL;

	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
	bio->bi_bdev = be->be_mdev;
	bio->bi_end_io = end_io;
	bio->bi_private = par;
	return bio;
}

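/* Add one page to the current bio, allocating a new bio if necessary.  If
 * the bio is full, it is submitted and the add is retried on a fresh bio.
 */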
static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

static void bl_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_RW) {
		dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
		set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
	} else {
		dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
		set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
	}
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate)
			SetPageUptodate(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!rdata->pnfs_error)
			rdata->pnfs_error = -EIO;
		bl_set_lo_fail(rdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);
	pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_read_data *rdata = data;

	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}

/* We don't want normal .rpc_call_done callback used, so we replace it
 * with this stub.
 */
static void bl_rpc_do_nothing(struct rpc_task *task, void *calldata)
{
}

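/* Read the pages described by rdata directly from the block devices named
 * in the layout extents, zero-filling any holes.  Returns PNFS_NOT_ATTEMPTED
 * (so the read falls back to the MDS) only if setup fails before any I/O
 * has been issued.
 */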
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t count = rdata->args.count;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

	dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__,
		rdata->npages, f_offset, count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->call_ops = *rdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
						isect, &cow_read);
			if (!be) {
				rdata->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}
		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par);
			if (IS_ERR(bio)) {
				rdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += PAGE_CACHE_SECTORS;
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = rdata->inode->i_size - f_offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

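/* Walk the extents covering [offset, offset + count) and queue any that are
 * still marked INVALID on the layout's commit list, so a later LAYOUTCOMMIT
 * reports them as written.
 */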
static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;

		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA)
			bl_mark_for_commit(be, isect, len); /* What if fails? */
		isect += len;
		bl_put_extent(be);
	}
}

static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		/* This is the zeroing page we added */
		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		bl_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		bl_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_write_data *wdata;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_write_data, task);
	if (!wdata->pnfs_error) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_write_data *wdata = data;

	wdata->task.tk_status = 0;
	wdata->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so it is not
 * used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block device.
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
		(be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}

/* Given an unmapped page, zero it or read in page for COW, page is locked
 * by caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}
	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}
	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	bl_put_extent(cow_read);
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

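/* Write the pages described by wdata directly to the block devices named in
 * the layout extents.  Pages that fall inside an INVALID extent but outside
 * the request are zeroed first so the whole block becomes readable, and the
 * written ranges are marked for a later LAYOUTCOMMIT.
 */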
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(wdata);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->call_ops = *wdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		wdata->pnfs_error = -EINVAL;
		goto out;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (; npg_zero > 0; npg_zero--) {
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page =
			    find_or_create_page(wdata->inode->i_mapping, index,
						GFP_NOFS);
			if (!page) {
				dprintk("%s oom\n", __func__);
				wdata->pnfs_error = -ENOMEM;
				goto out;
			}

			/* PageDirty: Other will write this out
			 * PageWriteback: Other is writing this out
			 * PageUptodate: It was read before
			 * sector_initialized: already written out
			 */
			if (PageDirty(page) || PageWriteback(page) ||
			    bl_is_sector_init(be->be_inval, isect)) {
				print_page(page);
				unlock_page(page);
				page_cache_release(page);
				goto next_page;
			}
			if (!PageUptodate(page)) {
				/* New page, readin or zero it */
				init_page_for_write(page, cow_read);
			}
			set_page_writeback(page);
			unlock_page(page);

			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS, NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				wdata->pnfs_error = ret;
				goto out;
			}
			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				wdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
						isect, NULL);
			if (!be || !is_writable(be, isect)) {
				wdata->pnfs_error = -EINVAL;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
		}
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS, NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				wdata->pnfs_error = ret;
				goto out;
			}
		}
		bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par);
		if (IS_ERR(bio)) {
			wdata->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
	if (count < wdata->res.count) {
		wdata->res.count = count;
	}
out:
	bl_put_extent(be);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

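/* Allocate and initialise the per-inode block layout header, including the
 * extent lists, commit lists, and invalid-sector tracking state.
 */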
static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args,
				      lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev;

		spin_lock(&mid->bm_lock);
		while (!list_empty(&mid->bm_devlist)) {
			dev = list_first_entry(&mid->bm_devlist,
					       struct pnfs_block_dev,
					       bm_node);
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		spin_unlock(&mid->bm_lock);
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv = NULL;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return NULL;
	}
	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return NULL;
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i])
			goto out_free;
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free;

	rv = nfs4_blk_decode_device(server, dev);
out_free:
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);
	kfree(dev);
	return rv;
}

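/* Mount-time setup: verify the server advertised a block size, then use
 * GETDEVICELIST and GETDEVICEINFO to build the per-server list of block
 * devices this layout driver may write to.
 */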
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status = 0, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (!bdev) {
				status = -ENODEV;
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

out_return:
	kfree(dlist);
	return status;

out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = pnfs_generic_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = pnfs_generic_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
};

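/* Layout driver operations registered with the generic pNFS code. */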
static struct pnfs_layoutdriver_type blocklayout_type = {
	.id = LAYOUT_BLOCK_VOLUME,
	.name = "LAYOUT_BLOCK_VOLUME",
	.read_pagelist = bl_read_pagelist,
	.write_pagelist = bl_write_pagelist,
	.alloc_layout_hdr = bl_alloc_layout_hdr,
	.free_layout_hdr = bl_free_layout_hdr,
	.alloc_lseg = bl_alloc_lseg,
	.free_lseg = bl_free_lseg,
	.encode_layoutcommit = bl_encode_layoutcommit,
	.cleanup_layoutcommit = bl_cleanup_layoutcommit,
	.set_layoutdriver = bl_set_layoutdriver,
	.clear_layoutdriver = bl_clear_layoutdriver,
	.pg_read_ops = &bl_pg_read_ops,
	.pg_write_ops = &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall = bl_pipe_upcall,
	.downcall = bl_pipe_downcall,
	.destroy_msg = bl_pipe_destroy_msg,
};

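/* Module init: register the layout driver with the pNFS core and create the
 * rpc_pipefs "blocklayout" pipe (using bl_upcall_ops) for device upcalls to
 * userspace.
 */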
static int __init nfs4blocklayout_init(void)
{
	struct vfsmount *mnt;
	struct path path;
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	init_waitqueue_head(&bl_wq);

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		ret = PTR_ERR(mnt);
		goto out_remove;
	}
	ret = vfs_path_lookup(mnt->mnt_root,
			      mnt,
			      NFS_PIPE_DIRNAME, 0, &path);
	if (ret)
		goto out_remove;
	bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
				    &bl_upcall_ops, 0);
	if (IS_ERR(bl_device_pipe)) {
		ret = PTR_ERR(bl_device_pipe);
		goto out_remove;
	}
out:
	return ret;

out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	pnfs_unregister_layoutdriver(&blocklayout_type);
	rpc_unlink(bl_device_pipe);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);