/*
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
struct nfs_read_header *nfs_readhdr_alloc(unsigned int pagecount)
{
	struct nfs_read_header *p;

	p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
	if (p) {
		struct nfs_pgio_header *hdr = &p->header;
		struct nfs_read_data *data = &p->rpc_data;

		INIT_LIST_HEAD(&hdr->pages);
		INIT_LIST_HEAD(&data->list);
		data->npages = pagecount;
		data->header = hdr;
		if (pagecount <= ARRAY_SIZE(data->page_array))
			data->pagevec = data->page_array;
		else {
			data->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
			if (!data->pagevec) {
				kmem_cache_free(nfs_rdata_cachep, p);
				p = NULL;
			}
		}
	}
	return p;
}
void nfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header);

	kmem_cache_free(nfs_rdata_cachep, rhdr);
}
void nfs_readdata_release(struct nfs_read_data *rdata)
{
	put_nfs_open_context(rdata->args.context);
	if (rdata->pagevec != rdata->page_array)
		kfree(rdata->pagevec);
	nfs_readhdr_free(rdata->header);
}
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/* Note: "remainder" can never be negative, since we check for
	 * this in the XDR code. */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			zero_user(*pages, base, remainder);
			break;
		}
		zero_user(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}
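
/*
 * Editor's note (not in the original file): a worked example of the
 * truncation above.  Assume PAGE_CACHE_SIZE = 4096, args.pgbase = 0,
 * args.count = 12288 (three pages), and the server replied with
 * res.count = 5000 and eof set.  Then remainder = 7288 and base = 5000,
 * so zeroing starts in page 5000 >> PAGE_CACHE_SHIFT = 1 at offset
 * 5000 & ~PAGE_CACHE_MASK = 904: first the remaining 3192 bytes of
 * page 1, then all 4096 bytes of page 2, i.e. exactly the 7288 bytes
 * the server never filled in.
 */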
void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
		struct inode *inode)
{
	nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
			NFS_SERVER(inode)->rsize, 0);
}

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_ops = &nfs_pageio_read_ops;
	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
		struct inode *inode)
{
	if (!pnfs_pageio_init_read(pgio, inode))
		nfs_pageio_init_read_mds(pgio, inode);
}
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_pageio_init_read(&pgio, inode);
	nfs_pageio_add_request(&pgio, new);
	nfs_pageio_complete(&pgio);
	return 0;
}
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->dentry->d_inode;

	if (PageUptodate(req->wb_page))
		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_release_request(req);
}
int nfs_initiate_read(struct rpc_clnt *clnt,
		      struct nfs_read_data *data,
		      const struct rpc_call_ops *call_ops)
{
	struct inode *inode = data->header->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->header->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags,
	};

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ "
			"offset %llu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			data->args.count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_read);
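
/*
 * Editor's note (not in the original file): nfs_initiate_read() only
 * dispatches the RPC.  rpc_run_task() returns as soon as the task is
 * queued; the read completes later, when the RPC layer invokes the
 * call_ops supplied here (rpc_call_done, then rpc_release, the latter
 * deferred to the nfsiod_workqueue named in task_setup_data), so a
 * zero return does not mean the data has arrived yet.
 */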
/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		unsigned int count, unsigned int offset)
{
	struct inode *inode = data->header->inode;

	data->header->req = req;
	data->header->inode = inode;
	data->header->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.eof = 0;
	nfs_fattr_init(&data->fattr);
}
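
/*
 * Editor's note (not in the original file): req_offset(req) is the byte
 * offset of the request in the file, so for a request covering the page
 * at file offset 65536 with wb_pgbase = 0, nfs_read_rpcsetup(req, data,
 * 1024, 2048) yields args.offset = 67584, args.pgbase = 2048 and
 * args.count = 1024, i.e. the third 1024-byte slice of that page as
 * generated by nfs_pagein_multi() below.
 */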
static int nfs_do_read(struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops)
{
	struct inode *inode = data->header->inode;

	return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops);
}
static int
nfs_do_multiple_reads(struct list_head *head,
		const struct rpc_call_ops *call_ops)
{
	struct nfs_read_data *data;
	int ret = 0;

	while (!list_empty(head)) {
		int ret2;

		data = list_entry(head->next, struct nfs_read_data, list);
		list_del_init(&data->list);

		ret2 = nfs_do_read(data, call_ops);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}
/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
	struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
	struct page *page = req->wb_page;
	struct nfs_read_header *rhdr;
	struct nfs_read_data *data;
	size_t rsize = desc->pg_bsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;

	nfs_list_remove_request(req);

	offset = 0;
	nbytes = desc->pg_count;
	do {
		size_t len = min(nbytes, rsize);

		rhdr = nfs_readhdr_alloc(1);
		if (!rhdr)
			goto out_bad;
		data = &rhdr->rpc_data;
		data->pagevec[0] = page;
		nfs_read_rpcsetup(req, data, len, offset);
		list_add(&data->list, res);
		requests++;
		nbytes -= len;
		offset += len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);
	desc->pg_rpc_callops = &nfs_read_partial_ops;
	return ret;

out_bad:
	while (!list_empty(res)) {
		data = list_entry(res->next, struct nfs_read_data, list);
		list_del(&data->list);
		nfs_readdata_release(data);
	}
	nfs_readpage_release(req);
	return -ENOMEM;
}
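
/*
 * Editor's sketch (not part of the original file): the rsize chunking
 * performed by the loop above, modelled in isolation.  The function and
 * variable names below are hypothetical; only the arithmetic mirrors
 * nfs_pagein_multi().
 */
static unsigned int nfs_read_chunks_needed(size_t count, size_t rsize)
{
	unsigned int requests = 0;
	size_t nbytes = count;

	do {
		size_t len = nbytes < rsize ? nbytes : rsize;

		/* nfs_pagein_multi() sets up one RPC of "len" bytes here,
		 * at offset (count - nbytes) into the page */
		requests++;
		nbytes -= len;
	} while (nbytes != 0);

	/* e.g. count = 4096, rsize = 1024 -> 4 requests; wb_complete is
	 * set to this value so the page is released only after all of
	 * them have finished. */
	return requests;
}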
static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_read_header *rhdr;
	struct nfs_read_data *data;
	struct list_head *head = &desc->pg_list;
	int ret = 0;

	rhdr = nfs_readhdr_alloc(nfs_page_array_len(desc->pg_base,
						    desc->pg_count));
	if (!rhdr) {
		nfs_async_read_error(head);
		ret = -ENOMEM;
		goto out;
	}

	data = &rhdr->rpc_data;
	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &rhdr->header.pages);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(rhdr->header.pages.next);

	nfs_read_rpcsetup(req, data, desc->pg_count, 0);
	list_add(&data->list, res);
	desc->pg_rpc_callops = &nfs_read_full_ops;
out:
	return ret;
}
int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	if (desc->pg_bsize < PAGE_CACHE_SIZE)
		return nfs_pagein_multi(desc, head);
	return nfs_pagein_one(desc, head);
}
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);
	int ret;

	ret = nfs_generic_pagein(desc, &head);
	if (ret == 0)
		ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
	return ret;
}
static const struct nfs_pageio_ops nfs_pageio_read_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_readpages,
};
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	struct inode *inode = data->header->inode;
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		return;

	/* This is a short read! */
	nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		return;

	/* Yes, so retry the read at the end of the data */
	data->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}
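
/*
 * Editor's sketch (not part of the original file): the short-read
 * adjustment above on plain integers.  The struct and function names
 * are hypothetical; only the arithmetic mirrors nfs_readpage_retry().
 */
struct read_window {
	unsigned long long offset;	/* file offset of the next READ */
	unsigned int pgbase;		/* offset into the first page */
	unsigned int count;		/* bytes still outstanding */
};

static void read_window_advance(struct read_window *w, unsigned int done)
{
	/* e.g. offset = 0, pgbase = 0, count = 16384, done = 4096 becomes
	 * offset = 4096, pgbase = 4096, count = 12288: the restarted RPC
	 * continues exactly where the server stopped instead of rereading
	 * the whole range. */
	w->offset += done;
	w->pgbase += done;
	w->count -= done;
}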
/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;

	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_retry(task, data);
}
static void nfs_readpage_release_partial(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->header->req;
	struct page *page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0)
		set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(data);
}
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
}
static const struct rpc_call_ops nfs_read_partial_ops = {
	.rpc_call_prepare = nfs_read_prepare,
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readpage_release_partial,
};
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}
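
/*
 * Editor's note (not in the original file): with PAGE_CACHE_SIZE = 4096,
 * args.pgbase = 0, args.count = 12288 and a short reply of
 * res.count = 10000 without eof, the loop above marks pages 0 and 1
 * uptodate (their 8192 bytes are fully valid) but not page 2, which only
 * received 1808 bytes; that page is completed later by the retry issued
 * from nfs_readpage_retry().
 */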
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first!
	 */
	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_set_pages_uptodate(data);
	nfs_readpage_retry(task, data);
}
static void nfs_readpage_release_full(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_pgio_header *hdr = data->header;

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_full_ops = {
	.rpc_call_prepare = nfs_read_prepare,
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readpage_release_full,
};
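
/*
 * Editor's note (not in the original file): nfs_read_full_ops completes
 * one RPC that covers a whole list of pages, so its release handler walks
 * hdr->pages and releases every request.  nfs_read_partial_ops completes
 * one of several RPCs for a single page (rsize smaller than the page
 * size), so its release handler instead decrements req->wb_complete and
 * only unlocks the page once the last slice has finished.
 */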
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}
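
/*
 * Editor's note (not in the original file): ->readpage is entered with
 * the page locked.  Every exit path above therefore either unlocks it
 * directly (out_unlock) or hands it to the async read path, which unlocks
 * it from nfs_readpage_release() once the RPC completes.
 */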
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
	return error;
}
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}
void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}