1 /*
2  * linux/fs/nfs/write.c
3  *
4  * Write file data over NFS.
5  *
6  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
7  */
8
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16 #include <linux/migrate.h>
17
18 #include <linux/sunrpc/clnt.h>
19 #include <linux/nfs_fs.h>
20 #include <linux/nfs_mount.h>
21 #include <linux/nfs_page.h>
22 #include <linux/backing-dev.h>
23
24 #include <asm/uaccess.h>
25
26 #include "delegation.h"
27 #include "internal.h"
28 #include "iostat.h"
29 #include "nfs4_fs.h"
30 #include "fscache.h"
31 #include "pnfs.h"
32
33 #define NFSDBG_FACILITY         NFSDBG_PAGECACHE
34
35 #define MIN_POOL_WRITE          (32)
36 #define MIN_POOL_COMMIT         (4)
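/*
 * Reserve sizes for the write and commit mempools: a mempool guarantees
 * at least this many allocations will succeed even under memory pressure,
 * so writeback can always make forward progress.
 */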
37
38 /*
39  * Local function declarations
40  */
41 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
42                                   struct inode *inode, int ioflags);
43 static void nfs_redirty_request(struct nfs_page *req);
44 static const struct rpc_call_ops nfs_write_partial_ops;
45 static const struct rpc_call_ops nfs_write_full_ops;
46 static const struct rpc_call_ops nfs_commit_ops;
47
48 static struct kmem_cache *nfs_wdata_cachep;
49 static mempool_t *nfs_wdata_mempool;
50 static mempool_t *nfs_commit_mempool;
51
52 struct nfs_write_data *nfs_commitdata_alloc(void)
53 {
54         struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
55
56         if (p) {
57                 memset(p, 0, sizeof(*p));
58                 INIT_LIST_HEAD(&p->pages);
59         }
60         return p;
61 }
62 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
63
64 void nfs_commit_free(struct nfs_write_data *p)
65 {
66         if (p && (p->pagevec != &p->page_array[0]))
67                 kfree(p->pagevec);
68         mempool_free(p, nfs_commit_mempool);
69 }
70 EXPORT_SYMBOL_GPL(nfs_commit_free);
71
72 struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
73 {
74         struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
75
76         if (p) {
77                 memset(p, 0, sizeof(*p));
78                 INIT_LIST_HEAD(&p->pages);
79                 p->npages = pagecount;
80                 if (pagecount <= ARRAY_SIZE(p->page_array))
81                         p->pagevec = p->page_array;
82                 else {
83                         p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
84                         if (!p->pagevec) {
85                                 mempool_free(p, nfs_wdata_mempool);
86                                 p = NULL;
87                         }
88                 }
89         }
90         return p;
91 }
92
93 void nfs_writedata_free(struct nfs_write_data *p)
94 {
95         if (p && (p->pagevec != &p->page_array[0]))
96                 kfree(p->pagevec);
97         mempool_free(p, nfs_wdata_mempool);
98 }
99
100 void nfs_writedata_release(struct nfs_write_data *wdata)
101 {
102         put_lseg(wdata->lseg);
103         put_nfs_open_context(wdata->args.context);
104         nfs_writedata_free(wdata);
105 }
106
107 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
108 {
109         ctx->error = error;
110         smp_wmb();
111         set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
112 }
113
114 static struct nfs_page *nfs_page_find_request_locked(struct page *page)
115 {
116         struct nfs_page *req = NULL;
117
118         if (PagePrivate(page)) {
119                 req = (struct nfs_page *)page_private(page);
120                 if (req != NULL)
121                         kref_get(&req->wb_kref);
122         }
123         return req;
124 }
125
126 static struct nfs_page *nfs_page_find_request(struct page *page)
127 {
128         struct inode *inode = page->mapping->host;
129         struct nfs_page *req = NULL;
130
131         spin_lock(&inode->i_lock);
132         req = nfs_page_find_request_locked(page);
133         spin_unlock(&inode->i_lock);
134         return req;
135 }
136
137 /* Adjust the file length if we're writing beyond the end */
138 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
139 {
140         struct inode *inode = page->mapping->host;
141         loff_t end, i_size;
142         pgoff_t end_index;
143
144         spin_lock(&inode->i_lock);
145         i_size = i_size_read(inode);
146         end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
147         if (i_size > 0 && page->index < end_index)
148                 goto out;
149         end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
150         if (i_size >= end)
151                 goto out;
152         i_size_write(inode, end);
153         nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
154 out:
155         spin_unlock(&inode->i_lock);
156 }
157
158 /* A writeback failed: mark the page as bad, and invalidate the page cache */
159 static void nfs_set_pageerror(struct page *page)
160 {
161         SetPageError(page);
162         nfs_zap_mapping(page->mapping->host, page->mapping);
163 }
164
165 /* We can set the PG_uptodate flag if we see that a write request
166  * covers the full page.
167  */
168 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
169 {
170         if (PageUptodate(page))
171                 return;
172         if (base != 0)
173                 return;
174         if (count != nfs_page_length(page))
175                 return;
176         SetPageUptodate(page);
177 }
178
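/*
 * Map the VM's writeback_control to RPC flush flags: writes issued for
 * memory reclaim get high priority and are sent stable, kupdate/background
 * writeback runs at low priority.  FLUSH_COND_STABLE lets the RPC setup
 * code decide later whether upgrading to a stable write can replace a
 * separate COMMIT.
 */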
179 static int wb_priority(struct writeback_control *wbc)
180 {
181         if (wbc->for_reclaim)
182                 return FLUSH_HIGHPRI | FLUSH_STABLE;
183         if (wbc->for_kupdate || wbc->for_background)
184                 return FLUSH_LOWPRI | FLUSH_COND_STABLE;
185         return FLUSH_COND_STABLE;
186 }
187
188 /*
189  * NFS congestion control
190  */
191
192 int nfs_congestion_kb;
193
194 #define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
195 #define NFS_CONGESTION_OFF_THRESH       \
196         (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
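/*
 * Congestion is signalled on the bdi once the number of pages under
 * writeback exceeds the "on" threshold, and is cleared again only when it
 * drops below the "off" threshold (three quarters of "on"), giving some
 * hysteresis so the congested state does not flap.
 */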
197
198 static int nfs_set_page_writeback(struct page *page)
199 {
200         int ret = test_set_page_writeback(page);
201
202         if (!ret) {
203                 struct inode *inode = page->mapping->host;
204                 struct nfs_server *nfss = NFS_SERVER(inode);
205
206                 page_cache_get(page);
207                 if (atomic_long_inc_return(&nfss->writeback) >
208                                 NFS_CONGESTION_ON_THRESH) {
209                         set_bdi_congested(&nfss->backing_dev_info,
210                                                 BLK_RW_ASYNC);
211                 }
212         }
213         return ret;
214 }
215
216 static void nfs_end_page_writeback(struct page *page)
217 {
218         struct inode *inode = page->mapping->host;
219         struct nfs_server *nfss = NFS_SERVER(inode);
220
221         end_page_writeback(page);
222         page_cache_release(page);
223         if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
224                 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
225 }
226
227 static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
228 {
229         struct inode *inode = page->mapping->host;
230         struct nfs_page *req;
231         int ret;
232
233         spin_lock(&inode->i_lock);
234         for (;;) {
235                 req = nfs_page_find_request_locked(page);
236                 if (req == NULL)
237                         break;
238                 if (nfs_set_page_tag_locked(req))
239                         break;
240                 /* Note: If we hold the page lock, as is the case in nfs_writepage,
241                  *       then the call to nfs_set_page_tag_locked() will always
242                  *       succeed provided that someone hasn't already marked the
243                  *       request as dirty (in which case we don't care).
244                  */
245                 spin_unlock(&inode->i_lock);
246                 if (!nonblock)
247                         ret = nfs_wait_on_request(req);
248                 else
249                         ret = -EAGAIN;
250                 nfs_release_request(req);
251                 if (ret != 0)
252                         return ERR_PTR(ret);
253                 spin_lock(&inode->i_lock);
254         }
255         spin_unlock(&inode->i_lock);
256         return req;
257 }
258
259 /*
260  * Find an associated nfs write request, and prepare to flush it out
261  * May return an error if the user signalled nfs_wait_on_request().
262  */
263 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
264                                 struct page *page, bool nonblock)
265 {
266         struct nfs_page *req;
267         int ret = 0;
268
269         req = nfs_find_and_lock_request(page, nonblock);
270         if (!req)
271                 goto out;
272         ret = PTR_ERR(req);
273         if (IS_ERR(req))
274                 goto out;
275
276         ret = nfs_set_page_writeback(page);
277         BUG_ON(ret != 0);
278         BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
279
280         if (!nfs_pageio_add_request(pgio, req)) {
281                 nfs_redirty_request(req);
282                 ret = pgio->pg_error;
283         }
284 out:
285         return ret;
286 }
287
288 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
289 {
290         struct inode *inode = page->mapping->host;
291         int ret;
292
293         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
294         nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
295
296         nfs_pageio_cond_complete(pgio, page->index);
297         ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
298         if (ret == -EAGAIN) {
299                 redirty_page_for_writepage(wbc, page);
300                 ret = 0;
301         }
302         return ret;
303 }
304
305 /*
306  * Write an mmapped page to the server.
307  */
308 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
309 {
310         struct nfs_pageio_descriptor pgio;
311         int err;
312
313         nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
314         err = nfs_do_writepage(page, wbc, &pgio);
315         nfs_pageio_complete(&pgio);
316         if (err < 0)
317                 return err;
318         if (pgio.pg_error < 0)
319                 return pgio.pg_error;
320         return 0;
321 }
322
323 int nfs_writepage(struct page *page, struct writeback_control *wbc)
324 {
325         int ret;
326
327         ret = nfs_writepage_locked(page, wbc);
328         unlock_page(page);
329         return ret;
330 }
331
332 static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
333 {
334         int ret;
335
336         ret = nfs_do_writepage(page, wbc, data);
337         unlock_page(page);
338         return ret;
339 }
340
341 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
342 {
343         struct inode *inode = mapping->host;
344         unsigned long *bitlock = &NFS_I(inode)->flags;
345         struct nfs_pageio_descriptor pgio;
346         int err;
347
348         /* Stop dirtying of new pages while we sync */
349         err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
350                         nfs_wait_bit_killable, TASK_KILLABLE);
351         if (err)
352                 goto out_err;
353
354         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
355
356         nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
357         err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
358         nfs_pageio_complete(&pgio);
359
360         clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
361         smp_mb__after_clear_bit();
362         wake_up_bit(bitlock, NFS_INO_FLUSHING);
363
364         if (err < 0)
365                 goto out_err;
366         err = pgio.pg_error;
367         if (err < 0)
368                 goto out_err;
369         return 0;
370 out_err:
371         return err;
372 }
373
374 /*
375  * Insert a write request into an inode
376  */
377 static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
378 {
379         struct nfs_inode *nfsi = NFS_I(inode);
380         int error;
381
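        /*
         * Preload the radix tree node allocator outside the spinlock:
         * a GFP_NOFS allocation may sleep here, and preloading guarantees
         * that the radix_tree_insert() under i_lock below cannot fail
         * (hence the BUG_ON on its return value).
         */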
382         error = radix_tree_preload(GFP_NOFS);
383         if (error != 0)
384                 goto out;
385
386         /* Lock the request! */
387         nfs_lock_request_dontget(req);
388
389         spin_lock(&inode->i_lock);
390         error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
391         BUG_ON(error);
392         if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
393                 inode->i_version++;
394         set_bit(PG_MAPPED, &req->wb_flags);
395         SetPagePrivate(req->wb_page);
396         set_page_private(req->wb_page, (unsigned long)req);
397         nfsi->npages++;
398         kref_get(&req->wb_kref);
399         radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
400                                 NFS_PAGE_TAG_LOCKED);
401         spin_unlock(&inode->i_lock);
402         radix_tree_preload_end();
403 out:
404         return error;
405 }
406
407 /*
408  * Remove a write request from an inode
409  */
410 static void nfs_inode_remove_request(struct nfs_page *req)
411 {
412         struct inode *inode = req->wb_context->dentry->d_inode;
413         struct nfs_inode *nfsi = NFS_I(inode);
414
415         BUG_ON(!NFS_WBACK_BUSY(req));
416
417         spin_lock(&inode->i_lock);
418         set_page_private(req->wb_page, 0);
419         ClearPagePrivate(req->wb_page);
420         clear_bit(PG_MAPPED, &req->wb_flags);
421         radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
422         nfsi->npages--;
423         spin_unlock(&inode->i_lock);
424         nfs_release_request(req);
425 }
426
427 static void
428 nfs_mark_request_dirty(struct nfs_page *req)
429 {
430         __set_page_dirty_nobuffers(req->wb_page);
431 }
432
433 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
434 /*
435  * Add a request to the inode's commit list.
436  */
437 static void
438 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
439 {
440         struct inode *inode = req->wb_context->dentry->d_inode;
441         struct nfs_inode *nfsi = NFS_I(inode);
442
443         spin_lock(&inode->i_lock);
444         set_bit(PG_CLEAN, &(req)->wb_flags);
445         radix_tree_tag_set(&nfsi->nfs_page_tree,
446                         req->wb_index,
447                         NFS_PAGE_TAG_COMMIT);
448         nfsi->ncommit++;
449         spin_unlock(&inode->i_lock);
450         pnfs_mark_request_commit(req, lseg);
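        /*
         * Account the page as "unstable" so the VM knows that reclaiming
         * it requires a COMMIT rather than another WRITE, and mark the
         * inode dirty so a later sync will actually issue that COMMIT.
         */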
451         inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
452         inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
453         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
454 }
455
456 static int
457 nfs_clear_request_commit(struct nfs_page *req)
458 {
459         struct page *page = req->wb_page;
460
461         if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
462                 dec_zone_page_state(page, NR_UNSTABLE_NFS);
463                 dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
464                 return 1;
465         }
466         return 0;
467 }
468
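/*
 * A reply of NFS_FILE_SYNC never needs a COMMIT, an unstable reply always
 * does, and NFS_DATA_SYNC needs one only when the write went through the
 * MDS (i.e. no layout segment is attached to the data).
 */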
469 static inline
470 int nfs_write_need_commit(struct nfs_write_data *data)
471 {
472         if (data->verf.committed == NFS_DATA_SYNC)
473                 return data->lseg == NULL;
474         else
475                 return data->verf.committed != NFS_FILE_SYNC;
476 }
477
478 static inline
479 int nfs_reschedule_unstable_write(struct nfs_page *req,
480                                   struct nfs_write_data *data)
481 {
482         if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
483                 nfs_mark_request_commit(req, data->lseg);
484                 return 1;
485         }
486         if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
487                 nfs_mark_request_dirty(req);
488                 return 1;
489         }
490         return 0;
491 }
492 #else
493 static inline void
494 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
495 {
496 }
497
498 static inline int
499 nfs_clear_request_commit(struct nfs_page *req)
500 {
501         return 0;
502 }
503
504 static inline
505 int nfs_write_need_commit(struct nfs_write_data *data)
506 {
507         return 0;
508 }
509
510 static inline
511 int nfs_reschedule_unstable_write(struct nfs_page *req,
512                                   struct nfs_write_data *data)
513 {
514         return 0;
515 }
516 #endif
517
518 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
519 static int
520 nfs_need_commit(struct nfs_inode *nfsi)
521 {
522         return radix_tree_tagged(&nfsi->nfs_page_tree, NFS_PAGE_TAG_COMMIT);
523 }
524
525 /*
526  * nfs_scan_commit - Scan an inode for commit requests
527  * @inode: NFS inode to scan
528  * @dst: destination list
529  * @idx_start: lower bound of page->index to scan.
530  * @npages: idx_start + npages sets the upper bound to scan.
531  *
532  * Moves requests from the inode's 'commit' request list.
533  * The requests are *not* checked to ensure that they form a contiguous set.
534  */
535 static int
536 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
537 {
538         struct nfs_inode *nfsi = NFS_I(inode);
539         int ret;
540
541         if (!nfs_need_commit(nfsi))
542                 return 0;
543
544         spin_lock(&inode->i_lock);
545         ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
546         if (ret > 0)
547                 nfsi->ncommit -= ret;
548         spin_unlock(&inode->i_lock);
549
550         if (nfs_need_commit(NFS_I(inode)))
551                 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
552
553         return ret;
554 }
555 #else
556 static inline int nfs_need_commit(struct nfs_inode *nfsi)
557 {
558         return 0;
559 }
560
561 static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
562 {
563         return 0;
564 }
565 #endif
566
567 /*
568  * Search for an existing write request, and attempt to update
569  * it to reflect a new dirty region on a given page.
570  *
571  * If the attempt fails, then the existing request is flushed out
572  * to disk.
573  */
574 static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
575                 struct page *page,
576                 unsigned int offset,
577                 unsigned int bytes)
578 {
579         struct nfs_page *req;
580         unsigned int rqend;
581         unsigned int end;
582         int error;
583
584         if (!PagePrivate(page))
585                 return NULL;
586
587         end = offset + bytes;
588         spin_lock(&inode->i_lock);
589
590         for (;;) {
591                 req = nfs_page_find_request_locked(page);
592                 if (req == NULL)
593                         goto out_unlock;
594
595                 rqend = req->wb_offset + req->wb_bytes;
596                 /*
597                  * Tell the caller to flush out the request if
598                  * the offsets are non-contiguous.
599                  * Note: nfs_flush_incompatible() will already
600                  * have flushed out requests having wrong owners.
601                  */
602                 if (offset > rqend
603                     || end < req->wb_offset)
604                         goto out_flushme;
605
606                 if (nfs_set_page_tag_locked(req))
607                         break;
608
609                 /* The request is locked, so wait and then retry */
610                 spin_unlock(&inode->i_lock);
611                 error = nfs_wait_on_request(req);
612                 nfs_release_request(req);
613                 if (error != 0)
614                         goto out_err;
615                 spin_lock(&inode->i_lock);
616         }
617
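        /*
         * The request is about to be redirtied with new data, so any
         * pending commit state is stale: drop it from the commit list
         * and clear its radix tree tag before merging the regions.
         */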
618         if (nfs_clear_request_commit(req) &&
619             radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
620                                  req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL) {
621                 NFS_I(inode)->ncommit--;
622                 pnfs_clear_request_commit(req);
623         }
624
625         /* Okay, the request matches. Update the region */
626         if (offset < req->wb_offset) {
627                 req->wb_offset = offset;
628                 req->wb_pgbase = offset;
629         }
630         if (end > rqend)
631                 req->wb_bytes = end - req->wb_offset;
632         else
633                 req->wb_bytes = rqend - req->wb_offset;
634 out_unlock:
635         spin_unlock(&inode->i_lock);
636         return req;
637 out_flushme:
638         spin_unlock(&inode->i_lock);
639         nfs_release_request(req);
640         error = nfs_wb_page(inode, page);
641 out_err:
642         return ERR_PTR(error);
643 }
644
645 /*
646  * Try to update an existing write request, or create one if there is none.
647  *
648  * Note: Should always be called with the Page Lock held to prevent races
649  * if we have to add a new request. Also assumes that the caller has
650  * already called nfs_flush_incompatible() if necessary.
651  */
652 static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
653                 struct page *page, unsigned int offset, unsigned int bytes)
654 {
655         struct inode *inode = page->mapping->host;
656         struct nfs_page *req;
657         int error;
658
659         req = nfs_try_to_update_request(inode, page, offset, bytes);
660         if (req != NULL)
661                 goto out;
662         req = nfs_create_request(ctx, inode, page, offset, bytes);
663         if (IS_ERR(req))
664                 goto out;
665         error = nfs_inode_add_request(inode, req);
666         if (error != 0) {
667                 nfs_release_request(req);
668                 req = ERR_PTR(error);
669         }
670 out:
671         return req;
672 }
673
674 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
675                 unsigned int offset, unsigned int count)
676 {
677         struct nfs_page *req;
678
679         req = nfs_setup_write_request(ctx, page, offset, count);
680         if (IS_ERR(req))
681                 return PTR_ERR(req);
682         /* Update file length */
683         nfs_grow_file(page, offset, count);
684         nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
685         nfs_mark_request_dirty(req);
686         nfs_clear_page_tag_locked(req);
687         return 0;
688 }
689
690 int nfs_flush_incompatible(struct file *file, struct page *page)
691 {
692         struct nfs_open_context *ctx = nfs_file_open_context(file);
693         struct nfs_page *req;
694         int do_flush, status;
695         /*
696          * Look for a request corresponding to this page. If there
697          * is one, and it belongs to another file, we flush it out
698          * before we try to copy anything into the page. Do this
699          * due to the lack of an ACCESS-type call in NFSv2.
700          * Also do the same if we find a request from an existing
701          * dropped page.
702          */
703         do {
704                 req = nfs_page_find_request(page);
705                 if (req == NULL)
706                         return 0;
707                 do_flush = req->wb_page != page || req->wb_context != ctx ||
708                         req->wb_lock_context->lockowner != current->files ||
709                         req->wb_lock_context->pid != current->tgid;
710                 nfs_release_request(req);
711                 if (!do_flush)
712                         return 0;
713                 status = nfs_wb_page(page->mapping->host, page);
714         } while (status == 0);
715         return status;
716 }
717
718 /*
719  * If the page cache is marked as unsafe or invalid, then we can't rely on
720  * the PageUptodate() flag. In this case, we will need to turn off
721  * write optimisations that depend on the page contents being correct.
722  */
723 static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
724 {
725         return PageUptodate(page) &&
726                 !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
727 }
728
729 /*
730  * Update and possibly write a cached page of an NFS file.
731  *
732  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
733  * things with a page scheduled for an RPC call (e.g. invalidate it).
734  */
735 int nfs_updatepage(struct file *file, struct page *page,
736                 unsigned int offset, unsigned int count)
737 {
738         struct nfs_open_context *ctx = nfs_file_open_context(file);
739         struct inode    *inode = page->mapping->host;
740         int             status = 0;
741
742         nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
743
744         dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
745                 file->f_path.dentry->d_parent->d_name.name,
746                 file->f_path.dentry->d_name.name, count,
747                 (long long)(page_offset(page) + offset));
748
749         /* If we're not using byte range locks, and we know the page
750          * is up to date, it may be more efficient to extend the write
751          * to cover the entire page in order to avoid fragmentation
752          * inefficiencies.
753          */
754         if (nfs_write_pageuptodate(page, inode) &&
755                         inode->i_flock == NULL &&
756                         !(file->f_flags & O_DSYNC)) {
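                /*
                 * Extend the write to start at the beginning of the page
                 * and run to the end of the written region or the end of
                 * the page's valid data, whichever is larger; writing the
                 * whole page avoids fragmenting the dirty range.
                 */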
757                 count = max(count + offset, nfs_page_length(page));
758                 offset = 0;
759         }
760
761         status = nfs_writepage_setup(ctx, page, offset, count);
762         if (status < 0)
763                 nfs_set_pageerror(page);
764         else
765                 __set_page_dirty_nobuffers(page);
766
767         dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
768                         status, (long long)i_size_read(inode));
769         return status;
770 }
771
772 static void nfs_writepage_release(struct nfs_page *req,
773                                   struct nfs_write_data *data)
774 {
775         struct page *page = req->wb_page;
776
777         if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
778                 nfs_inode_remove_request(req);
779         nfs_clear_page_tag_locked(req);
780         nfs_end_page_writeback(page);
781 }
782
783 static int flush_task_priority(int how)
784 {
785         switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
786                 case FLUSH_HIGHPRI:
787                         return RPC_PRIORITY_HIGH;
788                 case FLUSH_LOWPRI:
789                         return RPC_PRIORITY_LOW;
790         }
791         return RPC_PRIORITY_NORMAL;
792 }
793
794 int nfs_initiate_write(struct nfs_write_data *data,
795                        struct rpc_clnt *clnt,
796                        const struct rpc_call_ops *call_ops,
797                        int how)
798 {
799         struct inode *inode = data->inode;
800         int priority = flush_task_priority(how);
801         struct rpc_task *task;
802         struct rpc_message msg = {
803                 .rpc_argp = &data->args,
804                 .rpc_resp = &data->res,
805                 .rpc_cred = data->cred,
806         };
807         struct rpc_task_setup task_setup_data = {
808                 .rpc_client = clnt,
809                 .task = &data->task,
810                 .rpc_message = &msg,
811                 .callback_ops = call_ops,
812                 .callback_data = data,
813                 .workqueue = nfsiod_workqueue,
814                 .flags = RPC_TASK_ASYNC,
815                 .priority = priority,
816         };
817         int ret = 0;
818
819         /* Set up the initial task struct.  */
820         NFS_PROTO(inode)->write_setup(data, &msg);
821
822         dprintk("NFS: %5u initiated write call "
823                 "(req %s/%lld, %u bytes @ offset %llu)\n",
824                 data->task.tk_pid,
825                 inode->i_sb->s_id,
826                 (long long)NFS_FILEID(inode),
827                 data->args.count,
828                 (unsigned long long)data->args.offset);
829
830         task = rpc_run_task(&task_setup_data);
831         if (IS_ERR(task)) {
832                 ret = PTR_ERR(task);
833                 goto out;
834         }
835         if (how & FLUSH_SYNC) {
836                 ret = rpc_wait_for_completion_task(task);
837                 if (ret == 0)
838                         ret = task->tk_status;
839         }
840         rpc_put_task(task);
841 out:
842         return ret;
843 }
844 EXPORT_SYMBOL_GPL(nfs_initiate_write);
845
846 /*
847  * Set up the argument/result storage required for the RPC call.
848  */
849 static void nfs_write_rpcsetup(struct nfs_page *req,
850                 struct nfs_write_data *data,
851                 unsigned int count, unsigned int offset,
852                 int how)
853 {
854         struct inode *inode = req->wb_context->dentry->d_inode;
855
856         /* Set up the RPC argument and reply structs
857          * NB: take care not to mess about with data->commit et al. */
858
859         data->req = req;
860         data->inode = inode = req->wb_context->dentry->d_inode;
861         data->cred = req->wb_context->cred;
862
863         data->args.fh     = NFS_FH(inode);
864         data->args.offset = req_offset(req) + offset;
865         /* pnfs_set_layoutcommit needs this */
866         data->mds_offset = data->args.offset;
867         data->args.pgbase = req->wb_pgbase + offset;
868         data->args.pages  = data->pagevec;
869         data->args.count  = count;
870         data->args.context = get_nfs_open_context(req->wb_context);
871         data->args.lock_context = req->wb_lock_context;
872         data->args.stable  = NFS_UNSTABLE;
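        /*
         * Default to an unstable write.  FLUSH_STABLE forces FILE_SYNC;
         * FLUSH_COND_STABLE upgrades to FILE_SYNC only when nothing is
         * already queued for commit, so a single stable write can replace
         * the WRITE + COMMIT pair.
         */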
873         switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
874         case 0:
875                 break;
876         case FLUSH_COND_STABLE:
877                 if (nfs_need_commit(NFS_I(inode)))
878                         break;
879         default:
880                 data->args.stable = NFS_FILE_SYNC;
881         }
882
883         data->res.fattr   = &data->fattr;
884         data->res.count   = count;
885         data->res.verf    = &data->verf;
886         nfs_fattr_init(&data->fattr);
887 }
888
889 static int nfs_do_write(struct nfs_write_data *data,
890                 const struct rpc_call_ops *call_ops,
891                 int how)
892 {
893         struct inode *inode = data->args.context->dentry->d_inode;
894
895         return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
896 }
897
898 static int nfs_do_multiple_writes(struct list_head *head,
899                 const struct rpc_call_ops *call_ops,
900                 int how)
901 {
902         struct nfs_write_data *data;
903         int ret = 0;
904
905         while (!list_empty(head)) {
906                 int ret2;
907
908                 data = list_entry(head->next, struct nfs_write_data, list);
909                 list_del_init(&data->list);
910
911                 ret2 = nfs_do_write(data, call_ops, how);
912                 if (ret == 0)
913                         ret = ret2;
914         }
915         return ret;
916 }
917
918 /* If an nfs_flush_* function fails, it should remove reqs from @head and
919  * call this on each, which will prepare them to be retried on next
920  * writeback using standard nfs.
921  */
922 static void nfs_redirty_request(struct nfs_page *req)
923 {
924         struct page *page = req->wb_page;
925
926         nfs_mark_request_dirty(req);
927         nfs_clear_page_tag_locked(req);
928         nfs_end_page_writeback(page);
929 }
930
931 /*
932  * Generate multiple small requests to write out a single
933  * contiguous dirty area on one page.
934  */
935 static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
936 {
937         struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
938         struct page *page = req->wb_page;
939         struct nfs_write_data *data;
940         size_t wsize = desc->pg_bsize, nbytes;
941         unsigned int offset;
942         int requests = 0;
943         int ret = 0;
944
945         nfs_list_remove_request(req);
946
947         if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
948             (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
949              desc->pg_count > wsize))
950                 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
951
952
953         offset = 0;
954         nbytes = desc->pg_count;
955         do {
956                 size_t len = min(nbytes, wsize);
957
958                 data = nfs_writedata_alloc(1);
959                 if (!data)
960                         goto out_bad;
961                 data->pagevec[0] = page;
962                 nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
963                 list_add(&data->list, res);
964                 requests++;
965                 nbytes -= len;
966                 offset += len;
967         } while (nbytes != 0);
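        /*
         * wb_complete counts the partial RPCs covering this request; the
         * page is only unlocked and the request released once the last
         * partial write completes (see nfs_writeback_release_partial).
         */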
968         atomic_set(&req->wb_complete, requests);
969         desc->pg_rpc_callops = &nfs_write_partial_ops;
970         return ret;
971
972 out_bad:
973         while (!list_empty(res)) {
974                 data = list_entry(res->next, struct nfs_write_data, list);
975                 list_del(&data->list);
976                 nfs_writedata_free(data);
977         }
978         nfs_redirty_request(req);
979         return -ENOMEM;
980 }
981
982 /*
983  * Create an RPC task for the given write request and kick it.
984  * The page must have been locked by the caller.
985  *
986  * It may happen that the page we're passed is not marked dirty.
987  * This is the case if nfs_updatepage detects a conflicting request
988  * that has been written but not committed.
989  */
990 static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
991 {
992         struct nfs_page         *req;
993         struct page             **pages;
994         struct nfs_write_data   *data;
995         struct list_head *head = &desc->pg_list;
996         int ret = 0;
997
998         data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
999                                                       desc->pg_count));
1000         if (!data) {
1001                 while (!list_empty(head)) {
1002                         req = nfs_list_entry(head->next);
1003                         nfs_list_remove_request(req);
1004                         nfs_redirty_request(req);
1005                 }
1006                 ret = -ENOMEM;
1007                 goto out;
1008         }
1009         pages = data->pagevec;
1010         while (!list_empty(head)) {
1011                 req = nfs_list_entry(head->next);
1012                 nfs_list_remove_request(req);
1013                 nfs_list_add_request(req, &data->pages);
1014                 *pages++ = req->wb_page;
1015         }
1016         req = nfs_list_entry(data->pages.next);
1017
1018         if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1019             (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
1020                 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
1021
1022         /* Set up the argument struct */
1023         nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
1024         list_add(&data->list, res);
1025         desc->pg_rpc_callops = &nfs_write_full_ops;
1026 out:
1027         return ret;
1028 }
1029
1030 int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
1031 {
1032         if (desc->pg_bsize < PAGE_CACHE_SIZE)
1033                 return nfs_flush_multi(desc, head);
1034         return nfs_flush_one(desc, head);
1035 }
1036
1037 static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1038 {
1039         LIST_HEAD(head);
1040         int ret;
1041
1042         ret = nfs_generic_flush(desc, &head);
1043         if (ret == 0)
1044                 ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
1045                                 desc->pg_ioflags);
1046         return ret;
1047 }
1048
1049 static const struct nfs_pageio_ops nfs_pageio_write_ops = {
1050         .pg_test = nfs_generic_pg_test,
1051         .pg_doio = nfs_generic_pg_writepages,
1052 };
1053
1054 static void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
1055                                   struct inode *inode, int ioflags)
1056 {
1057         nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
1058                                 NFS_SERVER(inode)->wsize, ioflags);
1059 }
1060
1061 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1062 {
1063         pgio->pg_ops = &nfs_pageio_write_ops;
1064         pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1065 }
1066 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1067
1068 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1069                                   struct inode *inode, int ioflags)
1070 {
1071         if (!pnfs_pageio_init_write(pgio, inode, ioflags))
1072                 nfs_pageio_init_write_mds(pgio, inode, ioflags);
1073 }
1074
1075 /*
1076  * Handle a write reply that flushed part of a page.
1077  */
1078 static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
1079 {
1080         struct nfs_write_data   *data = calldata;
1081
1082         dprintk("NFS: %5u write(%s/%lld %d@%lld)",
1083                 task->tk_pid,
1084                 data->req->wb_context->dentry->d_inode->i_sb->s_id,
1085                 (long long)
1086                   NFS_FILEID(data->req->wb_context->dentry->d_inode),
1087                 data->req->wb_bytes, (long long)req_offset(data->req));
1088
1089         nfs_writeback_done(task, data);
1090 }
1091
1092 static void nfs_writeback_release_partial(void *calldata)
1093 {
1094         struct nfs_write_data   *data = calldata;
1095         struct nfs_page         *req = data->req;
1096         struct page             *page = req->wb_page;
1097         int status = data->task.tk_status;
1098
1099         if (status < 0) {
1100                 nfs_set_pageerror(page);
1101                 nfs_context_set_write_error(req->wb_context, status);
1102                 dprintk(", error = %d\n", status);
1103                 goto out;
1104         }
1105
1106         if (nfs_write_need_commit(data)) {
1107                 struct inode *inode = page->mapping->host;
1108
1109                 spin_lock(&inode->i_lock);
1110                 if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
1111                         /* Do nothing; we need to resend the writes */
1112                 } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
1113                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1114                         dprintk(" defer commit\n");
1115                 } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
1116                         set_bit(PG_NEED_RESCHED, &req->wb_flags);
1117                         clear_bit(PG_NEED_COMMIT, &req->wb_flags);
1118                         dprintk(" server reboot detected\n");
1119                 }
1120                 spin_unlock(&inode->i_lock);
1121         } else
1122                 dprintk(" OK\n");
1123
1124 out:
1125         if (atomic_dec_and_test(&req->wb_complete))
1126                 nfs_writepage_release(req, data);
1127         nfs_writedata_release(calldata);
1128 }
1129
1130 #if defined(CONFIG_NFS_V4_1)
1131 void nfs_write_prepare(struct rpc_task *task, void *calldata)
1132 {
1133         struct nfs_write_data *data = calldata;
1134
1135         if (nfs4_setup_sequence(NFS_SERVER(data->inode),
1136                                 &data->args.seq_args,
1137                                 &data->res.seq_res, 1, task))
1138                 return;
1139         rpc_call_start(task);
1140 }
1141 #endif /* CONFIG_NFS_V4_1 */
1142
1143 static const struct rpc_call_ops nfs_write_partial_ops = {
1144 #if defined(CONFIG_NFS_V4_1)
1145         .rpc_call_prepare = nfs_write_prepare,
1146 #endif /* CONFIG_NFS_V4_1 */
1147         .rpc_call_done = nfs_writeback_done_partial,
1148         .rpc_release = nfs_writeback_release_partial,
1149 };
1150
1151 /*
1152  * Handle a write reply that flushes a whole page.
1153  *
1154  * FIXME: There is an inherent race with invalidate_inode_pages and
1155  *        writebacks since the page->count is kept > 1 for as long
1156  *        as the page has a write request pending.
1157  */
1158 static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
1159 {
1160         struct nfs_write_data   *data = calldata;
1161
1162         nfs_writeback_done(task, data);
1163 }
1164
1165 static void nfs_writeback_release_full(void *calldata)
1166 {
1167         struct nfs_write_data   *data = calldata;
1168         int ret, status = data->task.tk_status;
1169         struct nfs_pageio_descriptor pgio;
1170
1171         if (data->pnfs_error) {
1172                 nfs_pageio_init_write_mds(&pgio, data->inode, FLUSH_STABLE);
1173                 pgio.pg_recoalesce = 1;
1174         }
1175
1176         /* Update attributes as result of writeback. */
1177         while (!list_empty(&data->pages)) {
1178                 struct nfs_page *req = nfs_list_entry(data->pages.next);
1179                 struct page *page = req->wb_page;
1180
1181                 nfs_list_remove_request(req);
1182
1183                 dprintk("NFS: %5u write (%s/%lld %d@%lld)",
1184                         data->task.tk_pid,
1185                         req->wb_context->dentry->d_inode->i_sb->s_id,
1186                         (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1187                         req->wb_bytes,
1188                         (long long)req_offset(req));
1189
1190                 if (data->pnfs_error) {
1191                         dprintk(", pnfs error = %d\n", data->pnfs_error);
1192                         goto next;
1193                 }
1194
1195                 if (status < 0) {
1196                         nfs_set_pageerror(page);
1197                         nfs_context_set_write_error(req->wb_context, status);
1198                         dprintk(", error = %d\n", status);
1199                         goto remove_request;
1200                 }
1201
1202                 if (nfs_write_need_commit(data)) {
1203                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1204                         nfs_mark_request_commit(req, data->lseg);
1205                         dprintk(" marked for commit\n");
1206                         goto next;
1207                 }
1208                 dprintk(" OK\n");
1209 remove_request:
1210                 nfs_inode_remove_request(req);
1211         next:
1212                 nfs_clear_page_tag_locked(req);
1213                 nfs_end_page_writeback(page);
1214                 if (data->pnfs_error) {
1215                         lock_page(page);
1216                         nfs_pageio_cond_complete(&pgio, page->index);
1217                         ret = nfs_page_async_flush(&pgio, page, 0);
1218                         if (ret) {
1219                                 nfs_set_pageerror(page);
1220                                 dprintk("rewrite to MDS error = %d\n", ret);
1221                         }
1222                         unlock_page(page);
1223                 }
1224         }
1225         if (data->pnfs_error)
1226                 nfs_pageio_complete(&pgio);
1227         nfs_writedata_release(calldata);
1228 }
1229
1230 static const struct rpc_call_ops nfs_write_full_ops = {
1231 #if defined(CONFIG_NFS_V4_1)
1232         .rpc_call_prepare = nfs_write_prepare,
1233 #endif /* CONFIG_NFS_V4_1 */
1234         .rpc_call_done = nfs_writeback_done_full,
1235         .rpc_release = nfs_writeback_release_full,
1236 };
1237
1238
1239 /*
1240  * This function is called when the WRITE call is complete.
1241  */
1242 void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1243 {
1244         struct nfs_writeargs    *argp = &data->args;
1245         struct nfs_writeres     *resp = &data->res;
1246         int status;
1247
1248         dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
1249                 task->tk_pid, task->tk_status);
1250
1251         /*
1252          * ->write_done will attempt to use post-op attributes to detect
1253          * conflicting writes by other clients.  A strict interpretation
1254          * of close-to-open would allow us to continue caching even if
1255          * another writer had changed the file, but some applications
1256          * depend on tighter cache coherency when writing.
1257          */
1258         status = NFS_PROTO(data->inode)->write_done(task, data);
1259         if (status != 0)
1260                 return;
1261         nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1262
1263 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1264         if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1265                 /* We tried a write call, but the server did not
1266                  * commit data to stable storage even though we
1267                  * requested it.
1268                  * Note: There is a known bug in Tru64 < 5.0 in which
1269                  *       the server reports NFS_DATA_SYNC, but performs
1270                  *       NFS_FILE_SYNC. We therefore implement this checking
1271                  *       as a dprintk() in order to avoid filling syslog.
1272                  */
1273                 static unsigned long    complain;
1274
1275                 /* Note this will print the MDS for a DS write */
1276                 if (time_before(complain, jiffies)) {
1277                         dprintk("NFS:       faulty NFS server %s:"
1278                                 " (committed = %d) != (stable = %d)\n",
1279                                 NFS_SERVER(data->inode)->nfs_client->cl_hostname,
1280                                 resp->verf->committed, argp->stable);
1281                         complain = jiffies + 300 * HZ;
1282                 }
1283         }
1284 #endif
1285         /* Is this a short write? */
1286         if (task->tk_status >= 0 && resp->count < argp->count) {
1287                 static unsigned long    complain;
1288
1289                 nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
1290
1291                 /* Has the server at least made some progress? */
1292                 if (resp->count != 0) {
1293                         /* Was this an NFSv2 write or an NFSv3 stable write? */
1294                         if (resp->verf->committed != NFS_UNSTABLE) {
1295                                 /* Resend from where the server left off */
1296                                 data->mds_offset += resp->count;
1297                                 argp->offset += resp->count;
1298                                 argp->pgbase += resp->count;
1299                                 argp->count -= resp->count;
1300                         } else {
1301                                 /* Resend as a stable write in order to avoid
1302                                  * headaches in the case of a server crash.
1303                                  */
1304                                 argp->stable = NFS_FILE_SYNC;
1305                         }
1306                         rpc_restart_call_prepare(task);
1307                         return;
1308                 }
1309                 if (time_before(complain, jiffies)) {
1310                         printk(KERN_WARNING
1311                                "NFS: Server wrote zero bytes, expected %u.\n",
1312                                         argp->count);
1313                         complain = jiffies + 300 * HZ;
1314                 }
1315                 /* Can't do anything about it except throw an error. */
1316                 task->tk_status = -EIO;
1317         }
1318         return;
1319 }
1320
1321
1322 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1323 static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
1324 {
1325         int ret;
1326
1327         if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
1328                 return 1;
1329         if (!may_wait)
1330                 return 0;
1331         ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
1332                                 NFS_INO_COMMIT,
1333                                 nfs_wait_bit_killable,
1334                                 TASK_KILLABLE);
1335         return (ret < 0) ? ret : 1;
1336 }
1337
1338 void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1339 {
1340         clear_bit(NFS_INO_COMMIT, &nfsi->flags);
1341         smp_mb__after_clear_bit();
1342         wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
1343 }
1344 EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
1345
1346 void nfs_commitdata_release(void *data)
1347 {
1348         struct nfs_write_data *wdata = data;
1349
1350         put_lseg(wdata->lseg);
1351         put_nfs_open_context(wdata->args.context);
1352         nfs_commit_free(wdata);
1353 }
1354 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1355
1356 int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
1357                         const struct rpc_call_ops *call_ops,
1358                         int how)
1359 {
1360         struct rpc_task *task;
1361         int priority = flush_task_priority(how);
1362         struct rpc_message msg = {
1363                 .rpc_argp = &data->args,
1364                 .rpc_resp = &data->res,
1365                 .rpc_cred = data->cred,
1366         };
1367         struct rpc_task_setup task_setup_data = {
1368                 .task = &data->task,
1369                 .rpc_client = clnt,
1370                 .rpc_message = &msg,
1371                 .callback_ops = call_ops,
1372                 .callback_data = data,
1373                 .workqueue = nfsiod_workqueue,
1374                 .flags = RPC_TASK_ASYNC,
1375                 .priority = priority,
1376         };
1377         /* Set up the initial task struct.  */
1378         NFS_PROTO(data->inode)->commit_setup(data, &msg);
1379
1380         dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1381
1382         task = rpc_run_task(&task_setup_data);
1383         if (IS_ERR(task))
1384                 return PTR_ERR(task);
1385         if (how & FLUSH_SYNC)
1386                 rpc_wait_for_completion_task(task);
1387         rpc_put_task(task);
1388         return 0;
1389 }
1390 EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1391
1392 /*
1393  * Set up the argument/result storage required for the RPC call.
1394  */
1395 void nfs_init_commit(struct nfs_write_data *data,
1396                             struct list_head *head,
1397                             struct pnfs_layout_segment *lseg)
1398 {
1399         struct nfs_page *first = nfs_list_entry(head->next);
1400         struct inode *inode = first->wb_context->dentry->d_inode;
1401
1402         /* Set up the RPC argument and reply structs
1403          * NB: take care not to mess about with data->commit et al. */
1404
1405         list_splice_init(head, &data->pages);
1406
1407         data->inode       = inode;
1408         data->cred        = first->wb_context->cred;
1409         data->lseg        = lseg; /* reference transferred */
1410         data->mds_ops     = &nfs_commit_ops;
1411
1412         data->args.fh     = NFS_FH(data->inode);
1413         /* Note: we always request a commit of the entire inode */
1414         data->args.offset = 0;
1415         data->args.count  = 0;
1416         data->args.context = get_nfs_open_context(first->wb_context);
1417         data->res.count   = 0;
1418         data->res.fattr   = &data->fattr;
1419         data->res.verf    = &data->verf;
1420         nfs_fattr_init(&data->fattr);
1421 }
1422 EXPORT_SYMBOL_GPL(nfs_init_commit);
1423
1424 void nfs_retry_commit(struct list_head *page_list,
1425                       struct pnfs_layout_segment *lseg)
1426 {
1427         struct nfs_page *req;
1428
1429         while (!list_empty(page_list)) {
1430                 req = nfs_list_entry(page_list->next);
1431                 nfs_list_remove_request(req);
1432                 nfs_mark_request_commit(req, lseg);
1433                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1434                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
1435                              BDI_RECLAIMABLE);
1436                 nfs_clear_page_tag_locked(req);
1437         }
1438 }
1439 EXPORT_SYMBOL_GPL(nfs_retry_commit);
1440
1441 /*
1442  * Commit dirty pages
1443  */
1444 static int
1445 nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1446 {
1447         struct nfs_write_data   *data;
1448
1449         data = nfs_commitdata_alloc();
1450
1451         if (!data)
1452                 goto out_bad;
1453
1454         /* Set up the argument struct */
1455         nfs_init_commit(data, head, NULL);
1456         return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
1457  out_bad:
1458         nfs_retry_commit(head, NULL);
1459         nfs_commit_clear_lock(NFS_I(inode));
1460         return -ENOMEM;
1461 }
1462
1463 /*
1464  * COMMIT call returned
1465  */
1466 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1467 {
1468         struct nfs_write_data   *data = calldata;
1469
1470         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1471                                 task->tk_pid, task->tk_status);
1472
1473         /* Call the NFS version-specific code */
1474         NFS_PROTO(data->inode)->commit_done(task, data);
1475 }
1476
1477 void nfs_commit_release_pages(struct nfs_write_data *data)
1478 {
1479         struct nfs_page *req;
1480         int status = data->task.tk_status;
1481
1482         while (!list_empty(&data->pages)) {
1483                 req = nfs_list_entry(data->pages.next);
1484                 nfs_list_remove_request(req);
1485                 nfs_clear_request_commit(req);
1486
1487                 dprintk("NFS:       commit (%s/%lld %d@%lld)",
1488                         req->wb_context->dentry->d_sb->s_id,
1489                         (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1490                         req->wb_bytes,
1491                         (long long)req_offset(req));
1492                 if (status < 0) {
1493                         nfs_context_set_write_error(req->wb_context, status);
1494                         nfs_inode_remove_request(req);
1495                         dprintk(", error = %d\n", status);
1496                         goto next;
1497                 }
1498
1499                 /* Okay, COMMIT succeeded, apparently. Check the verifier
1500                  * returned by the server against all stored verfs. */
1501                 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1502                         /* We have a match */
1503                         nfs_inode_remove_request(req);
1504                         dprintk(" OK\n");
1505                         goto next;
1506                 }
1507                 /* We have a mismatch. Write the page again */
1508                 dprintk(" mismatch\n");
1509                 nfs_mark_request_dirty(req);
1510         next:
1511                 nfs_clear_page_tag_locked(req);
1512         }
1513 }
1514 EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
1515
1516 static void nfs_commit_release(void *calldata)
1517 {
1518         struct nfs_write_data *data = calldata;
1519
1520         nfs_commit_release_pages(data);
1521         nfs_commit_clear_lock(NFS_I(data->inode));
1522         nfs_commitdata_release(calldata);
1523 }
1524
1525 static const struct rpc_call_ops nfs_commit_ops = {
1526 #if defined(CONFIG_NFS_V4_1)
1527         .rpc_call_prepare = nfs_write_prepare,
1528 #endif /* CONFIG_NFS_V4_1 */
1529         .rpc_call_done = nfs_commit_done,
1530         .rpc_release = nfs_commit_release,
1531 };
1532
1533 int nfs_commit_inode(struct inode *inode, int how)
1534 {
1535         LIST_HEAD(head);
1536         int may_wait = how & FLUSH_SYNC;
1537         int res;
1538
1539         res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1540         if (res <= 0)
1541                 goto out_mark_dirty;
1542         res = nfs_scan_commit(inode, &head, 0, 0);
1543         if (res) {
1544                 int error;
1545
1546                 error = pnfs_commit_list(inode, &head, how);
1547                 if (error == PNFS_NOT_ATTEMPTED)
1548                         error = nfs_commit_list(inode, &head, how);
1549                 if (error < 0)
1550                         return error;
1551                 if (!may_wait)
1552                         goto out_mark_dirty;
1553                 error = wait_on_bit(&NFS_I(inode)->flags,
1554                                 NFS_INO_COMMIT,
1555                                 nfs_wait_bit_killable,
1556                                 TASK_KILLABLE);
1557                 if (error < 0)
1558                         return error;
1559         } else
1560                 nfs_commit_clear_lock(NFS_I(inode));
1561         return res;
1562         /* Note: If we exit without ensuring that the commit is complete,
1563          * we must mark the inode as dirty. Otherwise, future calls to
1564          * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1565          * that the data is on the disk.
1566          */
1567 out_mark_dirty:
1568         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1569         return res;
1570 }
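/*
 * Usage sketch: callers pass FLUSH_SYNC when they must wait for the COMMIT
 * reply, and 0 for a fire-and-forget commit, for example:
 *
 *      ret = nfs_commit_inode(inode, FLUSH_SYNC);  // wait until data is stable
 *      ret = nfs_commit_inode(inode, 0);           // start a commit, do not wait
 *
 * nfs_wb_page() and nfs_commit_unstable_pages() below use exactly these two
 * forms.
 */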
1571
1572 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1573 {
1574         struct nfs_inode *nfsi = NFS_I(inode);
1575         int flags = FLUSH_SYNC;
1576         int ret = 0;
1577
1578         /* no pending commits means nothing needs to be done */
1579         if (!nfsi->ncommit)
1580                 return ret;
1581
1582         if (wbc->sync_mode == WB_SYNC_NONE) {
1583                 /* Don't commit yet if this is a non-blocking flush and there
1584                  * are a lot of outstanding writes for this mapping.
1585                  */
1586                 if (nfsi->ncommit <= (nfsi->npages >> 1))
1587                         goto out_mark_dirty;
1588
1589                 /* don't wait for the COMMIT response */
1590                 flags = 0;
1591         }
1592
1593         ret = nfs_commit_inode(inode, flags);
1594         if (ret >= 0) {
1595                 if (wbc->sync_mode == WB_SYNC_NONE) {
1596                         if (ret < wbc->nr_to_write)
1597                                 wbc->nr_to_write -= ret;
1598                         else
1599                                 wbc->nr_to_write = 0;
1600                 }
1601                 return 0;
1602         }
1603 out_mark_dirty:
1604         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1605         return ret;
1606 }
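/*
 * Worked example of the WB_SYNC_NONE heuristic above: with
 * nfsi->npages == 1000 outstanding requests and nfsi->ncommit == 400 of
 * them awaiting COMMIT, 400 <= (1000 >> 1) == 500, so the non-blocking
 * flush only redirties the inode and lets more writes accumulate; once
 * ncommit grows past half of npages, an asynchronous commit (flags == 0)
 * is sent instead.
 */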
1607 #else
1608 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1609 {
1610         return 0;
1611 }
1612 #endif
1613
1614 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1615 {
1616         int ret;
1617
1618         ret = nfs_commit_unstable_pages(inode, wbc);
1619         if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
1620                 int status;
1621                 bool sync = true;
1622
1623                 if (wbc->sync_mode == WB_SYNC_NONE)
1624                         sync = false;
1625
1626                 status = pnfs_layoutcommit_inode(inode, sync);
1627                 if (status < 0)
1628                         return status;
1629         }
1630         return ret;
1631 }
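/*
 * Context sketch (assuming the usual wiring in fs/nfs/super.c): this is
 * the NFS ->write_inode method, so generic writeback reaches the COMMIT
 * and pNFS LAYOUTCOMMIT logic through something like:
 *
 *      static const struct super_operations nfs_sops = {
 *              ...
 *              .write_inode    = nfs_write_inode,
 *              ...
 *      };
 */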
1632
1633 /*
1634  * Flush the inode to disk.
1635  */
1636 int nfs_wb_all(struct inode *inode)
1637 {
1638         struct writeback_control wbc = {
1639                 .sync_mode = WB_SYNC_ALL,
1640                 .nr_to_write = LONG_MAX,
1641                 .range_start = 0,
1642                 .range_end = LLONG_MAX,
1643         };
1644
1645         return sync_inode(inode, &wbc);
1646 }
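/*
 * Usage sketch (assumed callers, e.g. the setattr/truncate paths): this is
 * the heavyweight "flush and commit everything" primitive; a caller that
 * must not proceed while dirty or uncommitted data remains simply does:
 *
 *      ret = nfs_wb_all(inode);        // 0 on success, negative errno on error
 */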
1647
1648 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1649 {
1650         struct nfs_page *req;
1651         int ret = 0;
1652
1653         BUG_ON(!PageLocked(page));
1654         for (;;) {
1655                 wait_on_page_writeback(page);
1656                 req = nfs_page_find_request(page);
1657                 if (req == NULL)
1658                         break;
1659                 if (nfs_lock_request_dontget(req)) {
1660                         nfs_inode_remove_request(req);
1661                         /*
1662                          * In case nfs_inode_remove_request has marked the
1663                          * page dirty, cancel that so it is not written back.
1664                          */
1665                         cancel_dirty_page(page, PAGE_CACHE_SIZE);
1666                         nfs_unlock_request(req);
1667                         break;
1668                 }
1669                 ret = nfs_wait_on_request(req);
1670                 nfs_release_request(req);
1671                 if (ret < 0)
1672                         break;
1673         }
1674         return ret;
1675 }
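/*
 * Commentary: this is meant for paths that discard a page outright
 * (e.g. ->invalidatepage on truncate), so any outstanding write request
 * for the page is removed rather than flushed; cancel_dirty_page() then
 * keeps the dirty-page accounting consistent after the request is dropped.
 */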
1676
1677 /*
1678  * Write back all requests on one page - we do this before reading it.
1679  */
1680 int nfs_wb_page(struct inode *inode, struct page *page)
1681 {
1682         loff_t range_start = page_offset(page);
1683         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1684         struct writeback_control wbc = {
1685                 .sync_mode = WB_SYNC_ALL,
1686                 .nr_to_write = 0,
1687                 .range_start = range_start,
1688                 .range_end = range_end,
1689         };
1690         int ret;
1691
1692         for (;;) {
1693                 wait_on_page_writeback(page);
1694                 if (clear_page_dirty_for_io(page)) {
1695                         ret = nfs_writepage_locked(page, &wbc);
1696                         if (ret < 0)
1697                                 goto out_error;
1698                         continue;
1699                 }
1700                 if (!PagePrivate(page))
1701                         break;
1702                 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1703                 if (ret < 0)
1704                         goto out_error;
1705         }
1706         return 0;
1707 out_error:
1708         return ret;
1709 }
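/*
 * Usage sketch: read-side code calls this before filling a page from the
 * server, so the server sees any local modifications first, e.g.:
 *
 *      ret = nfs_wb_page(inode, page);
 *      if (ret == 0)
 *              // page is clean and committed; safe to read it back
 */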
1710
1711 #ifdef CONFIG_MIGRATION
1712 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1713                 struct page *page)
1714 {
1715         /*
1716          * If PagePrivate is set, then the page is currently associated with
1717          * an in-progress read or write request. Don't try to migrate it.
1718          *
1719          * FIXME: we could do this in principle, but we'll need a way to ensure
1720          *        that we can safely release the inode reference while holding
1721          *        the page lock.
1722          */
1723         if (PagePrivate(page))
1724                 return -EBUSY;
1725
1726         nfs_fscache_release_page(page, GFP_KERNEL);
1727
1728         return migrate_page(mapping, newpage, page);
1729 }
1730 #endif
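/*
 * Context sketch (assuming the usual wiring in fs/nfs/file.c): under
 * CONFIG_MIGRATION, nfs_migrate_page() serves as the NFS ->migratepage
 * method in the address_space_operations:
 *
 *      const struct address_space_operations nfs_file_aops = {
 *              ...
 *              .migratepage    = nfs_migrate_page,
 *              ...
 *      };
 */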
1731
1732 int __init nfs_init_writepagecache(void)
1733 {
1734         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1735                                              sizeof(struct nfs_write_data),
1736                                              0, SLAB_HWCACHE_ALIGN,
1737                                              NULL);
1738         if (nfs_wdata_cachep == NULL)
1739                 return -ENOMEM;
1740
1741         nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1742                                                      nfs_wdata_cachep);
1743         if (nfs_wdata_mempool == NULL)
1744                 return -ENOMEM;
1745
1746         nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1747                                                       nfs_wdata_cachep);
1748         if (nfs_commit_mempool == NULL)
1749                 return -ENOMEM;
1750
1751         /*
1752          * NFS congestion size, scale with available memory.
1753          *
1754          *  64MB:    8192k
1755          * 128MB:   11585k
1756          * 256MB:   16384k
1757          * 512MB:   23170k
1758          *   1GB:   32768k
1759          *   2GB:   46340k
1760          *   4GB:   65536k
1761          *   8GB:   92681k
1762          *  16GB:  131072k
1763          *
1764          * This allows larger machines to have larger/more transfers.
1765          * Limit the default to 256M
1766          * Limit the default to 256M.
1767         nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1768         if (nfs_congestion_kb > 256*1024)
1769                 nfs_congestion_kb = 256*1024;
1770
1771         return 0;
1772 }
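/*
 * Worked example of the formula above, assuming 4KB pages
 * (PAGE_SHIFT == 12) on a 1GB machine:
 *
 *      totalram_pages            = 1GB / 4KB         = 262144
 *      int_sqrt(262144)                              = 512
 *      16 * 512                                      = 8192
 *      8192 << (PAGE_SHIFT - 10) = 8192 << 2         = 32768k
 *
 * which matches the "1GB: 32768k" row in the table; the final clamp caps
 * nfs_congestion_kb at 256*1024k (256MB) on larger machines.
 */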
1773
1774 void nfs_destroy_writepagecache(void)
1775 {
1776         mempool_destroy(nfs_commit_mempool);
1777         mempool_destroy(nfs_wdata_mempool);
1778         kmem_cache_destroy(nfs_wdata_cachep);
1779 }
1780