NFSv4.1: Clean ups and bugfixes for the pNFS read/writeback/commit code
1 /*
2  * linux/fs/nfs/write.c
3  *
4  * Write file data over NFS.
5  *
6  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
7  */
8
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16 #include <linux/migrate.h>
17
18 #include <linux/sunrpc/clnt.h>
19 #include <linux/nfs_fs.h>
20 #include <linux/nfs_mount.h>
21 #include <linux/nfs_page.h>
22 #include <linux/backing-dev.h>
23 #include <linux/export.h>
24
25 #include <asm/uaccess.h>
26
27 #include "delegation.h"
28 #include "internal.h"
29 #include "iostat.h"
30 #include "nfs4_fs.h"
31 #include "fscache.h"
32 #include "pnfs.h"
33
34 #define NFSDBG_FACILITY         NFSDBG_PAGECACHE
35
36 #define MIN_POOL_WRITE          (32)
37 #define MIN_POOL_COMMIT         (4)
38
39 /*
40  * Local function declarations
41  */
42 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
43                                   struct inode *inode, int ioflags);
44 static void nfs_redirty_request(struct nfs_page *req);
45 static const struct rpc_call_ops nfs_write_partial_ops;
46 static const struct rpc_call_ops nfs_write_full_ops;
47 static const struct rpc_call_ops nfs_commit_ops;
48
49 static struct kmem_cache *nfs_wdata_cachep;
50 static mempool_t *nfs_wdata_mempool;
51 static mempool_t *nfs_commit_mempool;
52
53 struct nfs_write_data *nfs_commitdata_alloc(void)
54 {
55         struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
56
57         if (p) {
58                 memset(p, 0, sizeof(*p));
59                 INIT_LIST_HEAD(&p->pages);
60         }
61         return p;
62 }
63 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
64
65 void nfs_commit_free(struct nfs_write_data *p)
66 {
67         if (p && (p->pagevec != &p->page_array[0]))
68                 kfree(p->pagevec);
69         mempool_free(p, nfs_commit_mempool);
70 }
71 EXPORT_SYMBOL_GPL(nfs_commit_free);
72
73 struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
74 {
75         struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
76
77         if (p) {
78                 memset(p, 0, sizeof(*p));
79                 INIT_LIST_HEAD(&p->pages);
80                 p->npages = pagecount;
81                 if (pagecount <= ARRAY_SIZE(p->page_array))
82                         p->pagevec = p->page_array;
83                 else {
84                         p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
85                         if (!p->pagevec) {
86                                 mempool_free(p, nfs_wdata_mempool);
87                                 p = NULL;
88                         }
89                 }
90         }
91         return p;
92 }
93
94 void nfs_writedata_free(struct nfs_write_data *p)
95 {
96         if (p && (p->pagevec != &p->page_array[0]))
97                 kfree(p->pagevec);
98         mempool_free(p, nfs_wdata_mempool);
99 }
100
101 void nfs_writedata_release(struct nfs_write_data *wdata)
102 {
103         put_nfs_open_context(wdata->args.context);
104         nfs_writedata_free(wdata);
105 }
106
107 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
108 {
109         ctx->error = error;
110         smp_wmb();
111         set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
112 }
113
114 static struct nfs_page *nfs_page_find_request_locked(struct page *page)
115 {
116         struct nfs_page *req = NULL;
117
118         if (PagePrivate(page)) {
119                 req = (struct nfs_page *)page_private(page);
120                 if (req != NULL)
121                         kref_get(&req->wb_kref);
122         }
123         return req;
124 }
125
126 static struct nfs_page *nfs_page_find_request(struct page *page)
127 {
128         struct inode *inode = page->mapping->host;
129         struct nfs_page *req = NULL;
130
131         spin_lock(&inode->i_lock);
132         req = nfs_page_find_request_locked(page);
133         spin_unlock(&inode->i_lock);
134         return req;
135 }
136
137 /* Adjust the file length if we're writing beyond the end */
138 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
139 {
140         struct inode *inode = page->mapping->host;
141         loff_t end, i_size;
142         pgoff_t end_index;
143
144         spin_lock(&inode->i_lock);
145         i_size = i_size_read(inode);
146         end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
147         if (i_size > 0 && page->index < end_index)
148                 goto out;
149         end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
150         if (i_size >= end)
151                 goto out;
152         i_size_write(inode, end);
153         nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
154 out:
155         spin_unlock(&inode->i_lock);
156 }
157
158 /* A writeback failed: mark the page as bad, and invalidate the page cache */
159 static void nfs_set_pageerror(struct page *page)
160 {
161         SetPageError(page);
162         nfs_zap_mapping(page->mapping->host, page->mapping);
163 }
164
165 /* We can set the PG_uptodate flag if we see that a write request
166  * covers the full page.
167  */
168 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
169 {
170         if (PageUptodate(page))
171                 return;
172         if (base != 0)
173                 return;
174         if (count != nfs_page_length(page))
175                 return;
176         SetPageUptodate(page);
177 }
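/*
 * For illustration (assuming 4KB pages): a write(2) that dirties an
 * entire page arrives here with base == 0 and count ==
 * nfs_page_length(page), so the page can be marked uptodate without
 * first reading it back from the server. A partial write, say 512
 * bytes at base 100, fails both tests and leaves PG_uptodate clear.
 */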
178
179 static int wb_priority(struct writeback_control *wbc)
180 {
181         if (wbc->for_reclaim)
182                 return FLUSH_HIGHPRI | FLUSH_STABLE;
183         if (wbc->for_kupdate || wbc->for_background)
184                 return FLUSH_LOWPRI | FLUSH_COND_STABLE;
185         return FLUSH_COND_STABLE;
186 }
187
188 /*
189  * NFS congestion control
190  */
191
192 int nfs_congestion_kb;
193
194 #define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
195 #define NFS_CONGESTION_OFF_THRESH       \
196         (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
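/*
 * Worked example (assuming 4KB pages, so PAGE_SHIFT == 12): with
 * nfs_congestion_kb = 65536 (64MB of writeback data),
 *
 *	NFS_CONGESTION_ON_THRESH  = 65536 >> (12 - 10) = 16384 pages
 *	NFS_CONGESTION_OFF_THRESH = 16384 - (16384 >> 2) = 12288 pages
 *
 * i.e. the bdi is marked congested once 16384 pages are in flight,
 * and uncongested again only after the count drops below 12288. The
 * 25% hysteresis gap avoids flapping around a single threshold.
 */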
197
198 static int nfs_set_page_writeback(struct page *page)
199 {
200         int ret = test_set_page_writeback(page);
201
202         if (!ret) {
203                 struct inode *inode = page->mapping->host;
204                 struct nfs_server *nfss = NFS_SERVER(inode);
205
206                 page_cache_get(page);
207                 if (atomic_long_inc_return(&nfss->writeback) >
208                                 NFS_CONGESTION_ON_THRESH) {
209                         set_bdi_congested(&nfss->backing_dev_info,
210                                                 BLK_RW_ASYNC);
211                 }
212         }
213         return ret;
214 }
215
216 static void nfs_end_page_writeback(struct page *page)
217 {
218         struct inode *inode = page->mapping->host;
219         struct nfs_server *nfss = NFS_SERVER(inode);
220
221         end_page_writeback(page);
222         page_cache_release(page);
223         if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
224                 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
225 }
226
227 static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
228 {
229         struct inode *inode = page->mapping->host;
230         struct nfs_page *req;
231         int ret;
232
233         spin_lock(&inode->i_lock);
234         for (;;) {
235                 req = nfs_page_find_request_locked(page);
236                 if (req == NULL)
237                         break;
238                 if (nfs_lock_request_dontget(req))
239                         break;
240                 /* Note: If we hold the page lock, as is the case in nfs_writepage,
241                  *       then the call to nfs_lock_request_dontget() will always
242                  *       succeed provided that someone hasn't already marked the
243                  *       request as dirty (in which case we don't care).
244                  */
245                 spin_unlock(&inode->i_lock);
246                 if (!nonblock)
247                         ret = nfs_wait_on_request(req);
248                 else
249                         ret = -EAGAIN;
250                 nfs_release_request(req);
251                 if (ret != 0)
252                         return ERR_PTR(ret);
253                 spin_lock(&inode->i_lock);
254         }
255         spin_unlock(&inode->i_lock);
256         return req;
257 }
258
259 /*
260  * Find an associated nfs write request, and prepare to flush it out.
261  * May return an error if the user signalled nfs_wait_on_request().
262  */
263 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
264                                 struct page *page, bool nonblock)
265 {
266         struct nfs_page *req;
267         int ret = 0;
268
269         req = nfs_find_and_lock_request(page, nonblock);
270         if (!req)
271                 goto out;
272         ret = PTR_ERR(req);
273         if (IS_ERR(req))
274                 goto out;
275
276         ret = nfs_set_page_writeback(page);
277         BUG_ON(ret != 0);
278         BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
279
280         if (!nfs_pageio_add_request(pgio, req)) {
281                 nfs_redirty_request(req);
282                 ret = pgio->pg_error;
283         }
284 out:
285         return ret;
286 }
287
288 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
289 {
290         struct inode *inode = page->mapping->host;
291         int ret;
292
293         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
294         nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
295
296         nfs_pageio_cond_complete(pgio, page->index);
297         ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
298         if (ret == -EAGAIN) {
299                 redirty_page_for_writepage(wbc, page);
300                 ret = 0;
301         }
302         return ret;
303 }
304
305 /*
306  * Write an mmapped page to the server.
307  */
308 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
309 {
310         struct nfs_pageio_descriptor pgio;
311         int err;
312
313         nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
314         err = nfs_do_writepage(page, wbc, &pgio);
315         nfs_pageio_complete(&pgio);
316         if (err < 0)
317                 return err;
318         if (pgio.pg_error < 0)
319                 return pgio.pg_error;
320         return 0;
321 }
322
323 int nfs_writepage(struct page *page, struct writeback_control *wbc)
324 {
325         int ret;
326
327         ret = nfs_writepage_locked(page, wbc);
328         unlock_page(page);
329         return ret;
330 }
331
332 static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
333 {
334         int ret;
335
336         ret = nfs_do_writepage(page, wbc, data);
337         unlock_page(page);
338         return ret;
339 }
340
341 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
342 {
343         struct inode *inode = mapping->host;
344         unsigned long *bitlock = &NFS_I(inode)->flags;
345         struct nfs_pageio_descriptor pgio;
346         int err;
347
348         /* Stop dirtying of new pages while we sync */
349         err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
350                         nfs_wait_bit_killable, TASK_KILLABLE);
351         if (err)
352                 goto out_err;
353
354         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
355
356         nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
357         err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
358         nfs_pageio_complete(&pgio);
359
360         clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
361         smp_mb__after_clear_bit();
362         wake_up_bit(bitlock, NFS_INO_FLUSHING);
363
364         if (err < 0)
365                 goto out_err;
366         err = pgio.pg_error;
367         if (err < 0)
368                 goto out_err;
369         return 0;
370 out_err:
371         return err;
372 }
373
374 /*
375  * Insert a write request into an inode
376  */
377 static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
378 {
379         struct nfs_inode *nfsi = NFS_I(inode);
380
381         /* Lock the request! */
382         nfs_lock_request_dontget(req);
383
384         spin_lock(&inode->i_lock);
385         if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
386                 inode->i_version++;
387         set_bit(PG_MAPPED, &req->wb_flags);
388         SetPagePrivate(req->wb_page);
389         set_page_private(req->wb_page, (unsigned long)req);
390         nfsi->npages++;
391         kref_get(&req->wb_kref);
392         spin_unlock(&inode->i_lock);
393 }
394
395 /*
396  * Remove a write request from an inode
397  */
398 static void nfs_inode_remove_request(struct nfs_page *req)
399 {
400         struct inode *inode = req->wb_context->dentry->d_inode;
401         struct nfs_inode *nfsi = NFS_I(inode);
402
403         BUG_ON(!NFS_WBACK_BUSY(req));
404
405         spin_lock(&inode->i_lock);
406         set_page_private(req->wb_page, 0);
407         ClearPagePrivate(req->wb_page);
408         clear_bit(PG_MAPPED, &req->wb_flags);
409         nfsi->npages--;
410         spin_unlock(&inode->i_lock);
411         nfs_release_request(req);
412 }
413
414 static void
415 nfs_mark_request_dirty(struct nfs_page *req)
416 {
417         __set_page_dirty_nobuffers(req->wb_page);
418 }
419
420 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
421 /**
422  * nfs_request_add_commit_list - add request to a commit list
423  * @req: pointer to a struct nfs_page
424  * @head: commit list head
425  *
426  * This sets the PG_CLEAN bit, updates the inode's global count of
427  * outstanding requests requiring a commit, and updates the MM page
428  * stats.
429  *
430  * The caller must _not_ hold the inode->i_lock, but must be
431  * holding the nfs_page lock.
432  */
433 void
434 nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
435 {
436         struct inode *inode = req->wb_context->dentry->d_inode;
437
438         set_bit(PG_CLEAN, &req->wb_flags);
439         spin_lock(&inode->i_lock);
440         nfs_list_add_request(req, head);
441         NFS_I(inode)->ncommit++;
442         spin_unlock(&inode->i_lock);
443         inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
444         inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
445         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
446 }
447 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
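/*
 * How this is reached in practice, for orientation: when an UNSTABLE
 * WRITE completes, nfs_writeback_release_full() below calls
 * nfs_mark_request_commit() while the request is still locked from the
 * writeback; that helper gives pNFS first refusal and otherwise lands
 * here with the inode's own commit_list.
 */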
448
449 /**
450  * nfs_request_remove_commit_list - Remove request from a commit list
451  * @req: pointer to a nfs_page
452  *
453  * This clears the PG_CLEAN bit and updates the inode's global count
454  * of outstanding requests requiring a commit.
455  * It does not update the MM page stats.
456  *
457  * The caller _must_ hold the inode->i_lock and the nfs_page lock.
458  */
459 void
460 nfs_request_remove_commit_list(struct nfs_page *req)
461 {
462         struct inode *inode = req->wb_context->dentry->d_inode;
463
464         if (!test_and_clear_bit(PG_CLEAN, &req->wb_flags))
465                 return;
466         nfs_list_remove_request(req);
467         NFS_I(inode)->ncommit--;
468 }
469 EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
470
471
472 /*
473  * Add a request to the inode's commit list.
474  */
475 static void
476 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
477 {
478         struct inode *inode = req->wb_context->dentry->d_inode;
479
480         if (pnfs_mark_request_commit(req, lseg))
481                 return;
482         nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
483 }
484
485 static void
486 nfs_clear_page_commit(struct page *page)
487 {
488         dec_zone_page_state(page, NR_UNSTABLE_NFS);
489         dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
490 }
491
492 static void
493 nfs_clear_request_commit(struct nfs_page *req)
494 {
495         if (test_bit(PG_CLEAN, &req->wb_flags)) {
496                 struct inode *inode = req->wb_context->dentry->d_inode;
497
498                 if (!pnfs_clear_request_commit(req)) {
499                         spin_lock(&inode->i_lock);
500                         nfs_request_remove_commit_list(req);
501                         spin_unlock(&inode->i_lock);
502                 }
503                 nfs_clear_page_commit(req->wb_page);
504         }
505 }
506
507 static inline
508 int nfs_write_need_commit(struct nfs_write_data *data)
509 {
510         if (data->verf.committed == NFS_DATA_SYNC)
511                 return data->lseg == NULL;
512         else
513                 return data->verf.committed != NFS_FILE_SYNC;
514 }
515
516 static inline
517 int nfs_reschedule_unstable_write(struct nfs_page *req,
518                                   struct nfs_write_data *data)
519 {
520         if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
521                 nfs_mark_request_commit(req, data->lseg);
522                 return 1;
523         }
524         if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
525                 nfs_mark_request_dirty(req);
526                 return 1;
527         }
528         return 0;
529 }
530 #else
531 static void
532 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
533 {
534 }
535
536 static void
537 nfs_clear_request_commit(struct nfs_page *req)
538 {
539 }
540
541 static inline
542 int nfs_write_need_commit(struct nfs_write_data *data)
543 {
544         return 0;
545 }
546
547 static inline
548 int nfs_reschedule_unstable_write(struct nfs_page *req,
549                                   struct nfs_write_data *data)
550 {
551         return 0;
552 }
553 #endif
554
555 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
556 static int
557 nfs_need_commit(struct nfs_inode *nfsi)
558 {
559         return nfsi->ncommit > 0;
560 }
561
562 /* i_lock held by caller */
563 static int
564 nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max)
565 {
566         struct nfs_page *req, *tmp;
567         int ret = 0;
568
569         list_for_each_entry_safe(req, tmp, src, wb_list) {
570                 if (!nfs_lock_request(req))
571                         continue;
572                 nfs_request_remove_commit_list(req);
573                 nfs_list_add_request(req, dst);
574                 ret++;
575                 if (ret == max)
576                         break;
577         }
578         return ret;
579 }
580
581 /*
582  * nfs_scan_commit - Scan an inode for commit requests
583  * @inode: NFS inode to scan
584  * @dst: destination list
585  *
586  * Moves requests from the inode's 'commit' request list.
587  * The requests are *not* checked to ensure that they form a contiguous set.
588  */
589 static int
590 nfs_scan_commit(struct inode *inode, struct list_head *dst)
591 {
592         struct nfs_inode *nfsi = NFS_I(inode);
593         int ret = 0;
594
595         spin_lock(&inode->i_lock);
596         if (nfsi->ncommit > 0) {
597                 const int max = INT_MAX;
598                 int pnfs_ret;
599
600                 ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max);
601                 pnfs_ret = pnfs_scan_commit_lists(inode, max - ret);
602                 ret += pnfs_ret;
603                 nfsi->ncommit -= ret;
604         }
605         spin_unlock(&inode->i_lock);
606         return ret;
607 }
608
609 #else
610 static inline int nfs_need_commit(struct nfs_inode *nfsi)
611 {
612         return 0;
613 }
614
615 static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
616 {
617         return 0;
618 }
619 #endif
620
621 /*
622  * Search for an existing write request, and attempt to update
623  * it to reflect a new dirty region on a given page.
624  *
625  * If the attempt fails, then the existing request is flushed out
626  * to disk.
627  */
628 static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
629                 struct page *page,
630                 unsigned int offset,
631                 unsigned int bytes)
632 {
633         struct nfs_page *req;
634         unsigned int rqend;
635         unsigned int end;
636         int error;
637
638         if (!PagePrivate(page))
639                 return NULL;
640
641         end = offset + bytes;
642         spin_lock(&inode->i_lock);
643
644         for (;;) {
645                 req = nfs_page_find_request_locked(page);
646                 if (req == NULL)
647                         goto out_unlock;
648
649                 rqend = req->wb_offset + req->wb_bytes;
650                 /*
651                  * Tell the caller to flush out the request if
652                  * the offsets are non-contiguous.
653                  * Note: nfs_flush_incompatible() will already
654                  * have flushed out requests having wrong owners.
655                  */
656                 if (offset > rqend
657                     || end < req->wb_offset)
658                         goto out_flushme;
659
660                 if (nfs_lock_request_dontget(req))
661                         break;
662
663                 /* The request is locked, so wait and then retry */
664                 spin_unlock(&inode->i_lock);
665                 error = nfs_wait_on_request(req);
666                 nfs_release_request(req);
667                 if (error != 0)
668                         goto out_err;
669                 spin_lock(&inode->i_lock);
670         }
671
672         /* Okay, the request matches. Update the region */
673         if (offset < req->wb_offset) {
674                 req->wb_offset = offset;
675                 req->wb_pgbase = offset;
676         }
677         if (end > rqend)
678                 req->wb_bytes = end - req->wb_offset;
679         else
680                 req->wb_bytes = rqend - req->wb_offset;
681 out_unlock:
682         spin_unlock(&inode->i_lock);
683         nfs_clear_request_commit(req);
684         return req;
685 out_flushme:
686         spin_unlock(&inode->i_lock);
687         nfs_release_request(req);
688         error = nfs_wb_page(inode, page);
689 out_err:
690         return ERR_PTR(error);
691 }
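/*
 * Worked example of the merge above: an existing request covers bytes
 * [1024, 2048) of the page and a new write dirties [1536, 2560). Then
 * rqend == 2048 and end == 2560; neither offset > rqend nor
 * end < req->wb_offset holds, so the regions are mergeable. offset is
 * not below wb_offset, so wb_offset stays 1024, and since end > rqend
 * the request grows to wb_bytes = 2560 - 1024 = 1536.
 */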
692
693 /*
694  * Try to update an existing write request, or create one if there is none.
695  *
696  * Note: Should always be called with the Page Lock held to prevent races
697  * if we have to add a new request. Also assumes that the caller has
698  * already called nfs_flush_incompatible() if necessary.
699  */
700 static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
701                 struct page *page, unsigned int offset, unsigned int bytes)
702 {
703         struct inode *inode = page->mapping->host;
704         struct nfs_page *req;
705
706         req = nfs_try_to_update_request(inode, page, offset, bytes);
707         if (req != NULL)
708                 goto out;
709         req = nfs_create_request(ctx, inode, page, offset, bytes);
710         if (IS_ERR(req))
711                 goto out;
712         nfs_inode_add_request(inode, req);
713 out:
714         return req;
715 }
716
717 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
718                 unsigned int offset, unsigned int count)
719 {
720         struct nfs_page *req;
721
722         req = nfs_setup_write_request(ctx, page, offset, count);
723         if (IS_ERR(req))
724                 return PTR_ERR(req);
725         /* Update file length */
726         nfs_grow_file(page, offset, count);
727         nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
728         nfs_mark_request_dirty(req);
729         nfs_unlock_request(req);
730         return 0;
731 }
732
733 int nfs_flush_incompatible(struct file *file, struct page *page)
734 {
735         struct nfs_open_context *ctx = nfs_file_open_context(file);
736         struct nfs_page *req;
737         int do_flush, status;
738         /*
739          * Look for a request corresponding to this page. If there
740          * is one, and it belongs to another file, we flush it out
741          * before we try to copy anything into the page. Do this
742          * due to the lack of an ACCESS-type call in NFSv2.
743          * Also do the same if we find a request from an existing
744          * dropped page.
745          */
746         do {
747                 req = nfs_page_find_request(page);
748                 if (req == NULL)
749                         return 0;
750                 do_flush = req->wb_page != page || req->wb_context != ctx ||
751                         req->wb_lock_context->lockowner != current->files ||
752                         req->wb_lock_context->pid != current->tgid;
753                 nfs_release_request(req);
754                 if (!do_flush)
755                         return 0;
756                 status = nfs_wb_page(page->mapping->host, page);
757         } while (status == 0);
758         return status;
759 }
760
761 /*
762  * If the page cache is marked as unsafe or invalid, then we can't rely on
763  * the PageUptodate() flag. In this case, we will need to turn off
764  * write optimisations that depend on the page contents being correct.
765  */
766 static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
767 {
768         return PageUptodate(page) &&
769                 !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
770 }
771
772 /*
773  * Update and possibly write a cached page of an NFS file.
774  *
775  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
776  * things with a page scheduled for an RPC call (e.g. invalidate it).
777  */
778 int nfs_updatepage(struct file *file, struct page *page,
779                 unsigned int offset, unsigned int count)
780 {
781         struct nfs_open_context *ctx = nfs_file_open_context(file);
782         struct inode    *inode = page->mapping->host;
783         int             status = 0;
784
785         nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
786
787         dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
788                 file->f_path.dentry->d_parent->d_name.name,
789                 file->f_path.dentry->d_name.name, count,
790                 (long long)(page_offset(page) + offset));
791
792         /* If we're not using byte range locks, and we know the page
793          * is up to date, it may be more efficient to extend the write
794          * to cover the entire page in order to avoid fragmentation
795          * inefficiencies.
796          */
797         if (nfs_write_pageuptodate(page, inode) &&
798                         inode->i_flock == NULL &&
799                         !(file->f_flags & O_DSYNC)) {
800                 count = max(count + offset, nfs_page_length(page));
801                 offset = 0;
802         }
803
804         status = nfs_writepage_setup(ctx, page, offset, count);
805         if (status < 0)
806                 nfs_set_pageerror(page);
807         else
808                 __set_page_dirty_nobuffers(page);
809
810         dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
811                         status, (long long)i_size_read(inode));
812         return status;
813 }
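/*
 * Example of the extension heuristic above (assuming a 4096-byte page
 * in the middle of the file, uptodate, no posix locks, no O_DSYNC):
 * a 10-byte write at offset 100 within the page is widened to
 * offset = 0, count = max(110, nfs_page_length(page)) = 4096, so the
 * whole page goes out in one request instead of a 10-byte fragment.
 */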
814
815 static void nfs_writepage_release(struct nfs_page *req,
816                                   struct nfs_write_data *data)
817 {
818         struct page *page = req->wb_page;
819
820         if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
821                 nfs_inode_remove_request(req);
822         nfs_unlock_request(req);
823         nfs_end_page_writeback(page);
824 }
825
826 static int flush_task_priority(int how)
827 {
828         switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
829                 case FLUSH_HIGHPRI:
830                         return RPC_PRIORITY_HIGH;
831                 case FLUSH_LOWPRI:
832                         return RPC_PRIORITY_LOW;
833         }
834         return RPC_PRIORITY_NORMAL;
835 }
836
837 int nfs_initiate_write(struct nfs_write_data *data,
838                        struct rpc_clnt *clnt,
839                        const struct rpc_call_ops *call_ops,
840                        int how)
841 {
842         struct inode *inode = data->inode;
843         int priority = flush_task_priority(how);
844         struct rpc_task *task;
845         struct rpc_message msg = {
846                 .rpc_argp = &data->args,
847                 .rpc_resp = &data->res,
848                 .rpc_cred = data->cred,
849         };
850         struct rpc_task_setup task_setup_data = {
851                 .rpc_client = clnt,
852                 .task = &data->task,
853                 .rpc_message = &msg,
854                 .callback_ops = call_ops,
855                 .callback_data = data,
856                 .workqueue = nfsiod_workqueue,
857                 .flags = RPC_TASK_ASYNC,
858                 .priority = priority,
859         };
860         int ret = 0;
861
862         /* Set up the initial task struct.  */
863         NFS_PROTO(inode)->write_setup(data, &msg);
864
865         dprintk("NFS: %5u initiated write call "
866                 "(req %s/%lld, %u bytes @ offset %llu)\n",
867                 data->task.tk_pid,
868                 inode->i_sb->s_id,
869                 (long long)NFS_FILEID(inode),
870                 data->args.count,
871                 (unsigned long long)data->args.offset);
872
873         task = rpc_run_task(&task_setup_data);
874         if (IS_ERR(task)) {
875                 ret = PTR_ERR(task);
876                 goto out;
877         }
878         if (how & FLUSH_SYNC) {
879                 ret = rpc_wait_for_completion_task(task);
880                 if (ret == 0)
881                         ret = task->tk_status;
882         }
883         rpc_put_task(task);
884 out:
885         return ret;
886 }
887 EXPORT_SYMBOL_GPL(nfs_initiate_write);
888
889 /*
890  * Set up the argument/result storage required for the RPC call.
891  */
892 static void nfs_write_rpcsetup(struct nfs_page *req,
893                 struct nfs_write_data *data,
894                 unsigned int count, unsigned int offset,
895                 int how)
896 {
897         struct inode *inode = req->wb_context->dentry->d_inode;
898
899         /* Set up the RPC argument and reply structs
900          * NB: take care not to mess about with data->commit et al. */
901
902         data->req = req;
903         data->inode = inode = req->wb_context->dentry->d_inode;
904         data->cred = req->wb_context->cred;
905
906         data->args.fh     = NFS_FH(inode);
907         data->args.offset = req_offset(req) + offset;
908         /* pnfs_set_layoutcommit needs this */
909         data->mds_offset = data->args.offset;
910         data->args.pgbase = req->wb_pgbase + offset;
911         data->args.pages  = data->pagevec;
912         data->args.count  = count;
913         data->args.context = get_nfs_open_context(req->wb_context);
914         data->args.lock_context = req->wb_lock_context;
915         data->args.stable  = NFS_UNSTABLE;
916         switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
917         case 0:
918                 break;
919         case FLUSH_COND_STABLE:
920                 if (nfs_need_commit(NFS_I(inode)))
921                         break;
922         default:
923                 data->args.stable = NFS_FILE_SYNC;
924         }
925
926         data->res.fattr   = &data->fattr;
927         data->res.count   = count;
928         data->res.verf    = &data->verf;
929         nfs_fattr_init(&data->fattr);
930 }
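/*
 * How the stable flag above resolves, for illustration:
 *
 *	how                 pending commits?   args.stable
 *	0                   -                  NFS_UNSTABLE
 *	FLUSH_COND_STABLE   yes                NFS_UNSTABLE
 *	FLUSH_COND_STABLE   no                 NFS_FILE_SYNC
 *	FLUSH_STABLE        -                  NFS_FILE_SYNC
 *
 * i.e. FLUSH_COND_STABLE asks for a stable write only when no commit
 * is outstanding anyway, saving the separate COMMIT round trip.
 */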
931
932 static int nfs_do_write(struct nfs_write_data *data,
933                 const struct rpc_call_ops *call_ops,
934                 int how)
935 {
936         struct inode *inode = data->args.context->dentry->d_inode;
937
938         return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
939 }
940
941 static int nfs_do_multiple_writes(struct list_head *head,
942                 const struct rpc_call_ops *call_ops,
943                 int how)
944 {
945         struct nfs_write_data *data;
946         int ret = 0;
947
948         while (!list_empty(head)) {
949                 int ret2;
950
951                 data = list_entry(head->next, struct nfs_write_data, list);
952                 list_del_init(&data->list);
953 
954                 ret2 = nfs_do_write(data, call_ops, how);
955                 if (ret == 0)
956                         ret = ret2;
957         }
958         return ret;
959 }
960
961 /* If an nfs_flush_* function fails, it should remove reqs from @head and
962  * call this on each, which will prepare them to be retried on the next
963  * writeback using standard NFS.
964  */
965 static void nfs_redirty_request(struct nfs_page *req)
966 {
967         struct page *page = req->wb_page;
968
969         nfs_mark_request_dirty(req);
970         nfs_unlock_request(req);
971         nfs_end_page_writeback(page);
972 }
973
974 /*
975  * Generate multiple small requests to write out a single
976  * contiguous dirty area on one page.
977  */
978 static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
979 {
980         struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
981         struct page *page = req->wb_page;
982         struct nfs_write_data *data;
983         size_t wsize = desc->pg_bsize, nbytes;
984         unsigned int offset;
985         int requests = 0;
986         int ret = 0;
987
988         nfs_list_remove_request(req);
989
990         if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
991             (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
992              desc->pg_count > wsize))
993                 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
994
995
996         offset = 0;
997         nbytes = desc->pg_count;
998         do {
999                 size_t len = min(nbytes, wsize);
1000
1001                 data = nfs_writedata_alloc(1);
1002                 if (!data)
1003                         goto out_bad;
1004                 data->pagevec[0] = page;
1005                 nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
1006                 list_add(&data->list, res);
1007                 requests++;
1008                 nbytes -= len;
1009                 offset += len;
1010         } while (nbytes != 0);
1011         atomic_set(&req->wb_complete, requests);
1012         desc->pg_rpc_callops = &nfs_write_partial_ops;
1013         return ret;
1014
1015 out_bad:
1016         while (!list_empty(res)) {
1017                 data = list_entry(res->next, struct nfs_write_data, list);
1018                 list_del(&data->list);
1019                 nfs_writedata_free(data);
1020         }
1021         nfs_redirty_request(req);
1022         return -ENOMEM;
1023 }
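/*
 * Worked example of the splitting above: desc->pg_count == 4096 dirty
 * bytes on one page with wsize == 1024 yields four sub-requests of
 * 1024 bytes at page offsets 0, 1024, 2048 and 3072, and
 * req->wb_complete is set to 4 so that the page is only unlocked once
 * the last of the four partial writes completes.
 */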
1024
1025 /*
1026  * Create an RPC task for the given write request and kick it.
1027  * The page must have been locked by the caller.
1028  *
1029  * It may happen that the page we're passed is not marked dirty.
1030  * This is the case if nfs_updatepage detects a conflicting request
1031  * that has been written but not committed.
1032  */
1033 static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
1034 {
1035         struct nfs_page         *req;
1036         struct page             **pages;
1037         struct nfs_write_data   *data;
1038         struct list_head *head = &desc->pg_list;
1039         int ret = 0;
1040
1041         data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
1042                                                       desc->pg_count));
1043         if (!data) {
1044                 while (!list_empty(head)) {
1045                         req = nfs_list_entry(head->next);
1046                         nfs_list_remove_request(req);
1047                         nfs_redirty_request(req);
1048                 }
1049                 ret = -ENOMEM;
1050                 goto out;
1051         }
1052         pages = data->pagevec;
1053         while (!list_empty(head)) {
1054                 req = nfs_list_entry(head->next);
1055                 nfs_list_remove_request(req);
1056                 nfs_list_add_request(req, &data->pages);
1057                 *pages++ = req->wb_page;
1058         }
1059         req = nfs_list_entry(data->pages.next);
1060
1061         if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1062             (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
1063                 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
1064
1065         /* Set up the argument struct */
1066         nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
1067         list_add(&data->list, res);
1068         desc->pg_rpc_callops = &nfs_write_full_ops;
1069 out:
1070         return ret;
1071 }
1072
1073 int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
1074 {
1075         if (desc->pg_bsize < PAGE_CACHE_SIZE)
1076                 return nfs_flush_multi(desc, head);
1077         return nfs_flush_one(desc, head);
1078 }
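/*
 * In other words: a server wsize smaller than the page size (say
 * wsize = 512 with 4KB pages) forces the multi-request path above;
 * otherwise whole pages are coalesced into a single RPC.
 */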
1079
1080 static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1081 {
1082         LIST_HEAD(head);
1083         int ret;
1084
1085         ret = nfs_generic_flush(desc, &head);
1086         if (ret == 0)
1087                 ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
1088                                 desc->pg_ioflags);
1089         return ret;
1090 }
1091
1092 static const struct nfs_pageio_ops nfs_pageio_write_ops = {
1093         .pg_test = nfs_generic_pg_test,
1094         .pg_doio = nfs_generic_pg_writepages,
1095 };
1096
1097 void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
1098                                   struct inode *inode, int ioflags)
1099 {
1100         nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
1101                                 NFS_SERVER(inode)->wsize, ioflags);
1102 }
1103
1104 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1105 {
1106         pgio->pg_ops = &nfs_pageio_write_ops;
1107         pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1108 }
1109 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1110
1111 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1112                                   struct inode *inode, int ioflags)
1113 {
1114         if (!pnfs_pageio_init_write(pgio, inode, ioflags))
1115                 nfs_pageio_init_write_mds(pgio, inode, ioflags);
1116 }
1117
1118 /*
1119  * Handle a write reply that flushed part of a page.
1120  */
1121 static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
1122 {
1123         struct nfs_write_data   *data = calldata;
1124
1125         dprintk("NFS: %5u write(%s/%lld %d@%lld)",
1126                 task->tk_pid,
1127                 data->req->wb_context->dentry->d_inode->i_sb->s_id,
1128                 (long long)
1129                   NFS_FILEID(data->req->wb_context->dentry->d_inode),
1130                 data->req->wb_bytes, (long long)req_offset(data->req));
1131
1132         nfs_writeback_done(task, data);
1133 }
1134
1135 static void nfs_writeback_release_partial(void *calldata)
1136 {
1137         struct nfs_write_data   *data = calldata;
1138         struct nfs_page         *req = data->req;
1139         struct page             *page = req->wb_page;
1140         int status = data->task.tk_status;
1141
1142         if (status < 0) {
1143                 nfs_set_pageerror(page);
1144                 nfs_context_set_write_error(req->wb_context, status);
1145                 dprintk(", error = %d\n", status);
1146                 goto out;
1147         }
1148
1149         if (nfs_write_need_commit(data)) {
1150                 struct inode *inode = page->mapping->host;
1151
1152                 spin_lock(&inode->i_lock);
1153                 if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
1154                         /* Do nothing; we need to resend the writes */
1155                 } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
1156                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1157                         dprintk(" defer commit\n");
1158                 } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
1159                         set_bit(PG_NEED_RESCHED, &req->wb_flags);
1160                         clear_bit(PG_NEED_COMMIT, &req->wb_flags);
1161                         dprintk(" server reboot detected\n");
1162                 }
1163                 spin_unlock(&inode->i_lock);
1164         } else
1165                 dprintk(" OK\n");
1166
1167 out:
1168         if (atomic_dec_and_test(&req->wb_complete))
1169                 nfs_writepage_release(req, data);
1170         nfs_writedata_release(calldata);
1171 }
1172
1173 #if defined(CONFIG_NFS_V4_1)
1174 void nfs_write_prepare(struct rpc_task *task, void *calldata)
1175 {
1176         struct nfs_write_data *data = calldata;
1177
1178         if (nfs4_setup_sequence(NFS_SERVER(data->inode),
1179                                 &data->args.seq_args,
1180                                 &data->res.seq_res, task))
1181                 return;
1182         rpc_call_start(task);
1183 }
1184 #endif /* CONFIG_NFS_V4_1 */
1185
1186 static const struct rpc_call_ops nfs_write_partial_ops = {
1187 #if defined(CONFIG_NFS_V4_1)
1188         .rpc_call_prepare = nfs_write_prepare,
1189 #endif /* CONFIG_NFS_V4_1 */
1190         .rpc_call_done = nfs_writeback_done_partial,
1191         .rpc_release = nfs_writeback_release_partial,
1192 };
1193
1194 /*
1195  * Handle a write reply that flushes a whole page.
1196  *
1197  * FIXME: There is an inherent race with invalidate_inode_pages and
1198  *        writebacks since the page->count is kept > 1 for as long
1199  *        as the page has a write request pending.
1200  */
1201 static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
1202 {
1203         struct nfs_write_data   *data = calldata;
1204
1205         nfs_writeback_done(task, data);
1206 }
1207
1208 static void nfs_writeback_release_full(void *calldata)
1209 {
1210         struct nfs_write_data   *data = calldata;
1211         int status = data->task.tk_status;
1212
1213         /* Update attributes as result of writeback. */
1214         while (!list_empty(&data->pages)) {
1215                 struct nfs_page *req = nfs_list_entry(data->pages.next);
1216                 struct page *page = req->wb_page;
1217
1218                 nfs_list_remove_request(req);
1219
1220                 dprintk("NFS: %5u write (%s/%lld %d@%lld)",
1221                         data->task.tk_pid,
1222                         req->wb_context->dentry->d_inode->i_sb->s_id,
1223                         (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1224                         req->wb_bytes,
1225                         (long long)req_offset(req));
1226
1227                 if (status < 0) {
1228                         nfs_set_pageerror(page);
1229                         nfs_context_set_write_error(req->wb_context, status);
1230                         dprintk(", error = %d\n", status);
1231                         goto remove_request;
1232                 }
1233
1234                 if (nfs_write_need_commit(data)) {
1235                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1236                         nfs_mark_request_commit(req, data->lseg);
1237                         dprintk(" marked for commit\n");
1238                         goto next;
1239                 }
1240                 dprintk(" OK\n");
1241 remove_request:
1242                 nfs_inode_remove_request(req);
1243         next:
1244                 nfs_unlock_request(req);
1245                 nfs_end_page_writeback(page);
1246         }
1247         nfs_writedata_release(calldata);
1248 }
1249
1250 static const struct rpc_call_ops nfs_write_full_ops = {
1251 #if defined(CONFIG_NFS_V4_1)
1252         .rpc_call_prepare = nfs_write_prepare,
1253 #endif /* CONFIG_NFS_V4_1 */
1254         .rpc_call_done = nfs_writeback_done_full,
1255         .rpc_release = nfs_writeback_release_full,
1256 };
1257
1258
1259 /*
1260  * This function is called when the WRITE call is complete.
1261  */
1262 void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1263 {
1264         struct nfs_writeargs    *argp = &data->args;
1265         struct nfs_writeres     *resp = &data->res;
1266         int status;
1267
1268         dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
1269                 task->tk_pid, task->tk_status);
1270
1271         /*
1272          * ->write_done will attempt to use post-op attributes to detect
1273          * conflicting writes by other clients.  A strict interpretation
1274          * of close-to-open would allow us to continue caching even if
1275          * another writer had changed the file, but some applications
1276          * depend on tighter cache coherency when writing.
1277          */
1278         status = NFS_PROTO(data->inode)->write_done(task, data);
1279         if (status != 0)
1280                 return;
1281         nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1282
1283 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1284         if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1285                 /* We tried a write call, but the server did not
1286                  * commit data to stable storage even though we
1287                  * requested it.
1288                  * Note: There is a known bug in Tru64 < 5.0 in which
1289                  *       the server reports NFS_DATA_SYNC, but performs
1290                  *       NFS_FILE_SYNC. We therefore implement this checking
1291                  *       as a dprintk() in order to avoid filling syslog.
1292                  */
1293                 static unsigned long    complain;
1294
1295                 /* Note this will print the MDS for a DS write */
1296                 if (time_before(complain, jiffies)) {
1297                         dprintk("NFS:       faulty NFS server %s:"
1298                                 " (committed = %d) != (stable = %d)\n",
1299                                 NFS_SERVER(data->inode)->nfs_client->cl_hostname,
1300                                 resp->verf->committed, argp->stable);
1301                         complain = jiffies + 300 * HZ;
1302                 }
1303         }
1304 #endif
1305         /* Is this a short write? */
1306         if (task->tk_status >= 0 && resp->count < argp->count) {
1307                 static unsigned long    complain;
1308
1309                 nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
1310
1311                 /* Has the server at least made some progress? */
1312                 if (resp->count != 0) {
1313                         /* Was this an NFSv2 write or an NFSv3 stable write? */
1314                         if (resp->verf->committed != NFS_UNSTABLE) {
1315                                 /* Resend from where the server left off */
1316                                 data->mds_offset += resp->count;
1317                                 argp->offset += resp->count;
1318                                 argp->pgbase += resp->count;
1319                                 argp->count -= resp->count;
1320                         } else {
1321                                 /* Resend as a stable write in order to avoid
1322                                  * headaches in the case of a server crash.
1323                                  */
1324                                 argp->stable = NFS_FILE_SYNC;
1325                         }
1326                         rpc_restart_call_prepare(task);
1327                         return;
1328                 }
1329                 if (time_before(complain, jiffies)) {
1330                         printk(KERN_WARNING
1331                                "NFS: Server wrote zero bytes, expected %u.\n",
1332                                         argp->count);
1333                         complain = jiffies + 300 * HZ;
1334                 }
1335                 /* Can't do anything about it except throw an error. */
1336                 task->tk_status = -EIO;
1337         }
1338         return;
1339 }
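/*
 * Short-write example for the resend logic above: an 8192-byte stable
 * write at offset 65536 to which the server replies with
 * resp->count == 4096. Since progress was made and the write was not
 * NFS_UNSTABLE, the arguments are advanced to offset = 69632,
 * pgbase += 4096, count = 4096, and the call is restarted to write
 * the remaining half.
 */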
1340
1341
1342 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1343 static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
1344 {
1345         int ret;
1346
1347         if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
1348                 return 1;
1349         if (!may_wait)
1350                 return 0;
1351         ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
1352                                 NFS_INO_COMMIT,
1353                                 nfs_wait_bit_killable,
1354                                 TASK_KILLABLE);
1355         return (ret < 0) ? ret : 1;
1356 }
1357
1358 void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1359 {
1360         clear_bit(NFS_INO_COMMIT, &nfsi->flags);
1361         smp_mb__after_clear_bit();
1362         wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
1363 }
1364 EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
1365
1366 void nfs_commitdata_release(void *data)
1367 {
1368         struct nfs_write_data *wdata = data;
1369
1370         put_nfs_open_context(wdata->args.context);
1371         nfs_commit_free(wdata);
1372 }
1373 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1374
1375 int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
1376                         const struct rpc_call_ops *call_ops,
1377                         int how)
1378 {
1379         struct rpc_task *task;
1380         int priority = flush_task_priority(how);
1381         struct rpc_message msg = {
1382                 .rpc_argp = &data->args,
1383                 .rpc_resp = &data->res,
1384                 .rpc_cred = data->cred,
1385         };
1386         struct rpc_task_setup task_setup_data = {
1387                 .task = &data->task,
1388                 .rpc_client = clnt,
1389                 .rpc_message = &msg,
1390                 .callback_ops = call_ops,
1391                 .callback_data = data,
1392                 .workqueue = nfsiod_workqueue,
1393                 .flags = RPC_TASK_ASYNC,
1394                 .priority = priority,
1395         };
1396         /* Set up the initial task struct.  */
1397         NFS_PROTO(data->inode)->commit_setup(data, &msg);
1398
1399         dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1400
1401         task = rpc_run_task(&task_setup_data);
1402         if (IS_ERR(task))
1403                 return PTR_ERR(task);
1404         if (how & FLUSH_SYNC)
1405                 rpc_wait_for_completion_task(task);
1406         rpc_put_task(task);
1407         return 0;
1408 }
1409 EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1410
1411 /*
1412  * Set up the argument/result storage required for the RPC call.
1413  */
1414 void nfs_init_commit(struct nfs_write_data *data,
1415                             struct list_head *head,
1416                             struct pnfs_layout_segment *lseg)
1417 {
1418         struct nfs_page *first = nfs_list_entry(head->next);
1419         struct inode *inode = first->wb_context->dentry->d_inode;
1420
1421         /* Set up the RPC argument and reply structs
1422          * NB: take care not to mess about with data->commit et al. */
1423
1424         list_splice_init(head, &data->pages);
1425
1426         data->inode       = inode;
1427         data->cred        = first->wb_context->cred;
1428         data->lseg        = lseg; /* reference transferred */
1429         data->mds_ops     = &nfs_commit_ops;
1430
1431         data->args.fh     = NFS_FH(data->inode);
1432         /* Note: we always request a commit of the entire inode */
1433         data->args.offset = 0;
1434         data->args.count  = 0;
1435         data->args.context = get_nfs_open_context(first->wb_context);
1436         data->res.count   = 0;
1437         data->res.fattr   = &data->fattr;
1438         data->res.verf    = &data->verf;
1439         nfs_fattr_init(&data->fattr);
1440 }
1441 EXPORT_SYMBOL_GPL(nfs_init_commit);
1442
1443 void nfs_retry_commit(struct list_head *page_list,
1444                       struct pnfs_layout_segment *lseg)
1445 {
1446         struct nfs_page *req;
1447
1448         while (!list_empty(page_list)) {
1449                 req = nfs_list_entry(page_list->next);
1450                 nfs_list_remove_request(req);
1451                 nfs_mark_request_commit(req, lseg);
1452                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1453                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
1454                              BDI_RECLAIMABLE);
1455                 nfs_unlock_request(req);
1456         }
1457 }
1458 EXPORT_SYMBOL_GPL(nfs_retry_commit);
1459
1460 /*
1461  * Commit dirty pages
1462  */
1463 static int
1464 nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1465 {
1466         struct nfs_write_data   *data;
1467
1468         data = nfs_commitdata_alloc();
1469
1470         if (!data)
1471                 goto out_bad;
1472
1473         /* Set up the argument struct */
1474         nfs_init_commit(data, head, NULL);
1475         return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
1476  out_bad:
1477         nfs_retry_commit(head, NULL);
1478         nfs_commit_clear_lock(NFS_I(inode));
1479         return -ENOMEM;
1480 }
1481
1482 /*
1483  * COMMIT call returned
1484  */
1485 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1486 {
1487         struct nfs_write_data   *data = calldata;
1488
1489         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1490                                 task->tk_pid, task->tk_status);
1491
1492         /* Call the NFS version-specific code */
1493         NFS_PROTO(data->inode)->commit_done(task, data);
1494 }
1495
1496 void nfs_commit_release_pages(struct nfs_write_data *data)
1497 {
1498         struct nfs_page *req;
1499         int status = data->task.tk_status;
1500
1501         while (!list_empty(&data->pages)) {
1502                 req = nfs_list_entry(data->pages.next);
1503                 nfs_list_remove_request(req);
1504                 nfs_clear_page_commit(req->wb_page);
1505
1506                 dprintk("NFS:       commit (%s/%lld %d@%lld)",
1507                         req->wb_context->dentry->d_sb->s_id,
1508                         (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1509                         req->wb_bytes,
1510                         (long long)req_offset(req));
1511                 if (status < 0) {
1512                         nfs_context_set_write_error(req->wb_context, status);
1513                         nfs_inode_remove_request(req);
1514                         dprintk(", error = %d\n", status);
1515                         goto next;
1516                 }
1517
1518                 /* Okay, COMMIT succeeded, apparently. Check the verifier
1519                  * returned by the server against all stored verfs. */
1520                 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1521                         /* We have a match */
1522                         nfs_inode_remove_request(req);
1523                         dprintk(" OK\n");
1524                         goto next;
1525                 }
1526                 /* We have a mismatch. Write the page again */
1527                 dprintk(" mismatch\n");
1528                 nfs_mark_request_dirty(req);
1529         next:
1530                 nfs_unlock_request(req);
1531         }
1532 }
1533 EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
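/*
 * The verifier check above is what makes UNSTABLE writes safe: each
 * WRITE reply carried a (typically boot-time based) verifier that was
 * stashed in req->wb_verf. If the verifier in the COMMIT reply still
 * matches, the server instance that promised to keep the data is the
 * one that committed it, and the request can be retired. A mismatch
 * means the server rebooted in between and may have lost the unstable
 * data, so the page is simply redirtied and written again.
 */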
1534
1535 static void nfs_commit_release(void *calldata)
1536 {
1537         struct nfs_write_data *data = calldata;
1538
1539         nfs_commit_release_pages(data);
1540         nfs_commit_clear_lock(NFS_I(data->inode));
1541         nfs_commitdata_release(calldata);
1542 }
1543
1544 static const struct rpc_call_ops nfs_commit_ops = {
1545 #if defined(CONFIG_NFS_V4_1)
1546         .rpc_call_prepare = nfs_write_prepare,
1547 #endif /* CONFIG_NFS_V4_1 */
1548         .rpc_call_done = nfs_commit_done,
1549         .rpc_release = nfs_commit_release,
1550 };
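
/*
 * Illustrative sketch (not part of the kernel build): nfs_commit_ops is
 * a table of callbacks the RPC layer drives for each COMMIT task: an
 * optional prepare step (only NFSv4.1 needs one, to claim a session
 * slot), then rpc_call_done when the reply arrives, then rpc_release
 * exactly once to free the request. A tiny user-space model of the same
 * ops-table pattern; everything prefixed demo_ is hypothetical:
 */
#include <stdio.h>

struct demo_call_ops {
	void (*call_done)(int status, void *calldata);
	void (*release)(void *calldata);
};

static void demo_done(int status, void *calldata)
{
	printf("commit reply processed, status %d\n", status);
}

static void demo_release(void *calldata)
{
	printf("commit data %p released\n", calldata);
}

static const struct demo_call_ops demo_commit_ops = {
	.call_done = demo_done,
	.release = demo_release,
};

static void demo_run_task(const struct demo_call_ops *ops, void *calldata)
{
	ops->call_done(0, calldata);	/* reply handling runs first */
	ops->release(calldata);		/* teardown always runs last */
}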
1551
1552 int nfs_commit_inode(struct inode *inode, int how)
1553 {
1554         LIST_HEAD(head);
1555         int may_wait = how & FLUSH_SYNC;
1556         int res;
1557
1558         res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1559         if (res <= 0)
1560                 goto out_mark_dirty;
1561         res = nfs_scan_commit(inode, &head);
1562         if (res) {
1563                 int error;
1564
1565                 error = pnfs_commit_list(inode, &head, how);
1566                 if (error == PNFS_NOT_ATTEMPTED)
1567                         error = nfs_commit_list(inode, &head, how);
1568                 if (error < 0)
1569                         return error;
1570                 if (!may_wait)
1571                         goto out_mark_dirty;
1572                 error = wait_on_bit(&NFS_I(inode)->flags,
1573                                 NFS_INO_COMMIT,
1574                                 nfs_wait_bit_killable,
1575                                 TASK_KILLABLE);
1576                 if (error < 0)
1577                         return error;
1578         } else
1579                 nfs_commit_clear_lock(NFS_I(inode));
1580         return res;
1581         /* Note: If we exit without ensuring that the commit is complete,
1582          * we must mark the inode as dirty. Otherwise, future calls to
1583          * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1584          * that the data is on the disk.
1585          */
1586 out_mark_dirty:
1587         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1588         return res;
1589 }
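
/*
 * Illustrative sketch (not part of the kernel build): nfs_commit_inode()
 * is how an application's fsync() becomes an on-the-wire COMMIT for data
 * that went out as an UNSTABLE write. A minimal user-space sequence that
 * exercises this path on an NFS mount; the file name is hypothetical:
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int demo_write_then_commit(void)
{
	const char buf[] = "reaches stable storage before close";
	int fd = open("/mnt/nfs/demo.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return -1;
	if (write(fd, buf, strlen(buf)) < 0) {	/* may be sent UNSTABLE */
		close(fd);
		return -1;
	}
	if (fsync(fd) < 0) {	/* flush dirty pages, then COMMIT (FLUSH_SYNC) */
		close(fd);
		return -1;
	}
	return close(fd);
}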
1590
1591 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1592 {
1593         struct nfs_inode *nfsi = NFS_I(inode);
1594         int flags = FLUSH_SYNC;
1595         int ret = 0;
1596
1597         /* nothing to do when there are no pending commits */
1598         if (!nfsi->ncommit)
1599                 return ret;
1600
1601         if (wbc->sync_mode == WB_SYNC_NONE) {
1602                 /* Don't commit yet if this is a non-blocking flush and there
1603                  * are a lot of outstanding writes for this mapping.
1604                  */
1605                 if (nfsi->ncommit <= (nfsi->npages >> 1))
1606                         goto out_mark_dirty;
1607
1608                 /* don't wait for the COMMIT response */
1609                 flags = 0;
1610         }
1611
1612         ret = nfs_commit_inode(inode, flags);
1613         if (ret >= 0) {
1614                 if (wbc->sync_mode == WB_SYNC_NONE) {
1615                         if (ret < wbc->nr_to_write)
1616                                 wbc->nr_to_write -= ret;
1617                         else
1618                                 wbc->nr_to_write = 0;
1619                 }
1620                 return 0;
1621         }
1622 out_mark_dirty:
1623         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1624         return ret;
1625 }
1626 #else
1627 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1628 {
1629         return 0;
1630 }
1631 #endif
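
/*
 * Illustrative sketch (not part of the kernel build): the WB_SYNC_NONE
 * branch above only issues a COMMIT once commit-pending pages outnumber
 * half of the inode's cached pages, so a background flush does not keep
 * interrupting a growing write burst with small commits. Stand-alone
 * form of the threshold; demo_should_commit() is hypothetical:
 */
static int demo_should_commit(unsigned long ncommit, unsigned long npages,
			      int blocking)
{
	if (blocking)
		return 1;	/* WB_SYNC_ALL: always commit */
	/* e.g. 600 commit-pending out of 1000 cached pages -> commit now */
	return ncommit > (npages >> 1);
}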
1632
1633 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1634 {
1635         int ret;
1636
1637         ret = nfs_commit_unstable_pages(inode, wbc);
1638         if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
1639                 int status;
1640                 bool sync = true;
1641
1642                 if (wbc->sync_mode == WB_SYNC_NONE)
1643                         sync = false;
1644
1645                 status = pnfs_layoutcommit_inode(inode, sync);
1646                 if (status < 0)
1647                         return status;
1648         }
1649         return ret;
1650 }
1651
1652 /*
1653  * Flush the inode to disk.
1654  */
1655 int nfs_wb_all(struct inode *inode)
1656 {
1657         struct writeback_control wbc = {
1658                 .sync_mode = WB_SYNC_ALL,
1659                 .nr_to_write = LONG_MAX,
1660                 .range_start = 0,
1661                 .range_end = LLONG_MAX,
1662         };
1663
1664         return sync_inode(inode, &wbc);
1665 }
1666
1667 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1668 {
1669         struct nfs_page *req;
1670         int ret = 0;
1671
1672         BUG_ON(!PageLocked(page));
1673         for (;;) {
1674                 wait_on_page_writeback(page);
1675                 req = nfs_page_find_request(page);
1676                 if (req == NULL)
1677                         break;
1678                 if (nfs_lock_request_dontget(req)) {
1679                         nfs_clear_request_commit(req);
1680                         nfs_inode_remove_request(req);
1681                         /*
1682                          * In case nfs_inode_remove_request() has marked
1683                          * the page as being dirty.
1684                          */
1685                         cancel_dirty_page(page, PAGE_CACHE_SIZE);
1686                         nfs_unlock_request(req);
1687                         break;
1688                 }
1689                 ret = nfs_wait_on_request(req);
1690                 nfs_release_request(req);
1691                 if (ret < 0)
1692                         break;
1693         }
1694         return ret;
1695 }
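
/*
 * Illustrative sketch (not part of the kernel build): the loop above is
 * a try-lock-or-wait pattern: take the request if nobody holds it,
 * otherwise sleep until the holder is done and look the page up again,
 * because the request we waited on may have completed and vanished. A
 * toy single-threaded version with a flag in place of the real request
 * lock; all demo_* names are hypothetical:
 */
#include <stdbool.h>

struct demo_req {
	bool locked;
};

/* true if we took ownership, false if another user holds the request */
static bool demo_trylock(struct demo_req *req)
{
	if (req->locked)
		return false;
	req->locked = true;
	return true;
}

static void demo_cancel_request(struct demo_req *req)
{
	for (;;) {
		if (demo_trylock(req)) {
			/* tear the request down, then release it */
			req->locked = false;
			return;
		}
		/* real code: nfs_wait_on_request(), then retry the lookup */
	}
}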
1696
1697 /*
1698  * Write back all requests on one page - we do this before reading it.
1699  */
1700 int nfs_wb_page(struct inode *inode, struct page *page)
1701 {
1702         loff_t range_start = page_offset(page);
1703         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1704         struct writeback_control wbc = {
1705                 .sync_mode = WB_SYNC_ALL,
1706                 .nr_to_write = 0,
1707                 .range_start = range_start,
1708                 .range_end = range_end,
1709         };
1710         int ret;
1711
1712         for (;;) {
1713                 wait_on_page_writeback(page);
1714                 if (clear_page_dirty_for_io(page)) {
1715                         ret = nfs_writepage_locked(page, &wbc);
1716                         if (ret < 0)
1717                                 goto out_error;
1718                         continue;
1719                 }
1720                 if (!PagePrivate(page))
1721                         break;
1722                 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1723                 if (ret < 0)
1724                         goto out_error;
1725         }
1726         return 0;
1727 out_error:
1728         return ret;
1729 }
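
/*
 * Illustrative sketch (not part of the kernel build): nfs_wb_page()
 * above terminates only when the page is neither dirty (would need
 * another WRITE) nor still carrying a request (PagePrivate, would need
 * a COMMIT). The same invariant on a toy page; the demo_* types and
 * helpers are hypothetical:
 */
#include <stdbool.h>

struct demo_page {
	bool dirty;		/* needs a WRITE */
	bool has_request;	/* written UNSTABLE, needs a COMMIT */
};

static int demo_flush_page(struct demo_page *pg)
{
	for (;;) {
		if (pg->dirty) {
			pg->dirty = false;
			pg->has_request = true;	/* WRITE sent UNSTABLE */
			continue;
		}
		if (!pg->has_request)
			return 0;		/* clean and stable: done */
		pg->has_request = false;	/* COMMIT made it stable */
	}
}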
1730
1731 #ifdef CONFIG_MIGRATION
1732 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1733                 struct page *page, enum migrate_mode mode)
1734 {
1735         /*
1736          * If PagePrivate is set, then the page is currently associated with
1737          * an in-progress read or write request. Don't try to migrate it.
1738          *
1739          * FIXME: we could do this in principle, but we'll need a way to ensure
1740          *        that we can safely release the inode reference while holding
1741          *        the page lock.
1742          */
1743         if (PagePrivate(page))
1744                 return -EBUSY;
1745
1746         nfs_fscache_release_page(page, GFP_KERNEL);
1747
1748         return migrate_page(mapping, newpage, page, mode);
1749 }
1750 #endif
1751
1752 int __init nfs_init_writepagecache(void)
1753 {
1754         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1755                                              sizeof(struct nfs_write_data),
1756                                              0, SLAB_HWCACHE_ALIGN,
1757                                              NULL);
1758         if (nfs_wdata_cachep == NULL)
1759                 return -ENOMEM;
1760
1761         nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1762                                                      nfs_wdata_cachep);
1763         if (nfs_wdata_mempool == NULL)
1764                 return -ENOMEM;
1765
1766         nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1767                                                       nfs_wdata_cachep);
1768         if (nfs_commit_mempool == NULL)
1769                 return -ENOMEM;
1770
1771         /*
1772          * NFS congestion size, scale with available memory.
1773          *
1774          *  64MB:    8192k
1775          * 128MB:   11585k
1776          * 256MB:   16384k
1777          * 512MB:   23170k
1778          *   1GB:   32768k
1779          *   2GB:   46340k
1780          *   4GB:   65536k
1781          *   8GB:   92681k
1782          *  16GB:  131072k
1783          *
1784          * This allows larger machines to have larger/more transfers.
1785          * Limit the default to 256MB.
1786          */
1787         nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1788         if (nfs_congestion_kb > 256*1024)
1789                 nfs_congestion_kb = 256*1024;
1790
1791         return 0;
1792 }
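
/*
 * Illustrative sketch (not part of the kernel build): the limit above is
 * 16 * sqrt(total pages), converted from pages to kilobytes. A small
 * user-space check that reproduces every other row of the table (the
 * sizes whose page counts are perfect squares), assuming 4 KiB pages,
 * i.e. PAGE_SHIFT == 12; demo_int_sqrt() is hypothetical:
 */
#include <stdio.h>

static unsigned long demo_int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	const unsigned long page_shift = 12;	/* 4 KiB pages */
	unsigned long mb;

	for (mb = 64; mb <= 16384; mb *= 4) {
		unsigned long pages = mb << (20 - page_shift);
		unsigned long kb = (16 * demo_int_sqrt(pages))
						<< (page_shift - 10);

		if (kb > 256 * 1024)	/* the same 256MB ceiling */
			kb = 256 * 1024;
		/* prints 64MB: 8192k ... 16384MB: 131072k */
		printf("%6luMB: %8luk\n", mb, kb);
	}
	return 0;
}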
1793
1794 void nfs_destroy_writepagecache(void)
1795 {
1796         mempool_destroy(nfs_commit_mempool);
1797         mempool_destroy(nfs_wdata_mempool);
1798         kmem_cache_destroy(nfs_wdata_cachep);
1799 }
1800