1 /*
2  * linux/fs/nfs/write.c
3  *
4  * Write file data over NFS.
5  *
6  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
7  */
8
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16
17 #include <linux/sunrpc/clnt.h>
18 #include <linux/nfs_fs.h>
19 #include <linux/nfs_mount.h>
20 #include <linux/nfs_page.h>
21 #include <linux/backing-dev.h>
22
23 #include <asm/uaccess.h>
24
25 #include "delegation.h"
26 #include "internal.h"
27 #include "iostat.h"
28
29 #define NFSDBG_FACILITY         NFSDBG_PAGECACHE
30
31 #define MIN_POOL_WRITE          (32)
32 #define MIN_POOL_COMMIT         (4)
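/*
 * MIN_POOL_WRITE and MIN_POOL_COMMIT are the minimum number of entries
 * preallocated in the write and commit mempools created below, so that
 * writeback can still make forward progress when the slab allocator is
 * under memory pressure.
 */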
33
34 /*
35  * Local function declarations
36  */
37 static struct nfs_page * nfs_update_request(struct nfs_open_context*,
38                                             struct page *,
39                                             unsigned int, unsigned int);
40 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
41                                   struct inode *inode, int ioflags);
42 static void nfs_redirty_request(struct nfs_page *req);
43 static const struct rpc_call_ops nfs_write_partial_ops;
44 static const struct rpc_call_ops nfs_write_full_ops;
45 static const struct rpc_call_ops nfs_commit_ops;
46
47 static struct kmem_cache *nfs_wdata_cachep;
48 static mempool_t *nfs_wdata_mempool;
49 static mempool_t *nfs_commit_mempool;
50
51 struct nfs_write_data *nfs_commit_alloc(void)
52 {
53         struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
54
55         if (p) {
56                 memset(p, 0, sizeof(*p));
57                 INIT_LIST_HEAD(&p->pages);
58         }
59         return p;
60 }
61
62 static void nfs_commit_rcu_free(struct rcu_head *head)
63 {
64         struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
65         if (p && (p->pagevec != &p->page_array[0]))
66                 kfree(p->pagevec);
67         mempool_free(p, nfs_commit_mempool);
68 }
69
70 void nfs_commit_free(struct nfs_write_data *wdata)
71 {
72         call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
73 }
74
75 struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
76 {
77         struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
78
79         if (p) {
80                 memset(p, 0, sizeof(*p));
81                 INIT_LIST_HEAD(&p->pages);
82                 p->npages = pagecount;
83                 if (pagecount <= ARRAY_SIZE(p->page_array))
84                         p->pagevec = p->page_array;
85                 else {
86                         p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
87                         if (!p->pagevec) {
88                                 mempool_free(p, nfs_wdata_mempool);
89                                 p = NULL;
90                         }
91                 }
92         }
93         return p;
94 }
95
96 static void nfs_writedata_rcu_free(struct rcu_head *head)
97 {
98         struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
99         if (p && (p->pagevec != &p->page_array[0]))
100                 kfree(p->pagevec);
101         mempool_free(p, nfs_wdata_mempool);
102 }
103
104 static void nfs_writedata_free(struct nfs_write_data *wdata)
105 {
106         call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
107 }
108
109 void nfs_writedata_release(void *wdata)
110 {
111         nfs_writedata_free(wdata);
112 }
113
114 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
115 {
116         ctx->error = error;
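        /* Publish ctx->error before the error bit becomes visible */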
117         smp_wmb();
118         set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
119 }
120
121 static struct nfs_page *nfs_page_find_request_locked(struct page *page)
122 {
123         struct nfs_page *req = NULL;
124
125         if (PagePrivate(page)) {
126                 req = (struct nfs_page *)page_private(page);
127                 if (req != NULL)
128                         kref_get(&req->wb_kref);
129         }
130         return req;
131 }
132
133 static struct nfs_page *nfs_page_find_request(struct page *page)
134 {
135         struct inode *inode = page->mapping->host;
136         struct nfs_page *req = NULL;
137
138         spin_lock(&inode->i_lock);
139         req = nfs_page_find_request_locked(page);
140         spin_unlock(&inode->i_lock);
141         return req;
142 }
143
144 /* Adjust the file length if we're writing beyond the end */
145 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
146 {
147         struct inode *inode = page->mapping->host;
148         loff_t end, i_size = i_size_read(inode);
149         pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
150
151         if (i_size > 0 && page->index < end_index)
152                 return;
153         end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
154         if (i_size >= end)
155                 return;
156         nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
157         i_size_write(inode, end);
158 }
159
160 /* A writeback failed: mark the page as bad, and invalidate the page cache */
161 static void nfs_set_pageerror(struct page *page)
162 {
163         SetPageError(page);
164         nfs_zap_mapping(page->mapping->host, page->mapping);
165 }
166
167 /* We can set the PG_uptodate flag if we see that a write request
168  * covers the full page.
169  */
170 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
171 {
172         if (PageUptodate(page))
173                 return;
174         if (base != 0)
175                 return;
176         if (count != nfs_page_length(page))
177                 return;
178         SetPageUptodate(page);
179 }
180
181 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
182                 unsigned int offset, unsigned int count)
183 {
184         struct nfs_page *req;
185         int ret;
186
187         for (;;) {
188                 req = nfs_update_request(ctx, page, offset, count);
189                 if (!IS_ERR(req))
190                         break;
191                 ret = PTR_ERR(req);
192                 if (ret != -EBUSY)
193                         return ret;
194                 ret = nfs_wb_page(page->mapping->host, page);
195                 if (ret != 0)
196                         return ret;
197         }
198         /* Update file length */
199         nfs_grow_file(page, offset, count);
200         nfs_clear_page_tag_locked(req);
201         return 0;
202 }
203
204 static int wb_priority(struct writeback_control *wbc)
205 {
206         if (wbc->for_reclaim)
207                 return FLUSH_HIGHPRI | FLUSH_STABLE;
208         if (wbc->for_kupdate)
209                 return FLUSH_LOWPRI;
210         return 0;
211 }
212
213 /*
214  * NFS congestion control
215  */
216
217 int nfs_congestion_kb;
218
219 #define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
220 #define NFS_CONGESTION_OFF_THRESH       \
221         (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
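/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT - 10 == 2): with
 * nfs_congestion_kb = 32768, the backing device is marked congested once
 * more than 8192 pages are under NFS writeback, and the congestion flag
 * is cleared again when the count drops below 6144 pages.
 */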
222
223 static int nfs_set_page_writeback(struct page *page)
224 {
225         int ret = test_set_page_writeback(page);
226
227         if (!ret) {
228                 struct inode *inode = page->mapping->host;
229                 struct nfs_server *nfss = NFS_SERVER(inode);
230
231                 if (atomic_long_inc_return(&nfss->writeback) >
232                                 NFS_CONGESTION_ON_THRESH)
233                         set_bdi_congested(&nfss->backing_dev_info, WRITE);
234         }
235         return ret;
236 }
237
238 static void nfs_end_page_writeback(struct page *page)
239 {
240         struct inode *inode = page->mapping->host;
241         struct nfs_server *nfss = NFS_SERVER(inode);
242
243         end_page_writeback(page);
244         if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
245                 clear_bdi_congested(&nfss->backing_dev_info, WRITE);
246 }
247
248 /*
249  * Find an associated nfs write request, and prepare to flush it out
250  * May return an error if the user signalled nfs_wait_on_request().
251  */
252 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
253                                 struct page *page)
254 {
255         struct inode *inode = page->mapping->host;
256         struct nfs_page *req;
257         int ret;
258
259         spin_lock(&inode->i_lock);
260         for (;;) {
261                 req = nfs_page_find_request_locked(page);
262                 if (req == NULL) {
263                         spin_unlock(&inode->i_lock);
264                         return 0;
265                 }
266                 if (nfs_set_page_tag_locked(req))
267                         break;
268                 /* Note: If we hold the page lock, as is the case in nfs_writepage,
269                  *       then the call to nfs_set_page_tag_locked() will always
270                  *       succeed provided that someone hasn't already marked the
271                  *       request as dirty (in which case we don't care).
272                  */
273                 spin_unlock(&inode->i_lock);
274                 ret = nfs_wait_on_request(req);
275                 nfs_release_request(req);
276                 if (ret != 0)
277                         return ret;
278                 spin_lock(&inode->i_lock);
279         }
280         if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
281                 /* This request is marked for commit */
282                 spin_unlock(&inode->i_lock);
283                 nfs_clear_page_tag_locked(req);
284                 nfs_pageio_complete(pgio);
285                 return 0;
286         }
287         if (nfs_set_page_writeback(page) != 0) {
288                 spin_unlock(&inode->i_lock);
289                 BUG();
290         }
291         spin_unlock(&inode->i_lock);
292         if (!nfs_pageio_add_request(pgio, req)) {
293                 nfs_redirty_request(req);
294                 nfs_end_page_writeback(page);
295                 nfs_clear_page_tag_locked(req);
296                 return pgio->pg_error;
297         }
298         return 0;
299 }
300
301 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
302 {
303         struct inode *inode = page->mapping->host;
304
305         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
306         nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
307
308         nfs_pageio_cond_complete(pgio, page->index);
309         return nfs_page_async_flush(pgio, page);
310 }
311
312 /*
313  * Write an mmapped page to the server.
314  */
315 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
316 {
317         struct nfs_pageio_descriptor pgio;
318         int err;
319
320         nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
321         err = nfs_do_writepage(page, wbc, &pgio);
322         nfs_pageio_complete(&pgio);
323         if (err < 0)
324                 return err;
325         if (pgio.pg_error < 0)
326                 return pgio.pg_error;
327         return 0;
328 }
329
330 int nfs_writepage(struct page *page, struct writeback_control *wbc)
331 {
332         int ret;
333
334         ret = nfs_writepage_locked(page, wbc);
335         unlock_page(page);
336         return ret;
337 }
338
339 static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
340 {
341         int ret;
342
343         ret = nfs_do_writepage(page, wbc, data);
344         unlock_page(page);
345         return ret;
346 }
347
348 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
349 {
350         struct inode *inode = mapping->host;
351         struct nfs_pageio_descriptor pgio;
352         int err;
353
354         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
355
356         nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
357         err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
358         nfs_pageio_complete(&pgio);
359         if (err < 0)
360                 return err;
361         if (pgio.pg_error < 0)
362                 return pgio.pg_error;
363         return 0;
364 }
365
366 /*
367  * Insert a write request into an inode
368  */
369 static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
370 {
371         struct nfs_inode *nfsi = NFS_I(inode);
372         int error;
373
374         error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
375         BUG_ON(error == -EEXIST);
376         if (error)
377                 return error;
378         if (!nfsi->npages) {
379                 igrab(inode);
380                 if (nfs_have_delegation(inode, FMODE_WRITE))
381                         nfsi->change_attr++;
382         }
383         SetPagePrivate(req->wb_page);
384         set_page_private(req->wb_page, (unsigned long)req);
385         nfsi->npages++;
386         kref_get(&req->wb_kref);
387         radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
388         return 0;
389 }
390
391 /*
392  * Remove a write request from an inode
393  */
394 static void nfs_inode_remove_request(struct nfs_page *req)
395 {
396         struct inode *inode = req->wb_context->path.dentry->d_inode;
397         struct nfs_inode *nfsi = NFS_I(inode);
398
399         BUG_ON(!NFS_WBACK_BUSY(req));
400
401         spin_lock(&inode->i_lock);
402         set_page_private(req->wb_page, 0);
403         ClearPagePrivate(req->wb_page);
404         radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
405         nfsi->npages--;
406         if (!nfsi->npages) {
407                 spin_unlock(&inode->i_lock);
408                 iput(inode);
409         } else
410                 spin_unlock(&inode->i_lock);
411         nfs_clear_request(req);
412         nfs_release_request(req);
413 }
414
415 static void
416 nfs_redirty_request(struct nfs_page *req)
417 {
418         __set_page_dirty_nobuffers(req->wb_page);
419 }
420
421 /*
422  * Check if a request is dirty
423  */
424 static inline int
425 nfs_dirty_request(struct nfs_page *req)
426 {
427         struct page *page = req->wb_page;
428
429         if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
430                 return 0;
431         return !PageWriteback(req->wb_page);
432 }
433
434 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
435 /*
436  * Add a request to the inode's commit list.
437  */
438 static void
439 nfs_mark_request_commit(struct nfs_page *req)
440 {
441         struct inode *inode = req->wb_context->path.dentry->d_inode;
442         struct nfs_inode *nfsi = NFS_I(inode);
443
444         spin_lock(&inode->i_lock);
445         nfsi->ncommit++;
446         set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
447         radix_tree_tag_set(&nfsi->nfs_page_tree,
448                         req->wb_index,
449                         NFS_PAGE_TAG_COMMIT);
450         spin_unlock(&inode->i_lock);
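        /* Account for the page as "unstable": it has been written to the
         * server but not yet committed to stable storage. */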
451         inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
452         inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
453         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
454 }
455
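/*
 * A write reply whose commit level is weaker than NFS_FILE_SYNC means the
 * server may still be holding the data in volatile storage, so the request
 * must be followed up with a COMMIT.
 */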
456 static inline
457 int nfs_write_need_commit(struct nfs_write_data *data)
458 {
459         return data->verf.committed != NFS_FILE_SYNC;
460 }
461
462 static inline
463 int nfs_reschedule_unstable_write(struct nfs_page *req)
464 {
465         if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
466                 nfs_mark_request_commit(req);
467                 return 1;
468         }
469         if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
470                 nfs_redirty_request(req);
471                 return 1;
472         }
473         return 0;
474 }
475 #else
476 static inline void
477 nfs_mark_request_commit(struct nfs_page *req)
478 {
479 }
480
481 static inline
482 int nfs_write_need_commit(struct nfs_write_data *data)
483 {
484         return 0;
485 }
486
487 static inline
488 int nfs_reschedule_unstable_write(struct nfs_page *req)
489 {
490         return 0;
491 }
492 #endif
493
494 /*
495  * Wait for a request to complete.
496  *
497  * Interruptible by fatal signals only.
498  */
499 static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
500 {
501         struct nfs_inode *nfsi = NFS_I(inode);
502         struct nfs_page *req;
503         pgoff_t idx_end, next;
504         unsigned int            res = 0;
505         int                     error;
506
507         if (npages == 0)
508                 idx_end = ~0;
509         else
510                 idx_end = idx_start + npages - 1;
511
512         next = idx_start;
513         while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
514                 if (req->wb_index > idx_end)
515                         break;
516
517                 next = req->wb_index + 1;
518                 BUG_ON(!NFS_WBACK_BUSY(req));
519
520                 kref_get(&req->wb_kref);
521                 spin_unlock(&inode->i_lock);
522                 error = nfs_wait_on_request(req);
523                 nfs_release_request(req);
524                 spin_lock(&inode->i_lock);
525                 if (error < 0)
526                         return error;
527                 res++;
528         }
529         return res;
530 }
531
532 static void nfs_cancel_commit_list(struct list_head *head)
533 {
534         struct nfs_page *req;
535
536         while (!list_empty(head)) {
537                 req = nfs_list_entry(head->next);
538                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
539                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
540                                 BDI_RECLAIMABLE);
541                 nfs_list_remove_request(req);
542                 clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
543                 nfs_inode_remove_request(req);
544                 nfs_unlock_request(req);
545         }
546 }
547
548 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
549 /*
550  * nfs_scan_commit - Scan an inode for commit requests
551  * @inode: NFS inode to scan
552  * @dst: destination list
553  * @idx_start: lower bound of page->index to scan.
554  * @npages: idx_start + npages sets the upper bound to scan.
555  *
556  * Moves requests from the inode's 'commit' request list.
557  * The requests are *not* checked to ensure that they form a contiguous set.
558  */
559 static int
560 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
561 {
562         struct nfs_inode *nfsi = NFS_I(inode);
563         int res = 0;
564
565         if (nfsi->ncommit != 0) {
566                 res = nfs_scan_list(nfsi, dst, idx_start, npages,
567                                 NFS_PAGE_TAG_COMMIT);
568                 nfsi->ncommit -= res;
569         }
570         return res;
571 }
572 #else
573 static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
574 {
575         return 0;
576 }
577 #endif
578
579 /*
580  * Try to update any existing write request, or create one if there is none.
581  * In order to match, the request's credentials must match those of
582  * the calling process.
583  *
584  * Note: Should always be called with the Page Lock held!
585  */
586 static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
587                 struct page *page, unsigned int offset, unsigned int bytes)
588 {
589         struct address_space *mapping = page->mapping;
590         struct inode *inode = mapping->host;
591         struct nfs_page         *req, *new = NULL;
592         pgoff_t         rqend, end;
593
594         end = offset + bytes;
595
596         for (;;) {
597                 /* Loop over all inode entries and see if we find
598                  * a request for the page we wish to update
599                  */
600                 spin_lock(&inode->i_lock);
601                 req = nfs_page_find_request_locked(page);
602                 if (req) {
603                         if (!nfs_set_page_tag_locked(req)) {
604                                 int error;
605
606                                 spin_unlock(&inode->i_lock);
607                                 error = nfs_wait_on_request(req);
608                                 nfs_release_request(req);
609                                 if (error < 0) {
610                                         if (new)
611                                                 nfs_release_request(new);
612                                         return ERR_PTR(error);
613                                 }
614                                 continue;
615                         }
616                         spin_unlock(&inode->i_lock);
617                         if (new)
618                                 nfs_release_request(new);
619                         break;
620                 }
621
622                 if (new) {
623                         int error;
624                         nfs_lock_request_dontget(new);
625                         error = nfs_inode_add_request(inode, new);
626                         if (error) {
627                                 spin_unlock(&inode->i_lock);
628                                 nfs_unlock_request(new);
629                                 return ERR_PTR(error);
630                         }
631                         spin_unlock(&inode->i_lock);
632                         req = new;
633                         goto zero_page;
634                 }
635                 spin_unlock(&inode->i_lock);
636
637                 new = nfs_create_request(ctx, inode, page, offset, bytes);
638                 if (IS_ERR(new))
639                         return new;
640         }
641
642         /* We have a request for our page.
643          * If the creds don't match, or the
644          * page addresses don't match,
645          * tell the caller to wait on the conflicting
646          * request.
647          */
648         rqend = req->wb_offset + req->wb_bytes;
649         if (req->wb_context != ctx
650             || req->wb_page != page
651             || !nfs_dirty_request(req)
652             || offset > rqend || end < req->wb_offset) {
653                 nfs_clear_page_tag_locked(req);
654                 return ERR_PTR(-EBUSY);
655         }
656
657         /* Okay, the request matches. Update the region */
658         if (offset < req->wb_offset) {
659                 req->wb_offset = offset;
660                 req->wb_pgbase = offset;
661                 req->wb_bytes = max(end, rqend) - req->wb_offset;
662                 goto zero_page;
663         }
664
665         if (end > rqend)
666                 req->wb_bytes = end - req->wb_offset;
667
668         return req;
669 zero_page:
670         /* If this page might potentially be marked as up to date,
671          * then we need to zero any uninitialised data. */
672         if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
673                         && !PageUptodate(req->wb_page))
674                 zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
675         return req;
676 }
677
678 int nfs_flush_incompatible(struct file *file, struct page *page)
679 {
680         struct nfs_open_context *ctx = nfs_file_open_context(file);
681         struct nfs_page *req;
682         int do_flush, status;
683         /*
684          * Look for a request corresponding to this page. If there
685          * is one, and it belongs to another file, we flush it out
686          * before we try to copy anything into the page. Do this
687          * due to the lack of an ACCESS-type call in NFSv2.
688          * Also do the same if we find a request from an existing
689          * dropped page.
690          */
691         do {
692                 req = nfs_page_find_request(page);
693                 if (req == NULL)
694                         return 0;
695                 do_flush = req->wb_page != page || req->wb_context != ctx
696                         || !nfs_dirty_request(req);
697                 nfs_release_request(req);
698                 if (!do_flush)
699                         return 0;
700                 status = nfs_wb_page(page->mapping->host, page);
701         } while (status == 0);
702         return status;
703 }
704
705 /*
706  * If the page cache is marked as unsafe or invalid, then we can't rely on
707  * the PageUptodate() flag. In this case, we will need to turn off
708  * write optimisations that depend on the page contents being correct.
709  */
710 static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
711 {
712         return PageUptodate(page) &&
713                 !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
714 }
715
716 /*
717  * Update and possibly write a cached page of an NFS file.
718  *
719  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
720  * things with a page scheduled for an RPC call (e.g. invalidate it).
721  */
722 int nfs_updatepage(struct file *file, struct page *page,
723                 unsigned int offset, unsigned int count)
724 {
725         struct nfs_open_context *ctx = nfs_file_open_context(file);
726         struct inode    *inode = page->mapping->host;
727         int             status = 0;
728
729         nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
730
731         dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
732                 file->f_path.dentry->d_parent->d_name.name,
733                 file->f_path.dentry->d_name.name, count,
734                 (long long)(page_offset(page) + offset));
735
736         /* If we're not using byte range locks, and we know the page
737          * is up to date, it may be more efficient to extend the write
738          * to cover the entire page in order to avoid fragmentation
739          * inefficiencies.
740          */
741         if (nfs_write_pageuptodate(page, inode) &&
742                         inode->i_flock == NULL &&
743                         !(file->f_flags & O_SYNC)) {
744                 count = max(count + offset, nfs_page_length(page));
745                 offset = 0;
746         }
747
748         status = nfs_writepage_setup(ctx, page, offset, count);
749         __set_page_dirty_nobuffers(page);
750
751         dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
752                         status, (long long)i_size_read(inode));
753         if (status < 0)
754                 nfs_set_pageerror(page);
755         return status;
756 }
757
758 static void nfs_writepage_release(struct nfs_page *req)
759 {
760
761         if (PageError(req->wb_page)) {
762                 nfs_end_page_writeback(req->wb_page);
763                 nfs_inode_remove_request(req);
764         } else if (!nfs_reschedule_unstable_write(req)) {
765                 /* Set the PG_uptodate flag */
766                 nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes);
767                 nfs_end_page_writeback(req->wb_page);
768                 nfs_inode_remove_request(req);
769         } else
770                 nfs_end_page_writeback(req->wb_page);
771         nfs_clear_page_tag_locked(req);
772 }
773
774 static int flush_task_priority(int how)
775 {
776         switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
777                 case FLUSH_HIGHPRI:
778                         return RPC_PRIORITY_HIGH;
779                 case FLUSH_LOWPRI:
780                         return RPC_PRIORITY_LOW;
781         }
782         return RPC_PRIORITY_NORMAL;
783 }
784
785 /*
786  * Set up the argument/result storage required for the RPC call.
787  */
788 static void nfs_write_rpcsetup(struct nfs_page *req,
789                 struct nfs_write_data *data,
790                 const struct rpc_call_ops *call_ops,
791                 unsigned int count, unsigned int offset,
792                 int how)
793 {
794         struct inode *inode = req->wb_context->path.dentry->d_inode;
795         int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
796         int priority = flush_task_priority(how);
797         struct rpc_task *task;
798         struct rpc_message msg = {
799                 .rpc_argp = &data->args,
800                 .rpc_resp = &data->res,
801                 .rpc_cred = req->wb_context->cred,
802         };
803         struct rpc_task_setup task_setup_data = {
804                 .rpc_client = NFS_CLIENT(inode),
805                 .task = &data->task,
806                 .rpc_message = &msg,
807                 .callback_ops = call_ops,
808                 .callback_data = data,
809                 .flags = flags,
810                 .priority = priority,
811         };
812
813         /* Set up the RPC argument and reply structs
814          * NB: take care not to mess about with data->commit et al. */
815
816         data->req = req;
817         data->inode = inode = req->wb_context->path.dentry->d_inode;
818         data->cred = msg.rpc_cred;
819
820         data->args.fh     = NFS_FH(inode);
821         data->args.offset = req_offset(req) + offset;
822         data->args.pgbase = req->wb_pgbase + offset;
823         data->args.pages  = data->pagevec;
824         data->args.count  = count;
825         data->args.context = req->wb_context;
826         data->args.stable  = NFS_UNSTABLE;
827         if (how & FLUSH_STABLE) {
828                 data->args.stable = NFS_DATA_SYNC;
829                 if (!NFS_I(inode)->ncommit)
830                         data->args.stable = NFS_FILE_SYNC;
831         }
832
833         data->res.fattr   = &data->fattr;
834         data->res.count   = count;
835         data->res.verf    = &data->verf;
836         nfs_fattr_init(&data->fattr);
837
838         /* Set up the initial task struct.  */
839         NFS_PROTO(inode)->write_setup(data, &msg);
840
841         dprintk("NFS: %5u initiated write call "
842                 "(req %s/%Ld, %u bytes @ offset %Lu)\n",
843                 data->task.tk_pid,
844                 inode->i_sb->s_id,
845                 (long long)NFS_FILEID(inode),
846                 count,
847                 (unsigned long long)data->args.offset);
848
849         task = rpc_run_task(&task_setup_data);
850         if (!IS_ERR(task))
851                 rpc_put_task(task);
852 }
853
854 /*
855  * Generate multiple small requests to write out a single
856  * contiguous dirty area on one page.
857  */
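/*
 * For example, with 4 KiB pages and a server wsize of 1 KiB, a fully
 * dirty page is split into four 1 KiB WRITE requests that all point at
 * the same struct page.
 */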
858 static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
859 {
860         struct nfs_page *req = nfs_list_entry(head->next);
861         struct page *page = req->wb_page;
862         struct nfs_write_data *data;
863         size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
864         unsigned int offset;
865         int requests = 0;
866         LIST_HEAD(list);
867
868         nfs_list_remove_request(req);
869
870         nbytes = count;
871         do {
872                 size_t len = min(nbytes, wsize);
873
874                 data = nfs_writedata_alloc(1);
875                 if (!data)
876                         goto out_bad;
877                 list_add(&data->pages, &list);
878                 requests++;
879                 nbytes -= len;
880         } while (nbytes != 0);
881         atomic_set(&req->wb_complete, requests);
882
883         ClearPageError(page);
884         offset = 0;
885         nbytes = count;
886         do {
887                 data = list_entry(list.next, struct nfs_write_data, pages);
888                 list_del_init(&data->pages);
889
890                 data->pagevec[0] = page;
891
892                 if (nbytes < wsize)
893                         wsize = nbytes;
894                 nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
895                                    wsize, offset, how);
896                 offset += wsize;
897                 nbytes -= wsize;
898         } while (nbytes != 0);
899
900         return 0;
901
902 out_bad:
903         while (!list_empty(&list)) {
904                 data = list_entry(list.next, struct nfs_write_data, pages);
905                 list_del(&data->pages);
906                 nfs_writedata_release(data);
907         }
908         nfs_redirty_request(req);
909         nfs_end_page_writeback(req->wb_page);
910         nfs_clear_page_tag_locked(req);
911         return -ENOMEM;
912 }
913
914 /*
915  * Create an RPC task for the given write request and kick it.
916  * The page must have been locked by the caller.
917  *
918  * It may happen that the page we're passed is not marked dirty.
919  * This is the case if nfs_updatepage detects a conflicting request
920  * that has been written but not committed.
921  */
922 static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
923 {
924         struct nfs_page         *req;
925         struct page             **pages;
926         struct nfs_write_data   *data;
927
928         data = nfs_writedata_alloc(npages);
929         if (!data)
930                 goto out_bad;
931
932         pages = data->pagevec;
933         while (!list_empty(head)) {
934                 req = nfs_list_entry(head->next);
935                 nfs_list_remove_request(req);
936                 nfs_list_add_request(req, &data->pages);
937                 ClearPageError(req->wb_page);
938                 *pages++ = req->wb_page;
939         }
940         req = nfs_list_entry(data->pages.next);
941
942         /* Set up the argument struct */
943         nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
944
945         return 0;
946  out_bad:
947         while (!list_empty(head)) {
948                 req = nfs_list_entry(head->next);
949                 nfs_list_remove_request(req);
950                 nfs_redirty_request(req);
951                 nfs_end_page_writeback(req->wb_page);
952                 nfs_clear_page_tag_locked(req);
953         }
954         return -ENOMEM;
955 }
956
957 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
958                                   struct inode *inode, int ioflags)
959 {
960         size_t wsize = NFS_SERVER(inode)->wsize;
961
962         if (wsize < PAGE_CACHE_SIZE)
963                 nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
964         else
965                 nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
966 }
967
968 /*
969  * Handle a write reply that flushed part of a page.
970  */
971 static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
972 {
973         struct nfs_write_data   *data = calldata;
974         struct nfs_page         *req = data->req;
975         struct page             *page = req->wb_page;
976
977         dprintk("NFS: write (%s/%Ld %d@%Ld)",
978                 req->wb_context->path.dentry->d_inode->i_sb->s_id,
979                 (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
980                 req->wb_bytes,
981                 (long long)req_offset(req));
982
983         if (nfs_writeback_done(task, data) != 0)
984                 return;
985
986         if (task->tk_status < 0) {
987                 nfs_set_pageerror(page);
988                 nfs_context_set_write_error(req->wb_context, task->tk_status);
989                 dprintk(", error = %d\n", task->tk_status);
990                 goto out;
991         }
992
993         if (nfs_write_need_commit(data)) {
994                 struct inode *inode = page->mapping->host;
995
996                 spin_lock(&inode->i_lock);
997                 if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
998                         /* Do nothing; we need to resend the writes */
999                 } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
1000                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1001                         dprintk(" defer commit\n");
1002                 } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
1003                         set_bit(PG_NEED_RESCHED, &req->wb_flags);
1004                         clear_bit(PG_NEED_COMMIT, &req->wb_flags);
1005                         dprintk(" server reboot detected\n");
1006                 }
1007                 spin_unlock(&inode->i_lock);
1008         } else
1009                 dprintk(" OK\n");
1010
1011 out:
1012         if (atomic_dec_and_test(&req->wb_complete))
1013                 nfs_writepage_release(req);
1014 }
1015
1016 static const struct rpc_call_ops nfs_write_partial_ops = {
1017         .rpc_call_done = nfs_writeback_done_partial,
1018         .rpc_release = nfs_writedata_release,
1019 };
1020
1021 /*
1022  * Handle a write reply that flushes a whole page.
1023  *
1024  * FIXME: There is an inherent race with invalidate_inode_pages and
1025  *        writebacks since the page->count is kept > 1 for as long
1026  *        as the page has a write request pending.
1027  */
1028 static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
1029 {
1030         struct nfs_write_data   *data = calldata;
1031         struct nfs_page         *req;
1032         struct page             *page;
1033
1034         if (nfs_writeback_done(task, data) != 0)
1035                 return;
1036
1037         /* Update attributes as result of writeback. */
1038         while (!list_empty(&data->pages)) {
1039                 req = nfs_list_entry(data->pages.next);
1040                 nfs_list_remove_request(req);
1041                 page = req->wb_page;
1042
1043                 dprintk("NFS: write (%s/%Ld %d@%Ld)",
1044                         req->wb_context->path.dentry->d_inode->i_sb->s_id,
1045                         (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
1046                         req->wb_bytes,
1047                         (long long)req_offset(req));
1048
1049                 if (task->tk_status < 0) {
1050                         nfs_set_pageerror(page);
1051                         nfs_context_set_write_error(req->wb_context, task->tk_status);
1052                         dprintk(", error = %d\n", task->tk_status);
1053                         goto remove_request;
1054                 }
1055
1056                 if (nfs_write_need_commit(data)) {
1057                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1058                         nfs_mark_request_commit(req);
1059                         nfs_end_page_writeback(page);
1060                         dprintk(" marked for commit\n");
1061                         goto next;
1062                 }
1063                 /* Set the PG_uptodate flag? */
1064                 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
1065                 dprintk(" OK\n");
1066 remove_request:
1067                 nfs_end_page_writeback(page);
1068                 nfs_inode_remove_request(req);
1069         next:
1070                 nfs_clear_page_tag_locked(req);
1071         }
1072 }
1073
1074 static const struct rpc_call_ops nfs_write_full_ops = {
1075         .rpc_call_done = nfs_writeback_done_full,
1076         .rpc_release = nfs_writedata_release,
1077 };
1078
1079
1080 /*
1081  * This function is called when the WRITE call is complete.
1082  */
1083 int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1084 {
1085         struct nfs_writeargs    *argp = &data->args;
1086         struct nfs_writeres     *resp = &data->res;
1087         int status;
1088
1089         dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
1090                 task->tk_pid, task->tk_status);
1091
1092         /*
1093          * ->write_done will attempt to use post-op attributes to detect
1094          * conflicting writes by other clients.  A strict interpretation
1095          * of close-to-open would allow us to continue caching even if
1096          * another writer had changed the file, but some applications
1097          * depend on tighter cache coherency when writing.
1098          */
1099         status = NFS_PROTO(data->inode)->write_done(task, data);
1100         if (status != 0)
1101                 return status;
1102         nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1103
1104 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1105         if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1106                 /* We tried a write call, but the server did not
1107                  * commit data to stable storage even though we
1108                  * requested it.
1109                  * Note: There is a known bug in Tru64 < 5.0 in which
1110                  *       the server reports NFS_DATA_SYNC, but performs
1111                  *       NFS_FILE_SYNC. We therefore implement this checking
1112                  *       as a dprintk() in order to avoid filling syslog.
1113                  */
1114                 static unsigned long    complain;
1115
1116                 if (time_before(complain, jiffies)) {
1117                         dprintk("NFS: faulty NFS server %s:"
1118                                 " (committed = %d) != (stable = %d)\n",
1119                                 NFS_SERVER(data->inode)->nfs_client->cl_hostname,
1120                                 resp->verf->committed, argp->stable);
1121                         complain = jiffies + 300 * HZ;
1122                 }
1123         }
1124 #endif
1125         /* Is this a short write? */
1126         if (task->tk_status >= 0 && resp->count < argp->count) {
1127                 static unsigned long    complain;
1128
1129                 nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
1130
1131                 /* Has the server at least made some progress? */
1132                 if (resp->count != 0) {
1133                         /* Was this an NFSv2 write or an NFSv3 stable write? */
1134                         if (resp->verf->committed != NFS_UNSTABLE) {
1135                                 /* Resend from where the server left off */
1136                                 argp->offset += resp->count;
1137                                 argp->pgbase += resp->count;
1138                                 argp->count -= resp->count;
1139                         } else {
1140                                 /* Resend as a stable write in order to avoid
1141                                  * headaches in the case of a server crash.
1142                                  */
1143                                 argp->stable = NFS_FILE_SYNC;
1144                         }
1145                         rpc_restart_call(task);
1146                         return -EAGAIN;
1147                 }
1148                 if (time_before(complain, jiffies)) {
1149                         printk(KERN_WARNING
1150                                "NFS: Server wrote zero bytes, expected %u.\n",
1151                                         argp->count);
1152                         complain = jiffies + 300 * HZ;
1153                 }
1154                 /* Can't do anything about it except throw an error. */
1155                 task->tk_status = -EIO;
1156         }
1157         return 0;
1158 }
1159
1160
1161 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1162 void nfs_commit_release(void *wdata)
1163 {
1164         nfs_commit_free(wdata);
1165 }
1166
1167 /*
1168  * Set up the argument/result storage required for the RPC call.
1169  */
1170 static void nfs_commit_rpcsetup(struct list_head *head,
1171                 struct nfs_write_data *data,
1172                 int how)
1173 {
1174         struct nfs_page *first = nfs_list_entry(head->next);
1175         struct inode *inode = first->wb_context->path.dentry->d_inode;
1176         int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
1177         int priority = flush_task_priority(how);
1178         struct rpc_task *task;
1179         struct rpc_message msg = {
1180                 .rpc_argp = &data->args,
1181                 .rpc_resp = &data->res,
1182                 .rpc_cred = first->wb_context->cred,
1183         };
1184         struct rpc_task_setup task_setup_data = {
1185                 .task = &data->task,
1186                 .rpc_client = NFS_CLIENT(inode),
1187                 .rpc_message = &msg,
1188                 .callback_ops = &nfs_commit_ops,
1189                 .callback_data = data,
1190                 .flags = flags,
1191                 .priority = priority,
1192         };
1193
1194         /* Set up the RPC argument and reply structs
1195          * NB: take care not to mess about with data->commit et al. */
1196
1197         list_splice_init(head, &data->pages);
1198
1199         data->inode       = inode;
1200         data->cred        = msg.rpc_cred;
1201
1202         data->args.fh     = NFS_FH(data->inode);
1203         /* Note: we always request a commit of the entire inode */
1204         data->args.offset = 0;
1205         data->args.count  = 0;
1206         data->res.count   = 0;
1207         data->res.fattr   = &data->fattr;
1208         data->res.verf    = &data->verf;
1209         nfs_fattr_init(&data->fattr);
1210
1211         /* Set up the initial task struct.  */
1212         NFS_PROTO(inode)->commit_setup(data, &msg);
1213
1214         dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1215
1216         task = rpc_run_task(&task_setup_data);
1217         if (!IS_ERR(task))
1218                 rpc_put_task(task);
1219 }
1220
1221 /*
1222  * Commit dirty pages
1223  */
1224 static int
1225 nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1226 {
1227         struct nfs_write_data   *data;
1228         struct nfs_page         *req;
1229
1230         data = nfs_commit_alloc();
1231
1232         if (!data)
1233                 goto out_bad;
1234
1235         /* Set up the argument struct */
1236         nfs_commit_rpcsetup(head, data, how);
1237
1238         return 0;
1239  out_bad:
1240         while (!list_empty(head)) {
1241                 req = nfs_list_entry(head->next);
1242                 nfs_list_remove_request(req);
1243                 nfs_mark_request_commit(req);
1244                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1245                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
1246                                 BDI_RECLAIMABLE);
1247                 nfs_clear_page_tag_locked(req);
1248         }
1249         return -ENOMEM;
1250 }
1251
1252 /*
1253  * COMMIT call returned
1254  */
1255 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1256 {
1257         struct nfs_write_data   *data = calldata;
1258         struct nfs_page         *req;
1259
1260         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1261                                 task->tk_pid, task->tk_status);
1262
1263         /* Call the NFS version-specific code */
1264         if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
1265                 return;
1266
1267         while (!list_empty(&data->pages)) {
1268                 req = nfs_list_entry(data->pages.next);
1269                 nfs_list_remove_request(req);
1270                 clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
1271                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1272                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
1273                                 BDI_RECLAIMABLE);
1274
1275                 dprintk("NFS: commit (%s/%Ld %d@%Ld)",
1276                         req->wb_context->path.dentry->d_inode->i_sb->s_id,
1277                         (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
1278                         req->wb_bytes,
1279                         (long long)req_offset(req));
1280                 if (task->tk_status < 0) {
1281                         nfs_context_set_write_error(req->wb_context, task->tk_status);
1282                         nfs_inode_remove_request(req);
1283                         dprintk(", error = %d\n", task->tk_status);
1284                         goto next;
1285                 }
1286
1287                 /* Okay, COMMIT succeeded, apparently. Check the verifier
1288                  * returned by the server against all stored verfs. */
1289                 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1290                         /* We have a match */
1291                         /* Set the PG_uptodate flag */
1292                         nfs_mark_uptodate(req->wb_page, req->wb_pgbase,
1293                                         req->wb_bytes);
1294                         nfs_inode_remove_request(req);
1295                         dprintk(" OK\n");
1296                         goto next;
1297                 }
1298                 /* We have a mismatch. Write the page again */
1299                 dprintk(" mismatch\n");
1300                 nfs_redirty_request(req);
1301         next:
1302                 nfs_clear_page_tag_locked(req);
1303         }
1304 }
1305
1306 static const struct rpc_call_ops nfs_commit_ops = {
1307         .rpc_call_done = nfs_commit_done,
1308         .rpc_release = nfs_commit_release,
1309 };
1310
1311 int nfs_commit_inode(struct inode *inode, int how)
1312 {
1313         LIST_HEAD(head);
1314         int res;
1315
1316         spin_lock(&inode->i_lock);
1317         res = nfs_scan_commit(inode, &head, 0, 0);
1318         spin_unlock(&inode->i_lock);
1319         if (res) {
1320                 int error = nfs_commit_list(inode, &head, how);
1321                 if (error < 0)
1322                         return error;
1323         }
1324         return res;
1325 }
1326 #else
1327 static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1328 {
1329         return 0;
1330 }
1331 #endif
1332
1333 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
1334 {
1335         struct inode *inode = mapping->host;
1336         pgoff_t idx_start, idx_end;
1337         unsigned int npages = 0;
1338         LIST_HEAD(head);
1339         int nocommit = how & FLUSH_NOCOMMIT;
1340         long pages, ret;
1341
1342         /* FIXME */
1343         if (wbc->range_cyclic)
1344                 idx_start = 0;
1345         else {
1346                 idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
1347                 idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
1348                 if (idx_end > idx_start) {
1349                         pgoff_t l_npages = 1 + idx_end - idx_start;
1350                         npages = l_npages;
1351                         if (sizeof(npages) != sizeof(l_npages) &&
1352                                         (pgoff_t)npages != l_npages)
1353                                 npages = 0;
1354                 }
1355         }
1356         how &= ~FLUSH_NOCOMMIT;
1357         spin_lock(&inode->i_lock);
1358         do {
1359                 ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
1360                 if (ret != 0)
1361                         continue;
1362                 if (nocommit)
1363                         break;
1364                 pages = nfs_scan_commit(inode, &head, idx_start, npages);
1365                 if (pages == 0)
1366                         break;
1367                 if (how & FLUSH_INVALIDATE) {
1368                         spin_unlock(&inode->i_lock);
1369                         nfs_cancel_commit_list(&head);
1370                         ret = pages;
1371                         spin_lock(&inode->i_lock);
1372                         continue;
1373                 }
1374                 pages += nfs_scan_commit(inode, &head, 0, 0);
1375                 spin_unlock(&inode->i_lock);
1376                 ret = nfs_commit_list(inode, &head, how);
1377                 spin_lock(&inode->i_lock);
1378
1379         } while (ret >= 0);
1380         spin_unlock(&inode->i_lock);
1381         return ret;
1382 }
1383
1384 static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
1385 {
1386         int ret;
1387
1388         ret = nfs_writepages(mapping, wbc);
1389         if (ret < 0)
1390                 goto out;
1391         ret = nfs_sync_mapping_wait(mapping, wbc, how);
1392         if (ret < 0)
1393                 goto out;
1394         return 0;
1395 out:
1396         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1397         return ret;
1398 }
1399
1400 /* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
1401 static int nfs_write_mapping(struct address_space *mapping, int how)
1402 {
1403         struct writeback_control wbc = {
1404                 .bdi = mapping->backing_dev_info,
1405                 .sync_mode = WB_SYNC_NONE,
1406                 .nr_to_write = LONG_MAX,
1407                 .for_writepages = 1,
1408                 .range_cyclic = 1,
1409         };
1410         int ret;
1411
1412         ret = __nfs_write_mapping(mapping, &wbc, how);
1413         if (ret < 0)
1414                 return ret;
1415         wbc.sync_mode = WB_SYNC_ALL;
1416         return __nfs_write_mapping(mapping, &wbc, how);
1417 }
1418
1419 /*
1420  * flush the inode to disk.
1421  */
1422 int nfs_wb_all(struct inode *inode)
1423 {
1424         return nfs_write_mapping(inode->i_mapping, 0);
1425 }
1426
1427 int nfs_wb_nocommit(struct inode *inode)
1428 {
1429         return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
1430 }
1431
1432 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1433 {
1434         struct nfs_page *req;
1435         loff_t range_start = page_offset(page);
1436         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1437         struct writeback_control wbc = {
1438                 .bdi = page->mapping->backing_dev_info,
1439                 .sync_mode = WB_SYNC_ALL,
1440                 .nr_to_write = LONG_MAX,
1441                 .range_start = range_start,
1442                 .range_end = range_end,
1443         };
1444         int ret = 0;
1445
1446         BUG_ON(!PageLocked(page));
1447         for (;;) {
1448                 req = nfs_page_find_request(page);
1449                 if (req == NULL)
1450                         goto out;
1451                 if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
1452                         nfs_release_request(req);
1453                         break;
1454                 }
1455                 if (nfs_lock_request_dontget(req)) {
1456                         nfs_inode_remove_request(req);
1457                         /*
1458                          * In case nfs_inode_remove_request has marked the
1459                          * page as being dirty
1460                          */
1461                         cancel_dirty_page(page, PAGE_CACHE_SIZE);
1462                         nfs_unlock_request(req);
1463                         break;
1464                 }
1465                 ret = nfs_wait_on_request(req);
1466                 if (ret < 0)
1467                         goto out;
1468         }
1469         if (!PagePrivate(page))
1470                 return 0;
1471         ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
1472 out:
1473         return ret;
1474 }
1475
1476 static int nfs_wb_page_priority(struct inode *inode, struct page *page,
1477                                 int how)
1478 {
1479         loff_t range_start = page_offset(page);
1480         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1481         struct writeback_control wbc = {
1482                 .bdi = page->mapping->backing_dev_info,
1483                 .sync_mode = WB_SYNC_ALL,
1484                 .nr_to_write = LONG_MAX,
1485                 .range_start = range_start,
1486                 .range_end = range_end,
1487         };
1488         int ret;
1489
1490         BUG_ON(!PageLocked(page));
1491         if (clear_page_dirty_for_io(page)) {
1492                 ret = nfs_writepage_locked(page, &wbc);
1493                 if (ret < 0)
1494                         goto out;
1495         }
1496         if (!PagePrivate(page))
1497                 return 0;
1498         ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
1499         if (ret >= 0)
1500                 return 0;
1501 out:
1502         __mark_inode_dirty(inode, I_DIRTY_PAGES);
1503         return ret;
1504 }
1505
1506 /*
1507  * Write back all requests on one page - we do this before reading it.
1508  */
1509 int nfs_wb_page(struct inode *inode, struct page* page)
1510 {
1511         return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
1512 }
1513
1514 int __init nfs_init_writepagecache(void)
1515 {
1516         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1517                                              sizeof(struct nfs_write_data),
1518                                              0, SLAB_HWCACHE_ALIGN,
1519                                              NULL);
1520         if (nfs_wdata_cachep == NULL)
1521                 return -ENOMEM;
1522
1523         nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1524                                                      nfs_wdata_cachep);
1525         if (nfs_wdata_mempool == NULL)
1526                 return -ENOMEM;
1527
1528         nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1529                                                       nfs_wdata_cachep);
1530         if (nfs_commit_mempool == NULL)
1531                 return -ENOMEM;
1532
1533         /*
1534          * NFS congestion size, scale with available memory.
1535          *
1536          *  64MB:    8192k
1537          * 128MB:   11585k
1538          * 256MB:   16384k
1539          * 512MB:   23170k
1540          *   1GB:   32768k
1541          *   2GB:   46340k
1542          *   4GB:   65536k
1543          *   8GB:   92681k
1544          *  16GB:  131072k
1545          *
1546          * This allows larger machines to have larger/more transfers.
1547          * Limit the default to 256M
1548          */
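        /*
         * Example, assuming 4 KiB pages: a 1GB machine has
         * totalram_pages = 262144, int_sqrt(262144) = 512, and
         * (16 * 512) << (PAGE_SHIFT-10) = 32768k, matching the table above.
         */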
1549         nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1550         if (nfs_congestion_kb > 256*1024)
1551                 nfs_congestion_kb = 256*1024;
1552
1553         return 0;
1554 }
1555
1556 void nfs_destroy_writepagecache(void)
1557 {
1558         mempool_destroy(nfs_commit_mempool);
1559         mempool_destroy(nfs_wdata_mempool);
1560         kmem_cache_destroy(nfs_wdata_cachep);
1561 }
1562