NFS: Replace file->private_data with calls to nfs_file_open_context()
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

#define MIN_POOL_WRITE          (32)
#define MIN_POOL_COMMIT         (4)

/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
                                            struct page *,
                                            unsigned int, unsigned int);
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
                                  struct inode *inode, int ioflags);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

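/*
 * Note: write and commit headers come from dedicated mempools so that
 * writeback can keep making forward progress under memory pressure;
 * mempool_alloc(..., GFP_NOFS) may sleep, but it should not fail while
 * the pool still holds reserved objects.
 */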
struct nfs_write_data *nfs_commit_alloc(void)
{
        struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
        }
        return p;
}

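/*
 * Note: freeing is deferred through an RCU (bh) grace period via the
 * embedded rpc_task's tk_rcu head, presumably because the task may
 * still be referenced by lockless readers in the RPC layer.
 */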
static void nfs_commit_rcu_free(struct rcu_head *head)
{
        struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_free(struct nfs_write_data *wdata)
{
        call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}

struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
        struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
                p->npages = pagecount;
                if (pagecount <= ARRAY_SIZE(p->page_array))
                        p->pagevec = p->page_array;
                else {
                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
                        if (!p->pagevec) {
                                mempool_free(p, nfs_wdata_mempool);
                                p = NULL;
                        }
                }
        }
        return p;
}

static void nfs_writedata_rcu_free(struct rcu_head *head)
{
        struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_free(struct nfs_write_data *wdata)
{
        call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
        nfs_writedata_free(wdata);
}

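/*
 * The smp_wmb() orders the store of ctx->error before the setting of
 * NFS_CONTEXT_ERROR_WRITE: a reader that observes the flag (after the
 * matching read barrier) is guaranteed to see the error value.
 */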
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
        ctx->error = error;
        smp_wmb();
        set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
        struct nfs_page *req = NULL;

        if (PagePrivate(page)) {
                req = (struct nfs_page *)page_private(page);
                if (req != NULL)
                        kref_get(&req->wb_kref);
        }
        return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct nfs_page *req = NULL;

        spin_lock(&inode->i_lock);
        req = nfs_page_find_request_locked(page);
        spin_unlock(&inode->i_lock);
        return req;
}

/* Adjust the file length if we're writing beyond the end */
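/*
 * Example (a sketch, assuming 4k pages): a write of count=200 bytes at
 * offset=100 into the page at index 3 ends at byte
 * (3 << PAGE_CACHE_SHIFT) + 300 = 12588; if i_size is smaller than
 * that, the file is grown to that length.
 */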
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
        struct inode *inode = page->mapping->host;
        loff_t end, i_size = i_size_read(inode);
        pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

        if (i_size > 0 && page->index < end_index)
                return;
        end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
        if (i_size >= end)
                return;
        nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
        i_size_write(inode, end);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
        SetPageError(page);
        nfs_zap_mapping(page->mapping->host, page->mapping);
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
        if (PageUptodate(page))
                return;
        if (base != 0)
                return;
        if (count != nfs_page_length(page))
                return;
        if (count != PAGE_CACHE_SIZE)
                zero_user_page(page, count, PAGE_CACHE_SIZE - count, KM_USER0);
        SetPageUptodate(page);
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_page *req;
        int ret;

        for (;;) {
                req = nfs_update_request(ctx, page, offset, count);
                if (!IS_ERR(req))
                        break;
                ret = PTR_ERR(req);
                if (ret != -EBUSY)
                        return ret;
                ret = nfs_wb_page(page->mapping->host, page);
                if (ret != 0)
                        return ret;
        }
        /* Update file length */
        nfs_grow_file(page, offset, count);
        nfs_unlock_request(req);
        return 0;
}

static int wb_priority(struct writeback_control *wbc)
{
        if (wbc->for_reclaim)
                return FLUSH_HIGHPRI | FLUSH_STABLE;
        if (wbc->for_kupdate)
                return FLUSH_LOWPRI;
        return 0;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH       \
        (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
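/*
 * The two thresholds give simple hysteresis: the bdi is marked
 * congested once outstanding writeback pages exceed the ON threshold,
 * and is only cleared again once they drop below 75% of it
 * (ON - ON/4), so we don't flap around the boundary.
 */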

static int nfs_set_page_writeback(struct page *page)
{
        int ret = test_set_page_writeback(page);

        if (!ret) {
                struct inode *inode = page->mapping->host;
                struct nfs_server *nfss = NFS_SERVER(inode);

                if (atomic_long_inc_return(&nfss->writeback) >
                                NFS_CONGESTION_ON_THRESH)
                        set_bdi_congested(&nfss->backing_dev_info, WRITE);
        }
        return ret;
}

static void nfs_end_page_writeback(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct nfs_server *nfss = NFS_SERVER(inode);

        end_page_writeback(page);
        if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
                clear_bdi_congested(&nfss->backing_dev_info, WRITE);
                congestion_end(WRITE);
        }
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                                struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page *req;
        int ret;

        spin_lock(&inode->i_lock);
        for (;;) {
                req = nfs_page_find_request_locked(page);
                if (req == NULL) {
                        spin_unlock(&inode->i_lock);
                        return 0;
                }
                if (nfs_lock_request_dontget(req))
                        break;
                /* Note: If we hold the page lock, as is the case in nfs_writepage,
                 *       then the call to nfs_lock_request_dontget() will always
                 *       succeed provided that someone hasn't already marked the
                 *       request as dirty (in which case we don't care).
                 */
                spin_unlock(&inode->i_lock);
                ret = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (ret != 0)
                        return ret;
                spin_lock(&inode->i_lock);
        }
        if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
                /* This request is marked for commit */
                spin_unlock(&inode->i_lock);
                nfs_unlock_request(req);
                nfs_pageio_complete(pgio);
                return 0;
        }
        if (nfs_set_page_writeback(page) != 0) {
                spin_unlock(&inode->i_lock);
                BUG();
        }
        radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
                        NFS_PAGE_TAG_LOCKED);
        spin_unlock(&inode->i_lock);
        nfs_pageio_add_request(pgio, req);
        return 0;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
        struct inode *inode = page->mapping->host;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

        nfs_pageio_cond_complete(pgio, page->index);
        return nfs_page_async_flush(pgio, page);
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
        struct nfs_pageio_descriptor pgio;
        int err;

        nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
        if (err < 0)
                return err;
        if (pgio.pg_error < 0)
                return pgio.pg_error;
        return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = nfs_writepage_locked(page, wbc);
        unlock_page(page);
        return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
        int ret;

        ret = nfs_do_writepage(page, wbc, data);
        unlock_page(page);
        return ret;
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct nfs_pageio_descriptor pgio;
        int err;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

        nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
        err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
        nfs_pageio_complete(&pgio);
        if (err < 0)
                return err;
        if (pgio.pg_error < 0)
                return pgio.pg_error;
        return 0;
}

/*
 * Insert a write request into an inode
 */
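/*
 * Note: the first request added to an otherwise empty inode takes an
 * extra reference on the inode via igrab(); the matching iput() is
 * done in nfs_inode_remove_request() when the last request goes away.
 */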
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int error;

        error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
        BUG_ON(error == -EEXIST);
        if (error)
                return error;
        if (!nfsi->npages) {
                igrab(inode);
                nfs_begin_data_update(inode);
                if (nfs_have_delegation(inode, FMODE_WRITE))
                        nfsi->change_attr++;
        }
        SetPagePrivate(req->wb_page);
        set_page_private(req->wb_page, (unsigned long)req);
        nfsi->npages++;
        kref_get(&req->wb_kref);
        return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);

        BUG_ON(!NFS_WBACK_BUSY(req));

        spin_lock(&inode->i_lock);
        set_page_private(req->wb_page, 0);
        ClearPagePrivate(req->wb_page);
        radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
        nfsi->npages--;
        if (!nfsi->npages) {
                spin_unlock(&inode->i_lock);
                nfs_end_data_update(inode);
                iput(inode);
        } else
                spin_unlock(&inode->i_lock);
        nfs_clear_request(req);
        nfs_release_request(req);
}

static void
nfs_redirty_request(struct nfs_page *req)
{
        __set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;

        if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
                return 0;
        return !PageWriteback(req->wb_page);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        nfsi->ncommit++;
        set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
        radix_tree_tag_set(&nfsi->nfs_page_tree,
                        req->wb_index,
                        NFS_PAGE_TAG_COMMIT);
        spin_unlock(&inode->i_lock);
        inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
        return data->verf.committed != NFS_FILE_SYNC;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
        if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
                nfs_mark_request_commit(req);
                return 1;
        }
        if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
                nfs_redirty_request(req);
                return 1;
        }
        return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
        return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
        return 0;
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page *req;
        pgoff_t idx_end, next;
        unsigned int            res = 0;
        int                     error;

        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        next = idx_start;
        while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
                if (req->wb_index > idx_end)
                        break;

                next = req->wb_index + 1;
                BUG_ON(!NFS_WBACK_BUSY(req));

                kref_get(&req->wb_kref);
                spin_unlock(&inode->i_lock);
                error = nfs_wait_on_request(req);
                nfs_release_request(req);
                spin_lock(&inode->i_lock);
                if (error < 0)
                        return error;
                res++;
        }
        return res;
}

static void nfs_cancel_commit_list(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
                nfs_list_remove_request(req);
                clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
                nfs_inode_remove_request(req);
                nfs_unlock_request(req);
        }
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int res = 0;

        if (nfsi->ncommit != 0) {
                res = nfs_scan_list(nfsi, dst, idx_start, npages,
                                NFS_PAGE_TAG_COMMIT);
                nfsi->ncommit -= res;
        }
        return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
        return 0;
}
#endif

/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
                struct page *page, unsigned int offset, unsigned int bytes)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
        struct nfs_page         *req, *new = NULL;
        pgoff_t         rqend, end;

        end = offset + bytes;

        for (;;) {
                /* Loop over all inode entries and see if we find
                 * a request for the page we wish to update.
                 */
                spin_lock(&inode->i_lock);
                req = nfs_page_find_request_locked(page);
                if (req) {
                        if (!nfs_lock_request_dontget(req)) {
                                int error;

                                spin_unlock(&inode->i_lock);
                                error = nfs_wait_on_request(req);
                                nfs_release_request(req);
                                if (error < 0) {
                                        if (new)
                                                nfs_release_request(new);
                                        return ERR_PTR(error);
                                }
                                continue;
                        }
                        spin_unlock(&inode->i_lock);
                        if (new)
                                nfs_release_request(new);
                        break;
                }

                if (new) {
                        int error;
                        nfs_lock_request_dontget(new);
                        error = nfs_inode_add_request(inode, new);
                        if (error) {
                                spin_unlock(&inode->i_lock);
                                nfs_unlock_request(new);
                                return ERR_PTR(error);
                        }
                        spin_unlock(&inode->i_lock);
                        return new;
                }
                spin_unlock(&inode->i_lock);

                new = nfs_create_request(ctx, inode, page, offset, bytes);
                if (IS_ERR(new))
                        return new;
        }

        /* We have a request for our page.
         * If the creds don't match, or the
         * page addresses don't match,
         * tell the caller to wait on the conflicting
         * request.
         */
        rqend = req->wb_offset + req->wb_bytes;
        if (req->wb_context != ctx
            || req->wb_page != page
            || !nfs_dirty_request(req)
            || offset > rqend || end < req->wb_offset) {
                nfs_unlock_request(req);
                return ERR_PTR(-EBUSY);
        }

        /* Okay, the request matches. Update the region */
        if (offset < req->wb_offset) {
                req->wb_offset = offset;
                req->wb_pgbase = offset;
                req->wb_bytes = rqend - req->wb_offset;
        }

        if (end > rqend)
                req->wb_bytes = end - req->wb_offset;

        return req;
}
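/*
 * Worked example for nfs_update_request() above (a sketch): an
 * existing dirty request covering bytes 0-2047 of the page and a new
 * write of bytes 1024-4095 from the same open context overlap, so
 * they coalesce into one request covering bytes 0-4095; a write from
 * a different context would instead get -EBUSY and have to flush the
 * conflicting request first.
 */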

int nfs_flush_incompatible(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct nfs_page *req;
        int do_flush, status;
        /*
         * Look for a request corresponding to this page. If there
         * is one, and it belongs to another file, we flush it out
         * before we try to copy anything into the page. Do this
         * due to the lack of an ACCESS-type call in NFSv2.
         * Also do the same if we find a request from an existing
         * dropped page.
         */
        do {
                req = nfs_page_find_request(page);
                if (req == NULL)
                        return 0;
                do_flush = req->wb_page != page || req->wb_context != ctx
                        || !nfs_dirty_request(req);
                nfs_release_request(req);
                if (!do_flush)
                        return 0;
                status = nfs_wb_page(page->mapping->host, page);
        } while (status == 0);
        return status;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct inode    *inode = page->mapping->host;
        int             status = 0;

        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

        dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
                file->f_path.dentry->d_parent->d_name.name,
                file->f_path.dentry->d_name.name, count,
                (long long)(page_offset(page) + offset));

        /* If we're not using byte range locks, and we know the page
         * is entirely in cache, it may be more efficient to avoid
         * fragmenting write requests.
         */
        if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
                count = max(count + offset, nfs_page_length(page));
                offset = 0;
        }

        status = nfs_writepage_setup(ctx, page, offset, count);
        __set_page_dirty_nobuffers(page);

        dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
                        status, (long long)i_size_read(inode));
        if (status < 0)
                nfs_set_pageerror(page);
        return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{

        if (PageError(req->wb_page)) {
                nfs_end_page_writeback(req->wb_page);
                nfs_inode_remove_request(req);
        } else if (!nfs_reschedule_unstable_write(req)) {
                /* Set the PG_uptodate flag */
                nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes);
                nfs_end_page_writeback(req->wb_page);
                nfs_inode_remove_request(req);
        } else
                nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_tag_locked(req);
}

static inline int flush_task_priority(int how)
{
        switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
                case FLUSH_HIGHPRI:
                        return RPC_PRIORITY_HIGH;
                case FLUSH_LOWPRI:
                        return RPC_PRIORITY_LOW;
        }
        return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
                struct nfs_write_data *data,
                const struct rpc_call_ops *call_ops,
                unsigned int count, unsigned int offset,
                int how)
{
        struct inode            *inode;
        int flags;

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */

        data->req = req;
        data->inode = inode = req->wb_context->path.dentry->d_inode;
        data->cred = req->wb_context->cred;

        data->args.fh     = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages  = data->pagevec;
        data->args.count  = count;
        data->args.context = req->wb_context;

        data->res.fattr   = &data->fattr;
        data->res.count   = count;
        data->res.verf    = &data->verf;
        nfs_fattr_init(&data->fattr);

        /* Set up the initial task struct.  */
        flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
        rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
        NFS_PROTO(inode)->write_setup(data, how);

        data->task.tk_priority = flush_task_priority(how);
        data->task.tk_cookie = (unsigned long)inode;

        dprintk("NFS: %5u initiated write call "
                "(req %s/%Ld, %u bytes @ offset %Lu)\n",
                data->task.tk_pid,
                inode->i_sb->s_id,
                (long long)NFS_FILEID(inode),
                count,
                (unsigned long long)data->args.offset);
}

static void nfs_execute_write(struct nfs_write_data *data)
{
        struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
        sigset_t oldset;

        rpc_clnt_sigmask(clnt, &oldset);
        rpc_execute(&data->task);
        rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
        struct nfs_page *req = nfs_list_entry(head->next);
        struct page *page = req->wb_page;
        struct nfs_write_data *data;
        size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
        unsigned int offset;
        int requests = 0;
        LIST_HEAD(list);

        nfs_list_remove_request(req);

        nbytes = count;
        do {
                size_t len = min(nbytes, wsize);

                data = nfs_writedata_alloc(1);
                if (!data)
                        goto out_bad;
                list_add(&data->pages, &list);
                requests++;
                nbytes -= len;
        } while (nbytes != 0);
        atomic_set(&req->wb_complete, requests);

        ClearPageError(page);
        offset = 0;
        nbytes = count;
        do {
                data = list_entry(list.next, struct nfs_write_data, pages);
                list_del_init(&data->pages);

                data->pagevec[0] = page;

                if (nbytes < wsize)
                        wsize = nbytes;
                nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
                                   wsize, offset, how);
                offset += wsize;
                nbytes -= wsize;
                nfs_execute_write(data);
        } while (nbytes != 0);

        return 0;

out_bad:
        while (!list_empty(&list)) {
                data = list_entry(list.next, struct nfs_write_data, pages);
                list_del(&data->pages);
                nfs_writedata_release(data);
        }
        nfs_redirty_request(req);
        nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_tag_locked(req);
        return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
        struct nfs_page         *req;
        struct page             **pages;
        struct nfs_write_data   *data;

        data = nfs_writedata_alloc(npages);
        if (!data)
                goto out_bad;

        pages = data->pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &data->pages);
                ClearPageError(req->wb_page);
                *pages++ = req->wb_page;
        }
        req = nfs_list_entry(data->pages.next);

        /* Set up the argument struct */
        nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

        nfs_execute_write(data);
        return 0;
 out_bad:
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_redirty_request(req);
                nfs_end_page_writeback(req->wb_page);
                nfs_clear_page_tag_locked(req);
        }
        return -ENOMEM;
}

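/*
 * If the server's wsize is smaller than a page, a single dirty page
 * has to be split across several WRITE calls (nfs_flush_multi);
 * otherwise whole pages can be sent in one request (nfs_flush_one).
 */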
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
                                  struct inode *inode, int ioflags)
{
        int wsize = NFS_SERVER(inode)->wsize;

        if (wsize < PAGE_CACHE_SIZE)
                nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
        else
                nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data   *data = calldata;
        struct nfs_page         *req = data->req;
        struct page             *page = req->wb_page;

        dprintk("NFS: write (%s/%Ld %d@%Ld)",
                req->wb_context->path.dentry->d_inode->i_sb->s_id,
                (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                req->wb_bytes,
                (long long)req_offset(req));

        if (nfs_writeback_done(task, data) != 0)
                return;

        if (task->tk_status < 0) {
                nfs_set_pageerror(page);
                nfs_context_set_write_error(req->wb_context, task->tk_status);
                dprintk(", error = %d\n", task->tk_status);
                goto out;
        }

        if (nfs_write_need_commit(data)) {
                struct inode *inode = page->mapping->host;

                spin_lock(&inode->i_lock);
                if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
                        /* Do nothing; we need to resend the writes */
                } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
                        memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
                        dprintk(" defer commit\n");
                } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
                        set_bit(PG_NEED_RESCHED, &req->wb_flags);
                        clear_bit(PG_NEED_COMMIT, &req->wb_flags);
                        dprintk(" server reboot detected\n");
                }
                spin_unlock(&inode->i_lock);
        } else
                dprintk(" OK\n");

out:
        if (atomic_dec_and_test(&req->wb_complete))
                nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
        .rpc_call_done = nfs_writeback_done_partial,
        .rpc_release = nfs_writedata_release,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *        writebacks since the page->count is kept > 1 for as long
 *        as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data   *data = calldata;
        struct nfs_page         *req;
        struct page             *page;

        if (nfs_writeback_done(task, data) != 0)
                return;

        /* Update attributes as result of writeback. */
        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                page = req->wb_page;

                dprintk("NFS: write (%s/%Ld %d@%Ld)",
                        req->wb_context->path.dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));

                if (task->tk_status < 0) {
                        nfs_set_pageerror(page);
                        nfs_context_set_write_error(req->wb_context, task->tk_status);
                        dprintk(", error = %d\n", task->tk_status);
                        goto remove_request;
                }

                if (nfs_write_need_commit(data)) {
                        memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
                        nfs_mark_request_commit(req);
                        nfs_end_page_writeback(page);
                        dprintk(" marked for commit\n");
                        goto next;
                }
                /* Set the PG_uptodate flag? */
                nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
                dprintk(" OK\n");
remove_request:
                nfs_end_page_writeback(page);
                nfs_inode_remove_request(req);
        next:
                nfs_clear_page_tag_locked(req);
        }
}

static const struct rpc_call_ops nfs_write_full_ops = {
        .rpc_call_done = nfs_writeback_done_full,
        .rpc_release = nfs_writedata_release,
};

/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
        struct nfs_writeargs    *argp = &data->args;
        struct nfs_writeres     *resp = &data->res;
        int status;

        dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
                task->tk_pid, task->tk_status);

        /*
         * ->write_done will attempt to use post-op attributes to detect
         * conflicting writes by other clients.  A strict interpretation
         * of close-to-open would allow us to continue caching even if
         * another writer had changed the file, but some applications
         * depend on tighter cache coherency when writing.
         */
        status = NFS_PROTO(data->inode)->write_done(task, data);
        if (status != 0)
                return status;
        nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
        if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
                /* We tried a write call, but the server did not
                 * commit data to stable storage even though we
                 * requested it.
                 * Note: There is a known bug in Tru64 < 5.0 in which
                 *       the server reports NFS_DATA_SYNC, but performs
                 *       NFS_FILE_SYNC. We therefore implement this checking
                 *       as a dprintk() in order to avoid filling syslog.
                 */
                static unsigned long    complain;

                if (time_before(complain, jiffies)) {
                        dprintk("NFS: faulty NFS server %s:"
                                " (committed = %d) != (stable = %d)\n",
                                NFS_SERVER(data->inode)->nfs_client->cl_hostname,
                                resp->verf->committed, argp->stable);
                        complain = jiffies + 300 * HZ;
                }
        }
#endif
        /* Is this a short write? */
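        /*
         * Example (a sketch): if we asked the server to write 16384
         * bytes but it only wrote 4096, advance offset and pgbase by
         * 4096, shrink count to 12288, and restart the RPC from where
         * the server left off. Short *unstable* writes are instead
         * resent in full as NFS_FILE_SYNC, see below.
         */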
        if (task->tk_status >= 0 && resp->count < argp->count) {
                static unsigned long    complain;

                nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

                /* Has the server at least made some progress? */
                if (resp->count != 0) {
                        /* Was this an NFSv2 write or an NFSv3 stable write? */
                        if (resp->verf->committed != NFS_UNSTABLE) {
                                /* Resend from where the server left off */
                                argp->offset += resp->count;
                                argp->pgbase += resp->count;
                                argp->count -= resp->count;
                        } else {
                                /* Resend as a stable write in order to avoid
                                 * headaches in the case of a server crash.
                                 */
                                argp->stable = NFS_FILE_SYNC;
                        }
                        rpc_restart_call(task);
                        return -EAGAIN;
                }
                if (time_before(complain, jiffies)) {
                        printk(KERN_WARNING
                               "NFS: Server wrote zero bytes, expected %u.\n",
                                        argp->count);
                        complain = jiffies + 300 * HZ;
                }
                /* Can't do anything about it except throw an error. */
                task->tk_status = -EIO;
        }
        return 0;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
        nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
                struct nfs_write_data *data,
                int how)
{
        struct nfs_page         *first;
        struct inode            *inode;
        int flags;

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */

        list_splice_init(head, &data->pages);
        first = nfs_list_entry(data->pages.next);
        inode = first->wb_context->path.dentry->d_inode;

        data->inode       = inode;
        data->cred        = first->wb_context->cred;

        data->args.fh     = NFS_FH(data->inode);
        /* Note: we always request a commit of the entire inode */
        data->args.offset = 0;
        data->args.count  = 0;
        data->res.count   = 0;
        data->res.fattr   = &data->fattr;
        data->res.verf    = &data->verf;
        nfs_fattr_init(&data->fattr);

        /* Set up the initial task struct.  */
        flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
        rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
        NFS_PROTO(inode)->commit_setup(data, how);

        data->task.tk_priority = flush_task_priority(how);
        data->task.tk_cookie = (unsigned long)inode;

        dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
        struct nfs_write_data   *data;
        struct nfs_page         *req;

        data = nfs_commit_alloc();

        if (!data)
                goto out_bad;

        /* Set up the argument struct */
        nfs_commit_rpcsetup(head, data, how);

        nfs_execute_write(data);
        return 0;
 out_bad:
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_mark_request_commit(req);
                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
                nfs_clear_page_tag_locked(req);
        }
        return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data   *data = calldata;
        struct nfs_page         *req;

        dprintk("NFS: %5u nfs_commit_done (status %d)\n",
                                task->tk_pid, task->tk_status);

        /* Call the NFS version-specific code */
        if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
                return;

        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

                dprintk("NFS: commit (%s/%Ld %d@%Ld)",
                        req->wb_context->path.dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));
                if (task->tk_status < 0) {
                        nfs_context_set_write_error(req->wb_context, task->tk_status);
                        nfs_inode_remove_request(req);
                        dprintk(", error = %d\n", task->tk_status);
                        goto next;
                }

                /* Okay, COMMIT succeeded, apparently. Check the verifier
                 * returned by the server against all stored verfs. */
                if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
                        /* We have a match */
                        /* Set the PG_uptodate flag */
                        nfs_mark_uptodate(req->wb_page, req->wb_pgbase,
                                        req->wb_bytes);
                        nfs_inode_remove_request(req);
                        dprintk(" OK\n");
                        goto next;
                }
                /* We have a mismatch. Write the page again */
                dprintk(" mismatch\n");
                nfs_redirty_request(req);
        next:
                nfs_clear_page_tag_locked(req);
        }
}

static const struct rpc_call_ops nfs_commit_ops = {
        .rpc_call_done = nfs_commit_done,
        .rpc_release = nfs_commit_release,
};

int nfs_commit_inode(struct inode *inode, int how)
{
        LIST_HEAD(head);
        int res;

        spin_lock(&inode->i_lock);
        res = nfs_scan_commit(inode, &head, 0, 0);
        spin_unlock(&inode->i_lock);
        if (res) {
                int error = nfs_commit_list(inode, &head, how);
                if (error < 0)
                        return error;
        }
        return res;
}
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
        return 0;
}
#endif

long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
        struct inode *inode = mapping->host;
        pgoff_t idx_start, idx_end;
        unsigned int npages = 0;
        LIST_HEAD(head);
        int nocommit = how & FLUSH_NOCOMMIT;
        long pages, ret;

        /* FIXME */
        if (wbc->range_cyclic)
                idx_start = 0;
        else {
                idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
                idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (idx_end > idx_start) {
                        pgoff_t l_npages = 1 + idx_end - idx_start;
                        npages = l_npages;
                        if (sizeof(npages) != sizeof(l_npages) &&
                                        (pgoff_t)npages != l_npages)
                                npages = 0;
                }
        }
        how &= ~FLUSH_NOCOMMIT;
        spin_lock(&inode->i_lock);
        do {
                ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
                if (ret != 0)
                        continue;
                if (nocommit)
                        break;
                pages = nfs_scan_commit(inode, &head, idx_start, npages);
                if (pages == 0)
                        break;
                if (how & FLUSH_INVALIDATE) {
                        spin_unlock(&inode->i_lock);
                        nfs_cancel_commit_list(&head);
                        ret = pages;
                        spin_lock(&inode->i_lock);
                        continue;
                }
                pages += nfs_scan_commit(inode, &head, 0, 0);
                spin_unlock(&inode->i_lock);
                ret = nfs_commit_list(inode, &head, how);
                spin_lock(&inode->i_lock);

        } while (ret >= 0);
        spin_unlock(&inode->i_lock);
        return ret;
}

static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
        int ret;

        ret = nfs_writepages(mapping, wbc);
        if (ret < 0)
                goto out;
        ret = nfs_sync_mapping_wait(mapping, wbc, how);
        if (ret < 0)
                goto out;
        return 0;
out:
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        return ret;
}

/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
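/*
 * Note (roughly): the WB_SYNC_NONE pass starts as much I/O as possible
 * without blocking; the WB_SYNC_ALL pass then waits and picks up
 * anything the first pass skipped (e.g. pages it could not lock).
 */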
static int nfs_write_mapping(struct address_space *mapping, int how)
{
        struct writeback_control wbc = {
                .bdi = mapping->backing_dev_info,
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = LONG_MAX,
                .for_writepages = 1,
                .range_cyclic = 1,
        };
        int ret;

        ret = __nfs_write_mapping(mapping, &wbc, how);
        if (ret < 0)
                return ret;
        wbc.sync_mode = WB_SYNC_ALL;
        return __nfs_write_mapping(mapping, &wbc, how);
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
        return nfs_write_mapping(inode->i_mapping, 0);
}

int nfs_wb_nocommit(struct inode *inode)
{
        return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
        struct nfs_page *req;
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .bdi = page->mapping->backing_dev_info,
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .range_start = range_start,
                .range_end = range_end,
        };
        int ret = 0;

        BUG_ON(!PageLocked(page));
        for (;;) {
                req = nfs_page_find_request(page);
                if (req == NULL)
                        goto out;
                if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
                        nfs_release_request(req);
                        break;
                }
                if (nfs_lock_request_dontget(req)) {
                        nfs_inode_remove_request(req);
                        /*
                         * In case nfs_inode_remove_request has marked the
                         * page as being dirty
                         */
                        cancel_dirty_page(page, PAGE_CACHE_SIZE);
                        nfs_unlock_request(req);
                        break;
                }
                ret = nfs_wait_on_request(req);
                if (ret < 0)
                        goto out;
        }
        if (!PagePrivate(page))
                return 0;
        ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
        return ret;
}

int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .bdi = page->mapping->backing_dev_info,
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .range_start = range_start,
                .range_end = range_end,
        };
        int ret;

        BUG_ON(!PageLocked(page));
        if (clear_page_dirty_for_io(page)) {
                ret = nfs_writepage_locked(page, &wbc);
                if (ret < 0)
                        goto out;
        }
        if (!PagePrivate(page))
                return 0;
        ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
        if (ret >= 0)
                return 0;
out:
        __mark_inode_dirty(inode, I_DIRTY_PAGES);
        return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
        return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

int __init nfs_init_writepagecache(void)
{
        nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
                                             sizeof(struct nfs_write_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_wdata_cachep == NULL)
                return -ENOMEM;

        nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
                                                     nfs_wdata_cachep);
        if (nfs_wdata_mempool == NULL)
                return -ENOMEM;

        nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
                                                      nfs_wdata_cachep);
        if (nfs_commit_mempool == NULL)
                return -ENOMEM;

        /*
         * NFS congestion size, scale with available memory.
         *
         *  64MB:    8192k
         * 128MB:   11585k
         * 256MB:   16384k
         * 512MB:   23170k
         *   1GB:   32768k
         *   2GB:   46340k
         *   4GB:   65536k
         *   8GB:   92681k
         *  16GB:  131072k
         *
         * This allows larger machines to have larger/more transfers.
         * Limit the default to 256M
         */
        nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
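        /*
         * Sanity check against the table above (assuming 4k pages):
         * 1GB is 262144 pages, int_sqrt(262144) = 512, and
         * 16 * 512 = 8192, shifted left by (12 - 10) = 2, gives 32768k.
         */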
        if (nfs_congestion_kb > 256*1024)
                nfs_congestion_kb = 256*1024;

        return 0;
}

void nfs_destroy_writepagecache(void)
{
        mempool_destroy(nfs_commit_mempool);
        mempool_destroy(nfs_wdata_mempool);
        kmem_cache_destroy(nfs_wdata_cachep);
}