[PATCH] Really ignore kmem_cache_destroy return value
[pandora-kernel.git] fs/nfs/pagelist.c
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#define NFS_PARANOIA 1

static kmem_cache_t *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p;
        p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL);
        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->wb_list);
        }
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page, and avoids
 * a possible deadlock when we reach the hard limit on the number
 * of dirty pages.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_page         *req;

        /* Deal with hard limits.  */
        for (;;) {
                /* try to allocate the request struct */
                req = nfs_page_alloc();
                if (req != NULL)
                        break;

                /* Try to free up at least one request in order to stay
                 * below the hard limit
                 */
                if (signalled() && (server->flags & NFS_MOUNT_INTR))
                        return ERR_PTR(-ERESTARTSYS);
                yield();
        }

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
         * update_nfs_request below if the region is not locked. */
        req->wb_page    = page;
        atomic_set(&req->wb_complete, 0);
        req->wb_index   = page->index;
        page_cache_get(page);
        BUG_ON(PagePrivate(page));
        BUG_ON(!PageLocked(page));
        BUG_ON(page->mapping->host != inode);
        req->wb_offset  = offset;
        req->wb_pgbase  = offset;
        req->wb_bytes   = count;
        atomic_set(&req->wb_count, 1);
        req->wb_context = get_nfs_open_context(ctx);

        return req;
}
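
/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * example_queue_write() is hypothetical; it shows the documented contract
 * of nfs_create_request(): the page is locked before the call, and the
 * result is checked with IS_ERR() because the function returns
 * ERR_PTR(-ERESTARTSYS) when a signal arrives on an "intr" mount.
 */
#if 0
static int example_queue_write(struct nfs_open_context *ctx,
                               struct inode *inode, struct page *page,
                               unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        BUG_ON(!PageLocked(page));      /* caller must hold the page lock */
        req = nfs_create_request(ctx, inode, page, offset, count);
        if (IS_ERR(req))
                return PTR_ERR(req);    /* typically -ERESTARTSYS */
        /* ... hand req over to the read or write path here ... */
        nfs_release_request(req);       /* drop the initial reference */
        return 0;
}
#endif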

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
        nfs_release_request(req);
}

/**
 * nfs_set_page_writeback_locked - Lock a request for writeback
 * @req: request to lock
 */
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

        if (!nfs_lock_request(req))
                return 0;
        radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
        return 1;
}

/**
 * nfs_clear_page_writeback - Unlock request and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_writeback(struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

        if (req->wb_page != NULL) {
                spin_lock(&nfsi->req_lock);
                radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
                spin_unlock(&nfsi->req_lock);
        }
        nfs_unlock_request(req);
}
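
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * of how the two helpers above pair up. example_writeback_cycle() is a
 * hypothetical caller: nfs_set_page_writeback_locked() requires that the
 * caller already hold nfsi->req_lock, while nfs_clear_page_writeback()
 * takes that lock itself before clearing the tag.
 */
#if 0
static void example_writeback_cycle(struct nfs_inode *nfsi,
                                    struct nfs_page *req)
{
        spin_lock(&nfsi->req_lock);
        if (!nfs_set_page_writeback_locked(req)) {
                spin_unlock(&nfsi->req_lock);
                return;                 /* request is busy elsewhere */
        }
        spin_unlock(&nfsi->req_lock);

        /* ... transmit the request ... */

        nfs_clear_page_writeback(req);  /* clears the tag, wakes waiters */
}
#endif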

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clean up
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void
nfs_release_request(struct nfs_page *req)
{
        if (!atomic_dec_and_test(&req->wb_count))
                return;

#ifdef NFS_PARANOIA
        BUG_ON(!list_empty(&req->wb_list));
        BUG_ON(NFS_WBACK_BUSY(req));
#endif

        /* Release struct file or cached credential */
        nfs_clear_request(req);
        put_nfs_open_context(req->wb_context);
        nfs_page_free(req);
}

static int nfs_wait_bit_interruptible(void *word)
{
        int ret = 0;

        if (signal_pending(current))
                ret = -ERESTARTSYS;
        else
                schedule();
        return ret;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by signals only if mounted with intr flag.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
        sigset_t oldmask;
        int ret = 0;

        if (!test_bit(PG_BUSY, &req->wb_flags))
                goto out;
        /*
         * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
         *       are not interrupted if intr flag is not set
         */
        rpc_clnt_sigmask(clnt, &oldmask);
        ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
                        nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
        rpc_clnt_sigunmask(clnt, &oldmask);
out:
        return ret;
}
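
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Per the comment above, the caller must hold its own count on the
 * request across the wait. example_wait_for_request() is hypothetical,
 * and it takes that count by bumping wb_count directly, on the
 * assumption (suggested by nfs_release_request() above) that wb_count
 * is the request's reference count.
 */
#if 0
static int example_wait_for_request(struct nfs_page *req)
{
        int error;

        atomic_inc(&req->wb_count);     /* hold a count across the wait */
        error = nfs_wait_on_request(req);
        nfs_release_request(req);
        return error;                   /* 0, or -ERESTARTSYS on "intr" mounts */
}
#endif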

/**
 * nfs_coalesce_requests - Split coalesced requests out from a list.
 * @head: source list
 * @dst: destination list
 * @nmax: maximum number of requests to coalesce
 *
 * Moves a maximum of 'nmax' elements from one list to another.
 * The elements are checked to ensure that they form a contiguous set
 * of pages, and that the RPC credentials are the same.
 */
int
nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
                      unsigned int nmax)
{
        struct nfs_page         *req = NULL;
        unsigned int            npages = 0;

        while (!list_empty(head)) {
                struct nfs_page *prev = req;

                req = nfs_list_entry(head->next);
                if (prev) {
                        if (req->wb_context->cred != prev->wb_context->cred)
                                break;
                        if (req->wb_context->lockowner != prev->wb_context->lockowner)
                                break;
                        if (req->wb_context->state != prev->wb_context->state)
                                break;
                        if (req->wb_index != (prev->wb_index + 1))
                                break;

                        if (req->wb_pgbase != 0)
                                break;
                }
                nfs_list_remove_request(req);
                nfs_list_add_request(req, dst);
                npages++;
                if (req->wb_pgbase + req->wb_bytes != PAGE_CACHE_SIZE)
                        break;
                if (npages >= nmax)
                        break;
        }
        return npages;
}
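
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * showing how a flush path might drain a request list in batches with
 * nfs_coalesce_requests(). example_flush_list() and the "send one RPC"
 * step are hypothetical; each batch holds up to nmax contiguous,
 * same-credential requests suitable for a single RPC call.
 */
#if 0
static void example_flush_list(struct list_head *head, unsigned int nmax)
{
        LIST_HEAD(one_rpc);

        while (!list_empty(head)) {
                if (nfs_coalesce_requests(head, &one_rpc, nmax) == 0)
                        break;
                /* ... build and send one RPC covering &one_rpc ... */
                /* the RPC completion path unlocks/releases the requests */
        }
}
#endif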

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_lock_dirty - Scan the radix tree for dirty requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space, starting at index
 * @idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
int
nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
              unsigned long idx_start, unsigned int npages)
{
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
        unsigned long idx_end;
        int found, i;
        int res;

        res = 0;
        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        for (;;) {
                found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES,
                                NFS_PAGE_TAG_DIRTY);
                if (found <= 0)
                        break;
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        if (req->wb_index > idx_end)
                                goto out;

                        idx_start = req->wb_index + 1;

                        if (nfs_set_page_writeback_locked(req)) {
                                radix_tree_tag_clear(&nfsi->nfs_page_tree,
                                                req->wb_index, NFS_PAGE_TAG_DIRTY);
                                nfs_list_remove_request(req);
                                nfs_list_add_request(req, dst);
                                dec_zone_page_state(req->wb_page, NR_FILE_DIRTY);
                                res++;
                        }
                }
        }
out:
        return res;
}
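
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * of the locking discipline documented above. example_scan_dirty() is
 * hypothetical: it takes nfsi->req_lock around the scan and passes
 * npages == 0 to cover the whole file.
 */
#if 0
static int example_scan_dirty(struct inode *inode, struct list_head *dst)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int res;

        spin_lock(&nfsi->req_lock);
        res = nfs_scan_lock_dirty(nfsi, dst, 0, 0);
        spin_unlock(&nfsi->req_lock);
        return res;             /* number of requests moved to dst */
}
#endif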

/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @head: One of the NFS inode request lists
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space, starting at index
 * @idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
                struct list_head *dst, unsigned long idx_start,
                unsigned int npages)
{
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
        unsigned long idx_end;
        int found, i;
        int res;

        res = 0;
        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        for (;;) {
                found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start,
                                NFS_SCAN_MAXENTRIES);
                if (found <= 0)
                        break;
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        if (req->wb_index > idx_end)
                                goto out;
                        idx_start = req->wb_index + 1;
                        if (req->wb_list_head != head)
                                continue;
                        if (nfs_set_page_writeback_locked(req)) {
                                nfs_list_remove_request(req);
                                nfs_list_add_request(req, dst);
                                res++;
                        }
                }
        }
out:
        return res;
}

int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL, NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}
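
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * of how the NFS client module setup is expected to use the two routines
 * above; example_init()/example_exit() are hypothetical. As the patch
 * title says, the teardown simply calls kmem_cache_destroy() and ignores
 * its return value.
 */
#if 0
static int __init example_init(void)
{
        int err;

        err = nfs_init_nfspagecache();
        if (err)
                return err;     /* -ENOMEM if the slab cache failed */
        /* ... register the rest of the NFS client ... */
        return 0;
}

static void __exit example_exit(void)
{
        /* ... tear down the rest of the NFS client ... */
        nfs_destroy_nfspagecache();
}
#endif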