/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002    akpm@zip.com.au
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>  /* grr. try_to_release_page,
                                   do_invalidatepage */


static inline void truncate_partial_page(struct page *page, unsigned partial)
{
        memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
}
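
/*
 * Example with illustrative numbers (not from this file): with
 * PAGE_CACHE_SIZE == 4096 and a new EOF at byte 1536 of this page,
 * partial == 1536, so bytes 1536..4095 are cleared and flushed, and any
 * fs-private metadata past that offset is invalidated via
 * do_invalidatepage().
 */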

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return;

        if (PagePrivate(page))
                do_invalidatepage(page, 0);

        clear_page_dirty(page);
        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        remove_from_page_cache(page);
        page_cache_release(page);       /* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
        int ret;

        if (page->mapping != mapping)
                return 0;

        if (PagePrivate(page) && !try_to_release_page(page, 0))
                return 0;

        ret = remove_mapping(mapping, page);
        ClearPageUptodate(page);

        return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        pgoff_t end;
        const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t next;
        int i;

        if (mapping->nrpages == 0)
                return;

        BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
        end = (lend >> PAGE_CACHE_SHIFT);
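
        /*
         * Worked example with illustrative numbers (4K pages): for
         * lstart = 6144 and lend = 20479 we get partial = 2048,
         * start = 2 and end = 4, so whole pages 2..4 are removed below
         * and the 2048..4095 tail of page 1 is zeroed by the
         * partial-page pass.  lend = 20479 is the last byte of page 4,
         * satisfying the BUG_ON() above.
         */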

        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;

                        if (page_index > end) {
                                next = page_index;
                                break;
                        }

                        if (page_index > next)
                                next = page_index;
                        next++;
                        if (TestSetPageLocked(page))
                                continue;
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (partial) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        wait_on_page_writeback(page);
                        truncate_partial_page(page, partial);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

        next = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }
                if (pvec.pages[0]->index > end) {
                        pagevec_release(&pvec);
                        break;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > end)
                                break;
                        lock_page(page);
                        wait_on_page_writeback(page);
                        if (page->index > next)
                                next = page->index;
                        next++;
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_range);
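
/*
 * Illustrative caller sketch (not part of this file): a hole punching
 * path such as tmpfs's madvise(MADV_REMOVE) handler would first zap any
 * user mappings of the hole and then drop its pagecache, roughly:
 *
 *      unmap_mapping_range(mapping, lstart, lend + 1 - lstart, 0);
 *      truncate_inode_pages_range(mapping, lstart, lend);
 *
 * with lstart page aligned and lend the last byte of the hole, so the
 * BUG_ON() on lend's low bits holds.
 */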

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
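
/*
 * Illustrative caller sketch (assumed, not defined here): the generic
 * shrinking-truncate path does roughly
 *
 *      unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 *      truncate_inode_pages(mapping, offset);
 *
 * zapping user mappings beyond the new EOF before dropping the
 * pagecache, with inode->i_mutex held throughout.
 */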

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Returns the number of pages which were successfully invalidated.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next = start;
        unsigned long ret = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (next <= end &&
                        pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
                        int lock_failed;

                        lock_failed = TestSetPageLocked(page);

                        /*
                         * We really shouldn't be looking at the ->index of an
                         * unlocked page.  But we're not allowed to lock these
                         * pages.  So we rely upon nobody altering the ->index
                         * of this (pinned-by-us) page.
                         */
                        index = page->index;
                        if (index > next)
                                next = index;
                        next++;
                        if (lock_failed)
                                continue;

                        if (PageDirty(page) || PageWriteback(page))
                                goto unlock;
                        if (page_mapped(page))
                                goto unlock;
                        ret += invalidate_complete_page(mapping, page);
unlock:
                        unlock_page(page);
                        if (next > end)
                                break;
                }
                pagevec_release(&pvec);
        }
        return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
        return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
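
/*
 * Illustrative sketch (assumed caller): a "drop caches" style user can
 * try to shed every droppable page of an inode with
 *
 *      unsigned long freed = invalidate_inode_pages(mapping);
 *
 * accepting that dirty, locked, mapped or under-writeback pages
 * survive; this is advisory, unlike truncate_inode_pages().
 */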

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next;
        int i;
        int ret = 0;
        int did_range_unmap = 0;
        int wrapped = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end && !ret && !wrapped &&
                pagevec_lookup(&pvec, mapping, next,
                        min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
                for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index;
                        int was_dirty;

                        lock_page(page);
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        page_index = page->index;
                        next = page_index + 1;
                        if (next == 0)
                                wrapped = 1;
                        if (page_index > end) {
                                unlock_page(page);
                                break;
                        }
                        wait_on_page_writeback(page);
                        while (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_range(mapping,
                                           (loff_t)page_index<<PAGE_CACHE_SHIFT,
                                           (loff_t)(end - page_index + 1)
                                                        << PAGE_CACHE_SHIFT,
                                            0);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_range(mapping,
                                          (loff_t)page_index<<PAGE_CACHE_SHIFT,
                                          PAGE_CACHE_SIZE, 0);
                                }
                        }
                        was_dirty = test_clear_page_dirty(page);
                        if (!invalidate_complete_page(mapping, page)) {
                                if (was_dirty)
                                        set_page_dirty(page);
                                ret = -EIO;
                        }
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
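
/*
 * Illustrative caller sketch (assumed): the generic O_DIRECT write path
 * invalidates the written byte range once the IO has completed, roughly
 *
 *      invalidate_inode_pages2_range(mapping,
 *                      pos >> PAGE_CACHE_SHIFT,
 *                      (pos + written - 1) >> PAGE_CACHE_SHIFT);
 *
 * so stale pagecache copies of directly-written blocks cannot be read
 * back afterwards.
 */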

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
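
/*
 * Illustrative caller sketch (assumed): a network filesystem that sees
 * a server-side change can force fresh reads with
 *
 *      int error = invalidate_inode_pages2(inode->i_mapping);
 *
 * treating -EIO as "some pages could not be shot down".
 */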