/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002    akpm@zip.com.au
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>  /* grr. try_to_release_page,
                                   do_invalidatepage */

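/*
 * Zero out the bytes of a partially-truncated page from @partial to the end
 * of the page, and let the filesystem invalidate any private data (such as
 * buffer_heads) that now lies beyond the new end of file.
 */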
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
        memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return;

        if (PagePrivate(page))
                do_invalidatepage(page, 0);

        clear_page_dirty(page);
        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        remove_from_page_cache(page);
        page_cache_release(page);       /* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too.  So we re-check the dirtiness inside
 * ->tree_lock.  That provides exclusion against the __set_page_dirty
 * functions.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return 0;

        if (PagePrivate(page) && !try_to_release_page(page, 0))
                return 0;

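        /*
         * Recheck the dirty bit and the reference count under ->tree_lock:
         * this excludes the __set_page_dirty functions, and a page with
         * extra references cannot be taken down safely.
         */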
        write_lock_irq(&mapping->tree_lock);
        if (PageDirty(page))
                goto failed;
        if (page_count(page) != 2)      /* caller's ref + pagecache ref */
                goto failed;

        BUG_ON(PagePrivate(page));
        __remove_from_page_cache(page);
        write_unlock_irq(&mapping->tree_lock);
        ClearPageUptodate(page);
        page_cache_release(page);       /* pagecache ref */
        return 1;
failed:
        write_unlock_irq(&mapping->tree_lock);
        return 0;
}

/**
 * truncate_inode_pages_range - truncate the range of pages specified by
 * start and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is done to avoid as much IO against the affected region
 * as possible.  The first pass removes most pages, so the search cost of
 * the second pass is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        pgoff_t end;
        const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t next;
        int i;

        if (mapping->nrpages == 0)
                return;

        BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
        end = (lend >> PAGE_CACHE_SHIFT);

        pagevec_init(&pvec, 0);
        next = start;
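        /*
         * First (non-blocking) pass: skip any page that is locked or still
         * under writeback; the second pass below will deal with it.
         */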
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;

                        if (page_index > end) {
                                next = page_index;
                                break;
                        }

                        if (page_index > next)
                                next = page_index;
                        next++;
                        if (TestSetPageLocked(page))
                                continue;
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }

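        /*
         * If lstart was not page aligned, zero out the tail of the page
         * that straddles the new end of file.
         */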
        if (partial) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        wait_on_page_writeback(page);
                        truncate_partial_page(page, partial);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

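        /*
         * Second pass: walk the range again, this time waiting on page
         * locks and on writeback, so that no page in the range survives.
         */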
        next = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }
                if (pvec.pages[0]->index > end) {
                        pagevec_release(&pvec);
                        break;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > end)
                                break;
                        lock_page(page);
                        wait_on_page_writeback(page);
                        if (page->index > next)
                                next = page->index;
                        next++;
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
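
/*
 * Illustrative example only (not part of this file): the usual caller is
 * the truncate path, which after updating i_size does roughly
 *
 *        truncate_inode_pages(inode->i_mapping, inode->i_size);
 *
 * with inode->i_mutex held, as noted above.
 */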

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Returns the number of pages which were successfully invalidated.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next = start;
        unsigned long ret = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (next <= end &&
                        pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
                        int lock_failed;

                        lock_failed = TestSetPageLocked(page);

                        /*
                         * We really shouldn't be looking at the ->index of an
                         * unlocked page.  But we're not allowed to lock these
                         * pages.  So we rely upon nobody altering the ->index
                         * of this (pinned-by-us) page.
                         */
                        index = page->index;
                        if (index > next)
                                next = index;
                        next++;
                        if (lock_failed)
                                continue;

                        if (PageDirty(page) || PageWriteback(page))
                                goto unlock;
                        if (page_mapped(page))
                                goto unlock;
                        ret += invalidate_complete_page(mapping, page);
unlock:
                        unlock_page(page);
                        if (next > end)
                                break;
                }
                pagevec_release(&pvec);
        }
        return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
        return invalidate_mapping_pages(mapping, 0, ~0UL);
}

EXPORT_SYMBOL(invalidate_inode_pages);
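
/*
 * Illustrative example only (not part of this file): a caller that simply
 * wants to shed clean, idle pagecache for a range - the POSIX_FADV_DONTNEED
 * fadvise path, for instance - can use
 *
 *        invalidate_mapping_pages(mapping, start_index, end_index);
 *
 * and accept that dirty, locked, mapped or writeback pages stay behind.
 */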

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next;
        int i;
        int ret = 0;
        int did_range_unmap = 0;
        int wrapped = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end && !ret && !wrapped &&
                pagevec_lookup(&pvec, mapping, next,
                        min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
                for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index;
                        int was_dirty;

                        lock_page(page);
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        page_index = page->index;
                        next = page_index + 1;
                        if (next == 0)
                                wrapped = 1;
                        if (page_index > end) {
                                unlock_page(page);
                                break;
                        }
                        wait_on_page_writeback(page);
                        while (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_range(mapping,
                                           (loff_t)page_index<<PAGE_CACHE_SHIFT,
                                           (loff_t)(end - page_index + 1)
                                                        << PAGE_CACHE_SHIFT,
                                            0);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_range(mapping,
                                          (loff_t)page_index<<PAGE_CACHE_SHIFT,
                                          PAGE_CACHE_SIZE, 0);
                                }
                        }
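                        /*
                         * Unlike invalidate_mapping_pages(), this variant
                         * must take down dirty pages too.  Clear the dirty
                         * bit so invalidate_complete_page() will accept the
                         * page, and restore it if the invalidate fails so
                         * that the data is not silently lost.
                         */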
                        was_dirty = test_clear_page_dirty(page);
                        if (!invalidate_complete_page(mapping, page)) {
                                if (was_dirty)
                                        set_page_dirty(page);
                                ret = -EIO;
                        }
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
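
/*
 * Illustrative example only (not part of this file): a direct-IO write
 * path that wants the pagecache to stay coherent with what was written
 * might, once the IO has completed, do something like
 *
 *        invalidate_inode_pages2_range(mapping,
 *                        pos >> PAGE_CACHE_SHIFT,
 *                        (pos + count - 1) >> PAGE_CACHE_SHIFT);
 *
 * where 'pos' and 'count' describe the byte range that was written.
 */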

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);