/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002    akpm@zip.com.au
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>  /* grr. try_to_release_page,
                                   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
        void (*invalidatepage)(struct page *, unsigned long);
        invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
        if (!invalidatepage)
                invalidatepage = block_invalidatepage;
#endif
        if (invalidatepage)
                (*invalidatepage)(page, offset);
}
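
/*
 * Example (not part of this file): a block-backed filesystem can hook
 * do_invalidatepage() by filling in ->invalidatepage in its
 * address_space_operations.  A minimal sketch, assuming the filesystem
 * uses buffer heads; "example_aops" is a hypothetical name:
 *
 *	static const struct address_space_operations example_aops = {
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * Leaving ->invalidatepage NULL gives the same behaviour here when
 * CONFIG_BLOCK is set, since block_invalidatepage() is the fallback.
 */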

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
        memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
}

void cancel_dirty_page(struct page *page, unsigned int account_size)
{
        /* If we're cancelling the page, it had better not be mapped any more */
        if (page_mapped(page)) {
                static unsigned int warncount;

                /* Warn on the first few offenders, then go quiet. */
                WARN_ON(++warncount < 5);
        }

        if (TestClearPageDirty(page)) {
                struct address_space *mapping = page->mapping;
                if (mapping && mapping_cap_account_dirty(mapping)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
                        if (account_size)
                                task_io_account_cancelled_write(account_size);
                }
        }
}
EXPORT_SYMBOL(cancel_dirty_page);
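
/*
 * Usage sketch (hypothetical, not from this file): a caller that is
 * about to discard a locked pagecache page wholesale cancels the dirty
 * state first so dirty-page and task IO accounting stay balanced:
 *
 *	lock_page(page);
 *	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 *	...
 *	unlock_page(page);
 *
 * Passing 0 as @account_size skips the cancelled-write accounting, for
 * callers that are not actually throwing away user data.
 */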

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return;

        cancel_dirty_page(page, PAGE_CACHE_SIZE);

        if (PagePrivate(page))
                do_invalidatepage(page, 0);

        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        remove_from_page_cache(page);
        page_cache_release(page);       /* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
        int ret;

        if (page->mapping != mapping)
                return 0;

        if (PagePrivate(page) && !try_to_release_page(page, 0))
                return 0;

        ret = remove_mapping(mapping, page);

        return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        pgoff_t end;
        const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t next;
        int i;

        if (mapping->nrpages == 0)
                return;

        BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
        end = (lend >> PAGE_CACHE_SHIFT);

        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;

                        if (page_index > end) {
                                next = page_index;
                                break;
                        }

                        if (page_index > next)
                                next = page_index;
                        next++;
                        if (TestSetPageLocked(page))
                                continue;
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (partial) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        wait_on_page_writeback(page);
                        truncate_partial_page(page, partial);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

        next = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }
                if (pvec.pages[0]->index > end) {
                        pagevec_release(&pvec);
                        break;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > end)
                                break;
                        lock_page(page);
                        wait_on_page_writeback(page);
                        if (page->index > next)
                                next = page->index;
                        next++;
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_range);
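
/*
 * Usage sketch (hypothetical, not from this file): the BUG_ON above
 * requires @lend to sit on the last byte of a page, so a caller removing
 * the cache for bytes [holebegin, holeend) would round holeend up to a
 * page boundary and subtract one:
 *
 *	loff_t lstart = holebegin;
 *	loff_t lend = ((holeend + PAGE_CACHE_SIZE - 1) &
 *			~((loff_t)PAGE_CACHE_SIZE - 1)) - 1;
 *
 *	truncate_inode_pages_range(mapping, lstart, lend);
 *
 * Passing lend == (loff_t)-1 also satisfies the check and means
 * "to the end of the file", as truncate_inode_pages() below does.
 */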

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
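
/*
 * Usage sketch (hypothetical, not from this file): the classic caller is
 * filesystem truncate(2) or inode-teardown code, e.g.:
 *
 *	mutex_lock(&inode->i_mutex);
 *	truncate_inode_pages(&inode->i_data, inode->i_size);
 *	mutex_unlock(&inode->i_mutex);
 *
 * i.e. drop every cached page at or beyond the (new) i_size, zeroing
 * the tail of the last remaining page if i_size is not page aligned.
 */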

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next = start;
        unsigned long ret = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (next <= end &&
                        pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
                        int lock_failed;

                        lock_failed = TestSetPageLocked(page);

                        /*
                         * We really shouldn't be looking at the ->index of an
                         * unlocked page.  But we're not allowed to lock these
                         * pages.  So we rely upon nobody altering the ->index
                         * of this (pinned-by-us) page.
                         */
                        index = page->index;
                        if (index > next)
                                next = index;
                        next++;
                        if (lock_failed)
                                continue;

                        if (PageDirty(page) || PageWriteback(page))
                                goto unlock;
                        if (page_mapped(page))
                                goto unlock;
                        ret += invalidate_complete_page(mapping, page);
unlock:
                        unlock_page(page);
                        if (next > end)
                                break;
                }
                pagevec_release(&pvec);
        }
        return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
        return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
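
/*
 * Usage sketch (hypothetical, not from this file): a "drop what you
 * safely can" caller, e.g. an fadvise(DONTNEED)-style path, converts
 * byte offsets to page indices and fires and forgets:
 *
 *	pgoff_t first = offset >> PAGE_CACHE_SHIFT;
 *	pgoff_t last = (offset + len - 1) >> PAGE_CACHE_SHIFT;
 *	unsigned long nr = invalidate_mapping_pages(mapping, first, last);
 *
 * The return value only says how many pages went away; dirty, locked,
 * mapped or in-writeback pages are silently skipped, so callers must
 * not depend on the range actually being empty afterwards.
 */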

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently sitting
 * in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return 0;

        if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;

        write_lock_irq(&mapping->tree_lock);
        if (PageDirty(page))
                goto failed;

        BUG_ON(PagePrivate(page));
        __remove_from_page_cache(page);
        write_unlock_irq(&mapping->tree_lock);
        ClearPageUptodate(page);
        page_cache_release(page);       /* pagecache ref */
        return 1;
failed:
        write_unlock_irq(&mapping->tree_lock);
        return 0;
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next;
        int i;
        int ret = 0;
        int did_range_unmap = 0;
        int wrapped = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end && !ret && !wrapped &&
                pagevec_lookup(&pvec, mapping, next,
                        min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
                for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index;

                        lock_page(page);
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        page_index = page->index;
                        next = page_index + 1;
                        if (next == 0)
                                wrapped = 1;
                        if (page_index > end) {
                                unlock_page(page);
                                break;
                        }
                        wait_on_page_writeback(page);
                        while (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_range(mapping,
                                           (loff_t)page_index<<PAGE_CACHE_SHIFT,
                                           (loff_t)(end - page_index + 1)
                                                        << PAGE_CACHE_SHIFT,
                                            0);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_range(mapping,
                                          (loff_t)page_index<<PAGE_CACHE_SHIFT,
                                          PAGE_CACHE_SIZE, 0);
                                }
                        }
                        if (!invalidate_complete_page2(mapping, page))
                                ret = -EIO;
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        WARN_ON_ONCE(ret);
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
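
/*
 * Usage sketch (hypothetical, not from this file): direct-IO style code
 * that has just written to disk behind the page cache wants any stale
 * cached pages gone, and must honour the error return:
 *
 *	int err = invalidate_inode_pages2_range(mapping,
 *			offset >> PAGE_CACHE_SHIFT,
 *			(offset + len - 1) >> PAGE_CACHE_SHIFT);
 *	if (err)
 *		return err;
 *
 * err is -EIO if some page could not be invalidated.  Unlike
 * invalidate_mapping_pages(), this variant unmaps pages from user
 * pagetables and reports failure instead of silently skipping pages.
 */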