mm: let swap use exceptional entries
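
The page cache radix tree has so far held only struct page pointers. To let
shmem/tmpfs (in later patches of this series) keep swap entries in the same
tree as its pages, the lookup helpers in mm/filemap.c must now cope with
"exceptional" entries: slots whose low bits mark them as something other than
a page pointer. radix_tree_exception() catches both the existing deref-retry
case and the new exceptional case; radix_tree_exceptional_entry() tells them
apart. find_get_page() hands an exceptional entry straight back to the caller,
find_lock_page() only locks real pages, the gang lookups skip or stop at such
entries, and the tagged lookup BUG()s on one, since exceptional entries are
never tagged.

A simplified sketch of the encoding these helpers assume; the real definitions
live in the companion radix-tree and swapops changes, not in this file, and may
differ in detail:

	#define RADIX_TREE_INDIRECT_PTR		1	/* the deref-retry case */
	#define RADIX_TREE_EXCEPTIONAL_ENTRY	2	/* e.g. a swap entry */
	#define RADIX_TREE_EXCEPTIONAL_SHIFT	2

	static inline int radix_tree_exception(void *arg)
	{
		/* true for either kind of non-page slot */
		return (unsigned long)arg &
			(RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY);
	}

	static inline int radix_tree_exceptional_entry(void *arg)
	{
		/* true only for exceptional (e.g. swap) entries */
		return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
	}

	/* how a swap entry would be packed into and out of such a slot */
	static inline void *swp_to_radix_entry(swp_entry_t entry)
	{
		return (void *)((entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT) |
				RADIX_TREE_EXCEPTIONAL_ENTRY);
	}

	static inline swp_entry_t radix_to_swp_entry(void *arg)
	{
		swp_entry_t entry;

		entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
		return entry;
	}
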
diff --git a/mm/filemap.c b/mm/filemap.c
index f820e60..76bfb64 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -78,7 +78,7 @@
  *  ->i_mutex                  (generic_file_buffered_write)
  *    ->mmap_sem               (fault_in_pages_readable->do_page_fault)
  *
- *  inode_wb_list_lock
+ *  bdi->wb.list_lock
  *    sb_lock                  (fs/fs-writeback.c)
  *    ->mapping->tree_lock     (__sync_single_inode)
  *
@@ -96,9 +96,9 @@
  *    ->zone.lru_lock          (check_pte_range->isolate_lru_page)
  *    ->private_lock           (page_remove_rmap->set_page_dirty)
  *    ->tree_lock              (page_remove_rmap->set_page_dirty)
- *    inode_wb_list_lock       (page_remove_rmap->set_page_dirty)
+ *    bdi->wb.list_lock        (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock          (page_remove_rmap->set_page_dirty)
- *    inode_wb_list_lock       (zap_pte_range->set_page_dirty)
+ *    bdi->wb.list_lock        (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock          (zap_pte_range->set_page_dirty)
  *    ->private_lock           (zap_pte_range->__set_page_dirty_buffers)
  *
@@ -128,6 +128,7 @@ void __delete_from_page_cache(struct page *page)
 
        radix_tree_delete(&mapping->page_tree, page->index);
        page->mapping = NULL;
+       /* Leave page->index set: truncation lookup relies upon it */
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        if (PageSwapBacked(page))
@@ -483,6 +484,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                        spin_unlock_irq(&mapping->tree_lock);
                } else {
                        page->mapping = NULL;
+                       /* Leave page->index set: truncation relies upon it */
                        spin_unlock_irq(&mapping->tree_lock);
                        mem_cgroup_uncharge_cache_page(page);
                        page_cache_release(page);
@@ -712,9 +714,12 @@ repeat:
                page = radix_tree_deref_slot(pagep);
                if (unlikely(!page))
                        goto out;
-               if (radix_tree_deref_retry(page))
+               if (radix_tree_exception(page)) {
+                       if (radix_tree_exceptional_entry(page))
+                               goto out;
+                       /* radix_tree_deref_retry(page) */
                        goto repeat;
-
+               }
                if (!page_cache_get_speculative(page))
                        goto repeat;
 
@@ -751,7 +756,7 @@ struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
 
 repeat:
        page = find_get_page(mapping, offset);
-       if (page) {
+       if (page && !radix_tree_exception(page)) {
                lock_page(page);
                /* Has the page been truncated? */
                if (unlikely(page->mapping != mapping)) {
@@ -838,7 +843,7 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
        rcu_read_lock();
 restart:
        nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-                               (void ***)pages, start, nr_pages);
+                               (void ***)pages, NULL, start, nr_pages);
        ret = 0;
        for (i = 0; i < nr_found; i++) {
                struct page *page;
@@ -847,11 +852,14 @@ repeat:
                if (unlikely(!page))
                        continue;
 
-               /*
-                * This can only trigger when the entry at index 0 moves out
-                * of or back to the root: none yet gotten, safe to restart.
-                */
-               if (radix_tree_deref_retry(page)) {
+               if (radix_tree_exception(page)) {
+                       if (radix_tree_exceptional_entry(page))
+                               continue;
+                       /*
+                        * radix_tree_deref_retry(page):
+                        * can only trigger when entry at index 0 moves out of
+                        * or back to root: none yet gotten, safe to restart.
+                        */
                        WARN_ON(start | i);
                        goto restart;
                }
@@ -901,7 +909,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
        rcu_read_lock();
 restart:
        nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-                               (void ***)pages, index, nr_pages);
+                               (void ***)pages, NULL, index, nr_pages);
        ret = 0;
        for (i = 0; i < nr_found; i++) {
                struct page *page;
@@ -910,12 +918,16 @@ repeat:
                if (unlikely(!page))
                        continue;
 
-               /*
-                * This can only trigger when the entry at index 0 moves out
-                * of or back to the root: none yet gotten, safe to restart.
-                */
-               if (radix_tree_deref_retry(page))
+               if (radix_tree_exception(page)) {
+                       if (radix_tree_exceptional_entry(page))
+                               break;
+                       /*
+                        * radix_tree_deref_retry(page):
+                        * can only trigger when entry at index 0 moves out of
+                        * or back to root: none yet gotten, safe to restart.
+                        */
                        goto restart;
+               }
 
                if (!page_cache_get_speculative(page))
                        goto repeat;
@@ -975,12 +987,15 @@ repeat:
                if (unlikely(!page))
                        continue;
 
-               /*
-                * This can only trigger when the entry at index 0 moves out
-                * of or back to the root: none yet gotten, safe to restart.
-                */
-               if (radix_tree_deref_retry(page))
+               if (radix_tree_exception(page)) {
+                       BUG_ON(radix_tree_exceptional_entry(page));
+                       /*
+                        * radix_tree_deref_retry(page):
+                        * can only trigger when entry at index 0 moves out of
+                        * or back to root: none yet gotten, safe to restart.
+                        */
                        goto restart;
+               }
 
                if (!page_cache_get_speculative(page))
                        goto repeat;
@@ -1792,7 +1807,7 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
 
 static struct page *__read_cache_page(struct address_space *mapping,
                                pgoff_t index,
-                               int (*filler)(void *,struct page*),
+                               int (*filler)(void *, struct page *),
                                void *data,
                                gfp_t gfp)
 {
@@ -1823,7 +1838,7 @@ repeat:
 
 static struct page *do_read_cache_page(struct address_space *mapping,
                                pgoff_t index,
-                               int (*filler)(void *,struct page*),
+                               int (*filler)(void *, struct page *),
                                void *data,
                                gfp_t gfp)
 
@@ -1863,7 +1878,7 @@ out:
  * @mapping:   the page's address_space
  * @index:     the page index
  * @filler:    function to perform the read
- * @data:      destination for read data
+ * @data:      first arg to filler(data, page) function, often left as NULL
  *
  * Same as read_cache_page, but don't wait for page to become unlocked
  * after submitting it to the filler.
@@ -1875,7 +1890,7 @@ out:
  */
 struct page *read_cache_page_async(struct address_space *mapping,
                                pgoff_t index,
-                               int (*filler)(void *,struct page*),
+                               int (*filler)(void *, struct page *),
                                void *data)
 {
        return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
@@ -1923,7 +1938,7 @@ EXPORT_SYMBOL(read_cache_page_gfp);
  * @mapping:   the page's address_space
  * @index:     the page index
  * @filler:    function to perform the read
- * @data:      destination for read data
+ * @data:      first arg to filler(data, page) function, often left as NULL
  *
  * Read into the page cache. If a page already exists, and PageUptodate() is
  * not set, try to fill the page then wait for it to become unlocked.
@@ -1932,7 +1947,7 @@ EXPORT_SYMBOL(read_cache_page_gfp);
  */
 struct page *read_cache_page(struct address_space *mapping,
                                pgoff_t index,
-                               int (*filler)(void *,struct page*),
+                               int (*filler)(void *, struct page *),
                                void *data)
 {
        return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
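
For reference, a caller of find_get_page() now has to distinguish the two
kinds of non-NULL return value. A hypothetical caller-side sketch, not part of
this patch, using the radix_to_swp_entry() helper added elsewhere in this
series:

	/*
	 * Look up @index in @mapping's page tree: return a referenced page,
	 * or NULL with *swapp filled in if the slot held a swap entry.
	 */
	static struct page *example_lookup(struct address_space *mapping,
					   pgoff_t index, swp_entry_t *swapp)
	{
		struct page *page = find_get_page(mapping, index);

		if (radix_tree_exceptional_entry(page)) {
			/* Not a struct page: decode and hand back the swap entry */
			*swapp = radix_to_swp_entry(page);
			return NULL;
		}
		/* NULL, or a real page with its reference count raised */
		return page;
	}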