diff --git a/mm/readahead.c b/mm/readahead.c
index 4a58bef..d8723a5 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -22,16 +22,8 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 }
 EXPORT_SYMBOL(default_unplug_io_fn);
 
-/*
- * Convienent macros for min/max read-ahead pages.
- * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
- * The latter is necessary for systems with large page size(i.e. 64k).
- */
-#define MAX_RA_PAGES   (VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
-#define MIN_RA_PAGES   DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)
-
 struct backing_dev_info default_backing_dev_info = {
-       .ra_pages       = MAX_RA_PAGES,
+       .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
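
The removed comment is still the best explanation of the arithmetic that now
sits inline above: the max window rounds down, while the min window needed
DIV_ROUND_UP so that a 64k-page system still gets at least one readahead
page. A worked sketch of both computations, assuming the usual defaults of
VM_MAX_READAHEAD = 128 and VM_MIN_READAHEAD = 16 (kbytes, the common
include/linux/mm.h values of the era, not taken from this patch):

/*
 * Worked example of the two rounding rules from the removed comment:
 *   MAX rounds down:  128 KB on a 64 KB page gives 2 pages
 *   MIN rounds up:    16 KB on a 64 KB page would truncate to 0;
 *                     DIV_ROUND_UP keeps it at 1 page
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

#define VM_MAX_READAHEAD    128     /* kbytes, assumed default */
#define VM_MIN_READAHEAD    16      /* kbytes, assumed default */

static void show(unsigned long page_size)
{
    printf("page %5lu: max=%lu min=%lu\n", page_size,
           VM_MAX_READAHEAD * 1024 / page_size,
           DIV_ROUND_UP(VM_MIN_READAHEAD * 1024, page_size));
}

int main(void)
{
    show(4096);     /* max=32, min=4 */
    show(65536);    /* max=2,  min=1; plain division would give min=0 */
    return 0;
}
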
@@ -66,28 +58,25 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
 {
        struct page *page;
-       struct pagevec lru_pvec;
        int ret = 0;
 
-       pagevec_init(&lru_pvec, 0);
-
        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
-               if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
+               if (add_to_page_cache_lru(page, mapping,
+                                       page->index, GFP_KERNEL)) {
                        page_cache_release(page);
                        continue;
                }
+               page_cache_release(page);
+
                ret = filler(data, page);
-               if (!pagevec_add(&lru_pvec, page))
-                       __pagevec_lru_add(&lru_pvec);
-               if (ret) {
+               if (unlikely(ret)) {
                        put_pages_list(pages);
                        break;
                }
                task_io_account_read(PAGE_CACHE_SIZE);
        }
-       pagevec_lru_add(&lru_pvec);
        return ret;
 }
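
The unconditional page_cache_release() on the success path is what lets the
pagevec go away: add_to_page_cache_lru() takes its own reference to the page
(and handles the LRU insertion internally), so the reference held by the
caller's pages list can be dropped on both paths while the filler runs
against the pagecache's reference. A toy sketch of that hand-off, with
made-up names standing in for the kernel primitives:

/*
 * Toy model of the reference hand-off in the rewritten loop.  The page
 * arrives holding one reference for the caller's list; the cache takes
 * its own reference on successful insert, so the list's reference is
 * dropped on both the success and the failure path.  Not kernel API.
 */
#include <assert.h>
#include <stdio.h>

struct toy_page { int refcount; };

/* Stand-in for add_to_page_cache_lru(): takes its own reference. */
static int toy_add_to_cache(struct toy_page *p, int should_fail)
{
    if (should_fail)
        return -1;
    p->refcount++;                  /* the cache now owns a reference */
    return 0;
}

static void toy_release(struct toy_page *p) { p->refcount--; }

int main(void)
{
    struct toy_page page = { .refcount = 1 };   /* the list's reference */

    if (toy_add_to_cache(&page, 0)) {
        toy_release(&page);         /* insert failed: drop list ref */
    } else {
        toy_release(&page);         /* insert ok: list ref no longer needed */
        /* filler() runs here; the page stays alive via the cache's ref */
    }
    assert(page.refcount == 1);     /* exactly the cache's reference left */
    printf("refcount = %d\n", page.refcount);
    return 0;
}
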
 
@@ -97,7 +86,6 @@ static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
 {
        unsigned page_idx;
-       struct pagevec lru_pvec;
        int ret;
 
        if (mapping->a_ops->readpages) {
@@ -107,19 +95,15 @@ static int read_pages(struct address_space *mapping, struct file *filp,
                goto out;
        }
 
-       pagevec_init(&lru_pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
-               if (!add_to_page_cache(page, mapping,
+               if (!add_to_page_cache_lru(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
-                       if (!pagevec_add(&lru_pvec, page))
-                               __pagevec_lru_add(&lru_pvec);
-               } else
-                       page_cache_release(page);
+               }
+               page_cache_release(page);
        }
-       pagevec_lru_add(&lru_pvec);
        ret = 0;
 out:
        return ret;
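
Same conversion here: the open-coded pagevec existed only to batch LRU
insertions, and add_to_page_cache_lru() performs equivalent batching
internally, which also takes a stack-allocated pagevec out of this path. A
toy sketch of the batch-and-flush idiom that the removed
pagevec_add()/__pagevec_lru_add() lines implemented, assuming the
PAGEVEC_SIZE of 14 the kernel used at the time (toy code, not kernel API):

/*
 * Toy model of the batching the removed pagevec code performed: collect
 * up to a fixed number of items, then flush them to the LRU in one go.
 * add_to_page_cache_lru() hides this same pattern internally.
 */
#include <stdio.h>

#define BATCH 14                        /* assumed PAGEVEC_SIZE of the era */

struct toy_vec { int nr; int items[BATCH]; };

static void flush(struct toy_vec *v)
{
    printf("flushing %d items to the LRU\n", v->nr);
    v->nr = 0;
}

/* Returns the space left, so callers flush when it reaches zero,
 * mirroring the !pagevec_add(...) idiom in the removed lines. */
static int add(struct toy_vec *v, int item)
{
    v->items[v->nr++] = item;
    return BATCH - v->nr;
}

int main(void)
{
    struct toy_vec v = { .nr = 0 };

    for (int i = 0; i < 30; i++)
        if (!add(&v, i))
            flush(&v);
    if (v.nr)
        flush(&v);                      /* final partial batch */
    return 0;
}
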
@@ -157,20 +141,19 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
        /*
         * Preallocate as many pages as we will need.
         */
-       read_lock_irq(&mapping->tree_lock);
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;
 
                if (page_offset > end_index)
                        break;
 
+               rcu_read_lock();
                page = radix_tree_lookup(&mapping->page_tree, page_offset);
+               rcu_read_unlock();
                if (page)
                        continue;
 
-               read_unlock_irq(&mapping->tree_lock);
                page = page_cache_alloc_cold(mapping);
-               read_lock_irq(&mapping->tree_lock);
                if (!page)
                        break;
                page->index = page_offset;
@@ -179,7 +162,6 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        SetPageReadahead(page);
                ret++;
        }
-       read_unlock_irq(&mapping->tree_lock);
 
        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
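
The locking change above is the interesting part: the radix tree can now be
probed under rcu_read_lock() alone, so the loop no longer has to drop and
retake mapping->tree_lock around the sleeping allocation in
page_cache_alloc_cold(). A sketch of that probe-then-allocate shape, with
stub functions standing in for the kernel primitives:

/*
 * Shape of the lockless probe adopted above: an RCU read-side section
 * only around the lookup, and the (possibly sleeping) allocation done
 * with no lock held at all.  Stubs stand in for kernel primitives;
 * this is a sketch, not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>

static void rcu_read_lock(void)   { /* kernel: begin read-side section */ }
static void rcu_read_unlock(void) { /* kernel: end read-side section   */ }

/* Stand-in for radix_tree_lookup(): pretend even offsets are cached. */
static void *cached_page(unsigned long offset)
{
    return (offset % 2 == 0) ? (void *)1 : NULL;
}

int main(void)
{
    for (unsigned long offset = 0; offset < 4; offset++) {
        void *page;

        rcu_read_lock();
        page = cached_page(offset);     /* lockless probe */
        rcu_read_unlock();
        if (page)
            continue;                   /* already cached: nothing to do */

        page = malloc(64);              /* may sleep: no lock is held */
        if (!page)
            break;
        printf("allocated page for offset %lu\n", offset);
        free(page);
    }
    return 0;
}
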
@@ -251,6 +233,18 @@ unsigned long max_sane_readahead(unsigned long nr)
                + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
 }
 
+static int __init readahead_init(void)
+{
+       int err;
+
+       err = bdi_init(&default_backing_dev_info);
+       if (!err)
+               bdi_register(&default_backing_dev_info, NULL, "default");
+
+       return err;
+}
+subsys_initcall(readahead_init);
+
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
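
readahead_init() is the usual init-then-register shape: bdi_init() is the
step that can fail, so its error is what the initcall returns, and
bdi_register() only runs after a successful init. A minimal sketch of that
pattern with toy stand-ins (illustrative only, not the kernel bdi API):

/*
 * Shape of readahead_init() above: only the fallible step's error is
 * propagated, and registration happens only after a successful init.
 */
#include <stdio.h>

static int toy_bdi_init(void)
{
    return 0;                       /* kernel: may fail with -ENOMEM */
}

static void toy_bdi_register(const char *name)
{
    printf("registered bdi \"%s\"\n", name);
}

int main(void)
{
    int err = toy_bdi_init();
    if (!err)
        toy_bdi_register("default");
    return err;                     /* what the initcall machinery sees */
}
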
@@ -379,6 +373,29 @@ ondemand_readahead(struct address_space *mapping,
                                                offset, req_size, 0);
        }
 
+       /*
+        * Hit a marked page without valid readahead state.
+        * E.g. interleaved reads.
+        * Query the pagecache for async_size, which normally equals the
+        * readahead size. Ramp it up and use it as the new readahead size.
+        */
+       if (hit_readahead_marker) {
+               pgoff_t start;
+
+               read_lock_irq(&mapping->tree_lock);
+               start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
+               read_unlock_irq(&mapping->tree_lock);
+
+               if (!start || start - offset > max)
+                       return 0;
+
+               ra->start = start;
+               ra->size = start - offset;      /* old async_size */
+               ra->size = get_next_ra_size(ra, max);
+               ra->async_size = ra->size;
+               goto readit;
+       }
+
        /*
         * It may be one of
         *      - first read on start of file
@@ -390,16 +407,6 @@ ondemand_readahead(struct address_space *mapping,
        ra->size = get_init_ra_size(req_size, max);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
 
-       /*
-        * Hit on a marked page without valid readahead state.
-        * E.g. interleaved reads.
-        * Not knowing its readahead pos/size, bet on the minimal possible one.
-        */
-       if (hit_readahead_marker) {
-               ra->start++;
-               ra->size = get_next_ra_size(ra, max);
-       }
-
 readit:
        return ra_submit(ra, mapping, filp);
 }
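
The relocated branch now recovers the readahead state instead of betting on
the minimal possible one: the distance from the current offset to the next
pagecache hole approximates the old async_size, and get_next_ra_size() ramps
it up for the next window. A worked sketch with concrete numbers; the
get_next_ra_size() below mirrors the era's mainline ramp-up (x4 while the
window is small, x2 after, capped at max) and should be read as
illustrative, not as the kernel source:

/*
 * Worked example of the interleaved-read recovery above.  A reader hits
 * a PG_readahead marker at offset 100 and the next pagecache hole is at
 * 116: the old async window was 16 pages, which is then ramped up.
 */
#include <stdio.h>

struct toy_ra { unsigned long start, size, async_size; };

static unsigned long get_next_ra_size(struct toy_ra *ra, unsigned long max)
{
    unsigned long newsize = (ra->size < max / 16) ? 4 * ra->size
                                                  : 2 * ra->size;
    return newsize < max ? newsize : max;
}

int main(void)
{
    unsigned long offset = 100;     /* marker page just hit */
    unsigned long start  = 116;     /* next hole found in the pagecache */
    unsigned long max    = 32;      /* readahead window cap, in pages */
    struct toy_ra ra;

    ra.start = start;
    ra.size  = start - offset;      /* old async_size: 16 pages */
    ra.size  = get_next_ra_size(&ra, max);
    ra.async_size = ra.size;

    /* 16 >= 32/16, so the window doubles and hits the cap: 32 pages */
    printf("readahead [%lu, +%lu), async=%lu\n",
           ra.start, ra.size, ra.async_size);
    return 0;
}
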
@@ -442,9 +449,10 @@ EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
  *            pagecache pages
  *
  * page_cache_async_readahead() should be called when a page is used which
- * has the PG_readahead flag: this is a marker to suggest that the application
+ * has the PG_readahead flag; this is a marker to suggest that the application
  * has used up enough of the readahead window that we should start pulling in
- * more pages. */
+ * more pages.
+ */
 void
 page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,
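
To make the marker protocol in that comment concrete: readahead marks one
page inside each submitted window (the SetPageReadahead() call earlier in
this patch), and when the application consumes that marked page,
page_cache_async_readahead() submits the next window before the reader
drains the current one. A toy simulation of that pipelining; all names and
sizes are illustrative:

/*
 * Toy simulation of the PG_readahead marker protocol: one page in each
 * submitted window carries a mark, and consuming the marked page
 * triggers the next window, so I/O stays ahead of the reader.
 */
#include <stdio.h>

#define FILE_PAGES 32

static int cached[FILE_PAGES];
static int marked[FILE_PAGES];
static int ra_end;                  /* one past the last submitted page */

/* Cache 'size' pages from 'start'; mark the page 'async' pages before
 * the window's end, mirroring SetPageReadahead() in this patch. */
static void submit_readahead(int start, int size, int async)
{
    for (int i = start; i < start + size && i < FILE_PAGES; i++)
        cached[i] = 1;
    ra_end = start + size;
    if (ra_end - async >= 0 && ra_end - async < FILE_PAGES)
        marked[ra_end - async] = 1;
    printf("readahead [%d, %d)\n", start, ra_end);
}

int main(void)
{
    submit_readahead(0, 4, 2);                  /* initial sync window */

    for (int page = 0; page < FILE_PAGES; page++) {
        if (!cached[page])
            printf("page %2d: cache miss\n", page);  /* never happens */
        if (marked[page]) {                     /* hit the marker page */
            marked[page] = 0;
            submit_readahead(ra_end, 8, 8);     /* async: next window */
        }
    }
    return 0;
}
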