UBI: Change the default percentage of reserved PEB
[pandora-kernel.git] / mm / vmscan.c
index 1e4ee1a..a3b1578 100644
@@ -734,24 +734,6 @@ static enum page_references page_check_references(struct page *page,
        return PAGEREF_RECLAIM;
 }
 
-static noinline_for_stack void free_page_list(struct list_head *free_pages)
-{
-       struct pagevec freed_pvec;
-       struct page *page, *tmp;
-
-       pagevec_init(&freed_pvec, 1);
-
-       list_for_each_entry_safe(page, tmp, free_pages, lru) {
-               list_del(&page->lru);
-               if (!pagevec_add(&freed_pvec, page)) {
-                       __pagevec_free(&freed_pvec);
-                       pagevec_reinit(&freed_pvec);
-               }
-       }
-
-       pagevec_free(&freed_pvec);
-}
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -1015,7 +997,7 @@ keep_lumpy:
        if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
                zone_set_flag(zone, ZONE_CONGESTED);
 
-       free_page_list(&free_pages);
+       free_hot_cold_page_list(&free_pages, 1);
 
        list_splice(&ret_pages, page_list);
        count_vm_events(PGACTIVATE, pgactivate);
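Note: the two hunks above drop the open-coded pagevec batching of free_page_list() and instead hand the reclaimed pages to the free_hot_cold_page_list() helper, with cold=1 so they are queued at the cold end of the per-CPU free lists. For reference, the helper in mm/page_alloc.c of this era amounts to roughly the following (simplified sketch, tracepoint call omitted; not part of this diff):

        void free_hot_cold_page_list(struct list_head *list, int cold)
        {
                struct page *page, *next;

                /* Each page goes through the normal per-CPU (pcp) free path. */
                list_for_each_entry_safe(page, next, list, lru)
                        free_hot_cold_page(page, cold);
        }
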
@@ -2492,19 +2474,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 }
 #endif
 
-static bool zone_balanced(struct zone *zone, int order,
-                         unsigned long balance_gap, int classzone_idx)
-{
-       if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
-                                   balance_gap, classzone_idx, 0))
-               return false;
-
-       if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
-               return false;
-
-       return true;
-}
-
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
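Note: with the zone_balanced() helper above removed, the kswapd hunks that follow revert its call sites to open-coded zone_watermark_ok_safe() checks, so the extra compaction_suitable() gate that zone_balanced() applied for order > 0 allocations is no longer part of these tests. For reference, the watermark helper is assumed to have the usual signature of this era (from mm/page_alloc.c, not part of this diff):

        bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
                                    int classzone_idx, int alloc_flags);
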
@@ -2564,7 +2533,8 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
                        continue;
                }
 
-               if (!zone_balanced(zone, order, 0, i))
+               if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
+                                                       i, 0))
                        all_zones_ok = false;
                else
                        balanced += zone->present_pages;
@@ -2667,7 +2637,8 @@ loop_again:
                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                        &sc, priority, 0);
 
-                       if (!zone_balanced(zone, order, 0, 0)) {
+                       if (!zone_watermark_ok_safe(zone, order,
+                                       high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
                                break;
                        } else {
@@ -2728,8 +2699,9 @@ loop_again:
                                (zone->present_pages +
                                        KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
                                KSWAPD_ZONE_BALANCE_GAP_RATIO);
-                       if (!zone_balanced(zone, order,
-                                          balance_gap, end_zone)) {
+                       if (!zone_watermark_ok_safe(zone, order,
+                                       high_wmark_pages(zone) + balance_gap,
+                                       end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);
 
                                reclaim_state->reclaimed_slab = 0;
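Note: balance_gap above is the small cushion kswapd adds on top of the high watermark before treating a zone as balanced. Assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100, as in include/linux/swap.h of this era, the expression rounds up to about 1% of the zone, and the full statement (partly outside this hunk's context) caps it at the low watermark. As illustrative arithmetic only:

        /* Hypothetical 1 GiB zone of 4 KiB pages. */
        unsigned long present_pages = 262144;
        unsigned long ratio = 100;                                  /* KSWAPD_ZONE_BALANCE_GAP_RATIO */
        unsigned long gap = (present_pages + ratio - 1) / ratio;    /* 2622 pages, ~10 MiB */
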
@@ -2756,7 +2728,8 @@ loop_again:
                                continue;
                        }
 
-                       if (!zone_balanced(zone, order, 0, end_zone)) {
+                       if (!zone_watermark_ok_safe(zone, order,
+                                       high_wmark_pages(zone), end_zone, 0)) {
                                all_zones_ok = 0;
                                /*
                                 * We are still under min water mark.  This
@@ -3025,7 +2998,10 @@ static int kswapd(void *p)
                }
        }
 
+       tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
        current->reclaim_state = NULL;
+       lockdep_clear_current_reclaim_state();
+
        return 0;
 }
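Note: the added lines make kswapd() tear down its own setup before returning, which only happens when the node's kswapd thread is stopped (e.g. on memory hot-remove). For context, the entry side of kswapd() in this era sets that state up roughly as follows (simplified excerpt, not part of this hunk):

        struct task_struct *tsk = current;
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };

        lockdep_set_current_reclaim_state(GFP_KERNEL);
        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
        current->reclaim_state = &reclaim_state;
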
 
@@ -3530,16 +3506,16 @@ int scan_unevictable_handler(struct ctl_table *table, int write,
  * a specified node's per zone unevictable lists for evictable pages.
  */
 
-static ssize_t read_scan_unevictable_node(struct sys_device *dev,
-                                         struct sysdev_attribute *attr,
+static ssize_t read_scan_unevictable_node(struct device *dev,
+                                         struct device_attribute *attr,
                                          char *buf)
 {
        warn_scan_unevictable_pages();
        return sprintf(buf, "0\n");     /* always zero; should fit... */
 }
 
-static ssize_t write_scan_unevictable_node(struct sys_device *dev,
-                                          struct sysdev_attribute *attr,
+static ssize_t write_scan_unevictable_node(struct device *dev,
+                                          struct device_attribute *attr,
                                        const char *buf, size_t count)
 {
        warn_scan_unevictable_pages();
@@ -3547,17 +3523,17 @@ static ssize_t write_scan_unevictable_node(struct sys_device *dev,
 }
 
 
-static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
                        read_scan_unevictable_node,
                        write_scan_unevictable_node);
 
 int scan_unevictable_register_node(struct node *node)
 {
-       return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
+       return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
 }
 
 void scan_unevictable_unregister_node(struct node *node)
 {
-       sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
+       device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
 }
 #endif
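
Note: the last two hunks are the sysdev-to-device conversion of the scan_unevictable_pages node attribute: struct node now exposes a struct device (node->dev) rather than a sys_device, and DEVICE_ATTR() generates a dev_attr_-prefixed variable, which is why the create/remove calls switch to &dev_attr_scan_unevictable_pages. Roughly, the macro expands to (sketch of the include/linux/device.h definition, not part of this diff):

        static struct device_attribute dev_attr_scan_unevictable_pages =
                __ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
                       read_scan_unevictable_node, write_scan_unevictable_node);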