vmscan: protect reading of reclaim_stat with lru_lock
author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Tue, 10 Aug 2010 00:19:51 +0000 (17:19 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Aug 2010 03:45:02 +0000 (20:45 -0700)
Rik van Riel pointed out that reading reclaim_stat should be protected by
lru_lock; otherwise vmscan might sweep 2x as many pages.

This fault was introduced by

  commit 4f98a2fee8acdb4ac84545df98cccecfd130f8db
  Author: Rik van Riel <riel@redhat.com>
  Date:   Sat Oct 18 20:26:32 2008 -0700

    vmscan: split LRU lists into anon & file sets

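To make the failure mode concrete, below is a minimal user-space sketch (not
kernel code): the counters, the mutex and the function names (scanned,
rotated, lru_lock, ager(), pressure()) are illustrative stand-ins for
reclaim_stat->recent_scanned[]/recent_rotated[] and zone->lru_lock.  One
thread halves both counters under the lock, as the old code did; the other
computes the scanned/rotated ratio the way get_scan_count() does, but without
taking the lock, so it can observe one counter already halved and the other
not yet, skewing the ratio -- and with it the amount of scanning -- by
roughly 2x in either direction:

  /* build: cc -O2 -pthread sketch.c */
  #include <pthread.h>
  #include <stdio.h>

  static unsigned long scanned = 80000, rotated = 40000;
  static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
  static volatile int stop;

  /* Writer: halves both counters together, always under the lock. */
  static void *ager(void *arg)
  {
      while (!stop) {
          pthread_mutex_lock(&lru_lock);
          scanned /= 2;
          rotated /= 2;
          if (rotated < 1000) {       /* keep the pair in range */
              scanned = 80000;
              rotated = 40000;
          }
          pthread_mutex_unlock(&lru_lock);
      }
      return NULL;
  }

  /*
   * Reader: computes the scan-pressure ratio from the two counters.
   * With locked == 0 this is the pre-patch behaviour: the two loads can
   * straddle the writer's critical section and see an inconsistent pair.
   */
  static unsigned long pressure(int locked)
  {
      unsigned long s, r;

      if (locked)
          pthread_mutex_lock(&lru_lock);
      s = scanned;
      r = rotated;
      if (locked)
          pthread_mutex_unlock(&lru_lock);

      return s / r;   /* a consistent snapshot always yields 2 */
  }

  int main(void)
  {
      pthread_t t;
      unsigned long skewed = 0;

      pthread_create(&t, NULL, ager, NULL);
      for (int i = 0; i < 10000000; i++)
          if (pressure(0) != 2)   /* e.g. 1 or 4: pair caught mid-update */
              skewed++;
      stop = 1;
      pthread_join(t, NULL);

      /* With pressure(1) -- the patched behaviour -- this stays at 0. */
      printf("%lu racy reads saw a skewed ratio\n", skewed);
      return 0;
  }
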
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmscan.c

index 1b4e4a5..a3d669f 100644
@@ -1627,6 +1627,13 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                }
        }
 
+       /*
+        * With swappiness at 100, anonymous and file have the same priority.
+        * This scanning priority is essentially the inverse of IO cost.
+        */
+       anon_prio = sc->swappiness;
+       file_prio = 200 - sc->swappiness;
+
        /*
         * OK, so we have swap space and a fair amount of page cache
         * pages.  We use the recently rotated / recently scanned
@@ -1638,27 +1645,17 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         *
         * anon in [0], file in [1]
         */
+       spin_lock_irq(&zone->lru_lock);
        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
-               spin_lock_irq(&zone->lru_lock);
                reclaim_stat->recent_scanned[0] /= 2;
                reclaim_stat->recent_rotated[0] /= 2;
-               spin_unlock_irq(&zone->lru_lock);
        }
 
        if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
-               spin_lock_irq(&zone->lru_lock);
                reclaim_stat->recent_scanned[1] /= 2;
                reclaim_stat->recent_rotated[1] /= 2;
-               spin_unlock_irq(&zone->lru_lock);
        }
 
-       /*
-        * With swappiness at 100, anonymous and file have the same priority.
-        * This scanning priority is essentially the inverse of IO cost.
-        */
-       anon_prio = sc->swappiness;
-       file_prio = 200 - sc->swappiness;
-
        /*
         * The amount of pressure on anon vs file pages is inversely
         * proportional to the fraction of recently scanned pages on
@@ -1669,6 +1666,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 
        fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
        fp /= reclaim_stat->recent_rotated[1] + 1;
        fp /= reclaim_stat->recent_rotated[1] + 1;
+       spin_unlock_irq(&zone->lru_lock);
 
        fraction[0] = ap;
        fraction[1] = fp;
        fraction[1] = fp;