author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2010-08-09 17:19:51 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-08-09 20:45:02 -0700
commit    58c37f6e0dfaaab85a3c11fcbf24451dfe70c721 (patch)
tree      f1d6f6299059e5aa5fc3668ef9f561605491deb3 /mm
parent    15748048991e801a2d18ce5da4e0d528852bc106 (diff)
vmscan: protect reading of reclaim_stat with lru_lock
Rik van Riel pointed out that reading reclaim_stat should be protected by lru_lock; otherwise vmscan might sweep 2x as many pages.

This fault was introduced by:

    commit 4f98a2fee8acdb4ac84545df98cccecfd130f8db
    Author: Rik van Riel <riel@redhat.com>
    Date:   Sat Oct 18 20:26:32 2008 -0700

        vmscan: split LRU lists into anon & file sets

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
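[Editor's note] To make the race concrete, here is a minimal userspace sketch of the bug, not kernel code: pthread_mutex stands in for spin_lock_irq(&zone->lru_lock), and the struct and function names are hypothetical analogues of the kernel's reclaim_stat and get_scan_count(). Before the patch, only the decay was locked; the reader's two loads were not, so the decay could run between them and pair a halved recent_scanned with an un-halved recent_rotated, skewing the computed pressure by roughly 2x.

/* Hypothetical userspace analogue of the race; build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

struct reclaim_stat {
	unsigned long recent_scanned[2];	/* anon in [0], file in [1] */
	unsigned long recent_rotated[2];
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER; /* zone->lru_lock */
static struct reclaim_stat rs = { { 1000, 1000 }, { 500, 500 } };

/* Decay pass: halves both counters together, as get_scan_count() does. */
static void *decay(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lru_lock);
	rs.recent_scanned[0] /= 2;
	rs.recent_rotated[0] /= 2;
	pthread_mutex_unlock(&lru_lock);
	return NULL;
}

/*
 * Reader: before the patch the lock was NOT held across these two
 * loads, so decay() could run between them and the ratio would mix a
 * halved recent_scanned with an un-halved recent_rotated (or the
 * reverse), making vmscan sweep roughly twice as many pages.  Holding
 * the lock across both loads yields a consistent snapshot.
 */
static unsigned long anon_pressure(unsigned long anon_prio)
{
	unsigned long ap;

	pthread_mutex_lock(&lru_lock);
	ap = (anon_prio + 1) * (rs.recent_scanned[0] + 1);
	ap /= rs.recent_rotated[0] + 1;
	pthread_mutex_unlock(&lru_lock);
	return ap;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, decay, NULL);
	printf("anon pressure: %lu\n", anon_pressure(60));
	pthread_join(t, NULL);
	return 0;
}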
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1b4e4a5..a3d669f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1628,6 +1628,13 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	}
 
 	/*
+	 * With swappiness at 100, anonymous and file have the same priority.
+	 * This scanning priority is essentially the inverse of IO cost.
+	 */
+	anon_prio = sc->swappiness;
+	file_prio = 200 - sc->swappiness;
+
+	/*
 	 * OK, so we have swap space and a fair amount of page cache
 	 * pages.  We use the recently rotated / recently scanned
 	 * ratios to determine how valuable each cache is.
@@ -1638,28 +1645,18 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	 *
 	 * anon in [0], file in [1]
 	 */
+	spin_lock_irq(&zone->lru_lock);
 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
-		spin_lock_irq(&zone->lru_lock);
 		reclaim_stat->recent_scanned[0] /= 2;
 		reclaim_stat->recent_rotated[0] /= 2;
-		spin_unlock_irq(&zone->lru_lock);
 	}
 
 	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
-		spin_lock_irq(&zone->lru_lock);
 		reclaim_stat->recent_scanned[1] /= 2;
 		reclaim_stat->recent_rotated[1] /= 2;
-		spin_unlock_irq(&zone->lru_lock);
 	}
 
 	/*
-	 * With swappiness at 100, anonymous and file have the same priority.
-	 * This scanning priority is essentially the inverse of IO cost.
-	 */
-	anon_prio = sc->swappiness;
-	file_prio = 200 - sc->swappiness;
-
-	/*
 	 * The amount of pressure on anon vs file pages is inversely
 	 * proportional to the fraction of recently scanned pages on
 	 * each list that were recently referenced and in active use.
@@ -1669,6 +1666,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 
 	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
 	fp /= reclaim_stat->recent_rotated[1] + 1;
+	spin_unlock_irq(&zone->lru_lock);
 
 	fraction[0] = ap;
 	fraction[1] = fp;
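[Editor's note] A worked example of the priority arithmetic above, with illustrative numbers that are not part of this commit: at the default sc->swappiness of 60, anon_prio = 60 and file_prio = 200 - 60 = 140. If both lists have recent_scanned = 100 and recent_rotated = 50, then ap = 61 * 101 / 51 = 120 and fp = 141 * 101 / 51 = 279, so file pages receive roughly 2.3x the scan pressure of anon pages. The point of the patch is that ap and fp are now computed from a single, lock-consistent snapshot of these counters, rather than from values a concurrent decay pass may have halved midway through the read.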