aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>2010-08-09 20:19:51 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-08-09 23:45:02 -0400
commit58c37f6e0dfaaab85a3c11fcbf24451dfe70c721 (patch)
treef1d6f6299059e5aa5fc3668ef9f561605491deb3
parent15748048991e801a2d18ce5da4e0d528852bc106 (diff)
vmscan: protect reading of reclaim_stat with lru_lock
Rik van Riel pointed out that reading reclaim_stat should be protected by lru_lock, otherwise vmscan might sweep 2x as many pages. This fault was introduced by commit 4f98a2fee8acdb4ac84545df98cccecfd130f8db Author: Rik van Riel <riel@redhat.com> Date: Sat Oct 18 20:26:32 2008 -0700 vmscan: split LRU lists into anon & file sets Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Rik van Riel <riel@redhat.com> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/vmscan.c20
1 file changed, 9 insertions, 11 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1b4e4a597caa..a3d669f8e25e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1628,6 +1628,13 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1628 } 1628 }
1629 1629
1630 /* 1630 /*
1631 * With swappiness at 100, anonymous and file have the same priority.
1632 * This scanning priority is essentially the inverse of IO cost.
1633 */
1634 anon_prio = sc->swappiness;
1635 file_prio = 200 - sc->swappiness;
1636
1637 /*
1631 * OK, so we have swap space and a fair amount of page cache 1638 * OK, so we have swap space and a fair amount of page cache
1632 * pages. We use the recently rotated / recently scanned 1639 * pages. We use the recently rotated / recently scanned
1633 * ratios to determine how valuable each cache is. 1640 * ratios to determine how valuable each cache is.
@@ -1638,28 +1645,18 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1638 * 1645 *
1639 * anon in [0], file in [1] 1646 * anon in [0], file in [1]
1640 */ 1647 */
1648 spin_lock_irq(&zone->lru_lock);
1641 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 1649 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1642 spin_lock_irq(&zone->lru_lock);
1643 reclaim_stat->recent_scanned[0] /= 2; 1650 reclaim_stat->recent_scanned[0] /= 2;
1644 reclaim_stat->recent_rotated[0] /= 2; 1651 reclaim_stat->recent_rotated[0] /= 2;
1645 spin_unlock_irq(&zone->lru_lock);
1646 } 1652 }
1647 1653
1648 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 1654 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1649 spin_lock_irq(&zone->lru_lock);
1650 reclaim_stat->recent_scanned[1] /= 2; 1655 reclaim_stat->recent_scanned[1] /= 2;
1651 reclaim_stat->recent_rotated[1] /= 2; 1656 reclaim_stat->recent_rotated[1] /= 2;
1652 spin_unlock_irq(&zone->lru_lock);
1653 } 1657 }
1654 1658
1655 /* 1659 /*
1656 * With swappiness at 100, anonymous and file have the same priority.
1657 * This scanning priority is essentially the inverse of IO cost.
1658 */
1659 anon_prio = sc->swappiness;
1660 file_prio = 200 - sc->swappiness;
1661
1662 /*
1663 * The amount of pressure on anon vs file pages is inversely 1660 * The amount of pressure on anon vs file pages is inversely
1664 * proportional to the fraction of recently scanned pages on 1661 * proportional to the fraction of recently scanned pages on
1665 * each list that were recently referenced and in active use. 1662 * each list that were recently referenced and in active use.
@@ -1669,6 +1666,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1669 1666
1670 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); 1667 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1671 fp /= reclaim_stat->recent_rotated[1] + 1; 1668 fp /= reclaim_stat->recent_rotated[1] + 1;
1669 spin_unlock_irq(&zone->lru_lock);
1672 1670
1673 fraction[0] = ap; 1671 fraction[0] = ap;
1674 fraction[1] = fp; 1672 fraction[1] = fp;