diff options
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 20 |
1 file changed, 9 insertions, 11 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1b4e4a597caa..a3d669f8e25e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1628,6 +1628,13 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1628 | } | 1628 | } |
1629 | 1629 | ||
1630 | /* | 1630 | /* |
1631 | * With swappiness at 100, anonymous and file have the same priority. | ||
1632 | * This scanning priority is essentially the inverse of IO cost. | ||
1633 | */ | ||
1634 | anon_prio = sc->swappiness; | ||
1635 | file_prio = 200 - sc->swappiness; | ||
1636 | |||
1637 | /* | ||
1631 | * OK, so we have swap space and a fair amount of page cache | 1638 | * OK, so we have swap space and a fair amount of page cache |
1632 | * pages. We use the recently rotated / recently scanned | 1639 | * pages. We use the recently rotated / recently scanned |
1633 | * ratios to determine how valuable each cache is. | 1640 | * ratios to determine how valuable each cache is. |
@@ -1638,28 +1645,18 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1638 | * | 1645 | * |
1639 | * anon in [0], file in [1] | 1646 | * anon in [0], file in [1] |
1640 | */ | 1647 | */ |
1648 | spin_lock_irq(&zone->lru_lock); | ||
1641 | if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { | 1649 | if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { |
1642 | spin_lock_irq(&zone->lru_lock); | ||
1643 | reclaim_stat->recent_scanned[0] /= 2; | 1650 | reclaim_stat->recent_scanned[0] /= 2; |
1644 | reclaim_stat->recent_rotated[0] /= 2; | 1651 | reclaim_stat->recent_rotated[0] /= 2; |
1645 | spin_unlock_irq(&zone->lru_lock); | ||
1646 | } | 1652 | } |
1647 | 1653 | ||
1648 | if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { | 1654 | if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { |
1649 | spin_lock_irq(&zone->lru_lock); | ||
1650 | reclaim_stat->recent_scanned[1] /= 2; | 1655 | reclaim_stat->recent_scanned[1] /= 2; |
1651 | reclaim_stat->recent_rotated[1] /= 2; | 1656 | reclaim_stat->recent_rotated[1] /= 2; |
1652 | spin_unlock_irq(&zone->lru_lock); | ||
1653 | } | 1657 | } |
1654 | 1658 | ||
1655 | /* | 1659 | /* |
1656 | * With swappiness at 100, anonymous and file have the same priority. | ||
1657 | * This scanning priority is essentially the inverse of IO cost. | ||
1658 | */ | ||
1659 | anon_prio = sc->swappiness; | ||
1660 | file_prio = 200 - sc->swappiness; | ||
1661 | |||
1662 | /* | ||
1663 | * The amount of pressure on anon vs file pages is inversely | 1660 | * The amount of pressure on anon vs file pages is inversely |
1664 | * proportional to the fraction of recently scanned pages on | 1661 | * proportional to the fraction of recently scanned pages on |
1665 | * each list that were recently referenced and in active use. | 1662 | * each list that were recently referenced and in active use. |
@@ -1669,6 +1666,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc, | |||
1669 | 1666 | ||
1670 | fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); | 1667 | fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); |
1671 | fp /= reclaim_stat->recent_rotated[1] + 1; | 1668 | fp /= reclaim_stat->recent_rotated[1] + 1; |
1669 | spin_unlock_irq(&zone->lru_lock); | ||
1672 | 1670 | ||
1673 | fraction[0] = ap; | 1671 | fraction[0] = ap; |
1674 | fraction[1] = fp; | 1672 | fraction[1] = fp; |