 mm/vmscan.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------------------------------
 1 file changed, 55 insertions(+), 52 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 25b0202c60df..8e1d72333e8a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1514,21 +1514,52 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 }
 
 /*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we collected @swap_cluster_max pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+                                       unsigned long *nr_saved_scan)
+{
+        unsigned long nr;
+
+        *nr_saved_scan += nr_to_scan;
+        nr = *nr_saved_scan;
+
+        if (nr >= SWAP_CLUSTER_MAX)
+                *nr_saved_scan = 0;
+        else
+                nr = 0;
+
+        return nr;
+}
+
+/*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned. The relative value of each set of LRU lists is determined
  * by looking at the fraction of the pages scanned we did rotate back
  * onto the active list instead of evict.
  *
- * percent[0] specifies how much pressure to put on ram/swap backed
- * memory, while percent[1] determines pressure on the file LRUs.
+ * nr[0] = anon pages to scan; nr[1] = file pages to scan
  */
-static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
-                           unsigned long *percent)
+static void get_scan_count(struct zone *zone, struct scan_control *sc,
+                           unsigned long *nr, int priority)
 {
         unsigned long anon, file, free;
         unsigned long anon_prio, file_prio;
         unsigned long ap, fp;
         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+        u64 fraction[2], denominator;
+        enum lru_list l;
+        int noswap = 0;
+
+        /* If we have no swap space, do not bother scanning anon pages. */
+        if (!sc->may_swap || (nr_swap_pages <= 0)) {
+                noswap = 1;
+                fraction[0] = 0;
+                fraction[1] = 1;
+                denominator = 1;
+                goto out;
+        }
 
         anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
                 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
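
The nr_scan_try_batch() helper is unchanged by this hunk, only moved above its new caller. As a rough illustration of the batching it performs, here is a hedged userspace sketch (assuming the usual SWAP_CLUSTER_MAX of 32; the helper body is copied from the patch, everything else is illustrative): requests smaller than one cluster accumulate in *nr_saved_scan and are released as a single lump once a full batch is available.

    #include <stdio.h>

    #define SWAP_CLUSTER_MAX 32     /* usual kernel value, assumed here */

    /* Copied logic: bank small scan requests until a full cluster is due. */
    static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
                                           unsigned long *nr_saved_scan)
    {
            unsigned long nr;

            *nr_saved_scan += nr_to_scan;   /* accumulate the request */
            nr = *nr_saved_scan;

            if (nr >= SWAP_CLUSTER_MAX)
                    *nr_saved_scan = 0;     /* release the whole batch */
            else
                    nr = 0;                 /* too small: keep saving */

            return nr;
    }

    int main(void)
    {
            unsigned long saved = 0;
            int i;

            for (i = 0; i < 5; i++)
                    printf("%lu ", nr_scan_try_batch(10, &saved));
            printf("\n");   /* prints "0 0 0 40 0": four banked requests go out at once */
            return 0;
    }
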
@@ -1540,9 +1571,10 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
                 /* If we have very few page cache pages,
                    force-scan anon pages. */
                 if (unlikely(file + free <= high_wmark_pages(zone))) {
-                        percent[0] = 100;
-                        percent[1] = 0;
-                        return;
+                        fraction[0] = 1;
+                        fraction[1] = 0;
+                        denominator = 1;
+                        goto out;
                 }
         }
 
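
With fraction[0] = 1, fraction[1] = 0 and denominator = 1, the out: loop added in the next hunk scans the anon lists at the full priority-scaled rate and computes zero for the file lists, reproducing the old percent[0] = 100 / percent[1] = 0 early return. A small hedged sketch with made-up list sizes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* The force-scan-anon case: values the hunk above sets up. */
            uint64_t fraction[2] = { 1, 0 };        /* anon, file */
            uint64_t denominator = 1;
            unsigned long lru[2] = { 5000, 8000 };  /* made-up list sizes */
            int priority = 2;                       /* illustrative */
            int file;

            for (file = 0; file <= 1; file++) {
                    unsigned long scan = lru[file] >> priority;

                    scan = (unsigned long)(scan * fraction[file] / denominator);
                    printf("%s: scan %lu pages\n", file ? "file" : "anon", scan);
            }
            return 0;       /* anon: 1250, file: 0 */
    }
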
@@ -1589,29 +1621,22 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
         fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
         fp /= reclaim_stat->recent_rotated[1] + 1;
 
-        /* Normalize to percentages */
-        percent[0] = 100 * ap / (ap + fp + 1);
-        percent[1] = 100 - percent[0];
-}
-
-/*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
- */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
-                                       unsigned long *nr_saved_scan)
-{
-        unsigned long nr;
-
-        *nr_saved_scan += nr_to_scan;
-        nr = *nr_saved_scan;
-
-        if (nr >= SWAP_CLUSTER_MAX)
-                *nr_saved_scan = 0;
-        else
-                nr = 0;
-
-        return nr;
+        fraction[0] = ap;
+        fraction[1] = fp;
+        denominator = ap + fp + 1;
+out:
+        for_each_evictable_lru(l) {
+                int file = is_file_lru(l);
+                unsigned long scan;
+
+                scan = zone_nr_lru_pages(zone, sc, l);
+                if (priority || noswap) {
+                        scan >>= priority;
+                        scan = div64_u64(scan * fraction[file], denominator);
+                }
+                nr[l] = nr_scan_try_batch(scan,
+                                          &reclaim_stat->nr_saved_scan[l]);
+        }
 }
 
 /*
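
This hunk is the substantive change. The old code normalized ap/fp to integer percentages, so any ratio below 1% rounded percent[] down to 0 and that LRU was never scanned at all, no matter how large it was; carrying the raw fraction and denominator into the per-LRU loop and dividing with div64_u64() preserves small ratios. A hedged userspace comparison of the two schemes (made-up ap/fp values, plain 64-bit division standing in for div64_u64()):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t ap = 1, fp = 1000;             /* strongly file-biased ratio */
            unsigned long anon_lru = 100000;        /* made-up: pages on the anon lists */

            /* Old scheme: normalize to percentages first. */
            unsigned long percent0 = (unsigned long)(100 * ap / (ap + fp + 1));
            unsigned long old_scan = anon_lru * percent0 / 100;

            /* New scheme: scale by the un-normalized fraction. */
            uint64_t fraction0 = ap, denominator = ap + fp + 1;
            unsigned long new_scan =
                    (unsigned long)(anon_lru * fraction0 / denominator);

            /* old_scan == 0 (anon never scanned), new_scan == 99 */
            printf("old=%lu new=%lu\n", old_scan, new_scan);
            return 0;
    }
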
@@ -1622,33 +1647,11 @@ static void shrink_zone(int priority, struct zone *zone,
 {
         unsigned long nr[NR_LRU_LISTS];
         unsigned long nr_to_scan;
-        unsigned long percent[2];       /* anon @ 0; file @ 1 */
         enum lru_list l;
         unsigned long nr_reclaimed = sc->nr_reclaimed;
         unsigned long nr_to_reclaim = sc->nr_to_reclaim;
-        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-        int noswap = 0;
-
-        /* If we have no swap space, do not bother scanning anon pages. */
-        if (!sc->may_swap || (nr_swap_pages <= 0)) {
-                noswap = 1;
-                percent[0] = 0;
-                percent[1] = 100;
-        } else
-                get_scan_ratio(zone, sc, percent);
 
-        for_each_evictable_lru(l) {
-                int file = is_file_lru(l);
-                unsigned long scan;
-
-                scan = zone_nr_lru_pages(zone, sc, l);
-                if (priority || noswap) {
-                        scan >>= priority;
-                        scan = (scan * percent[file]) / 100;
-                }
-                nr[l] = nr_scan_try_batch(scan,
-                                          &reclaim_stat->nr_saved_scan[l]);
-        }
+        get_scan_count(zone, sc, nr, priority);
 
         while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                 nr[LRU_INACTIVE_FILE]) {
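
shrink_zone() is left with a single call: both the noswap special case and the per-LRU loop now live in get_scan_count(). One detail worth seeing with numbers is the scan >>= priority throttle the loop applies before the fraction. A hedged sketch, with DEF_PRIORITY assumed to be 12 as in kernels of this era:

    #include <stdio.h>

    #define DEF_PRIORITY 12 /* assumed: reclaim starts here and counts down */

    int main(void)
    {
            unsigned long lru_pages = 1UL << 20;    /* made-up: 1M pages on one list */
            int priority;

            /* Gentle passes scan a small slice; desperate ones scan it all. */
            for (priority = DEF_PRIORITY; priority >= 0; priority--)
                    printf("priority %2d: scan %7lu pages\n",
                           priority, lru_pages >> priority);
            return 0;
    }
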