diff options
author | Konstantin Khlebnikov <khlebnikov@openvz.org> | 2012-05-29 18:07:01 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-29 19:22:26 -0400 |
commit | 90126375d89ab8e0bde30ff22139b6097d56ed8a (patch) | |
tree | 3e18ac488acb6bf6f413a57eb8095f04e49d7cb2 /mm | |
parent | 1a93be0e7a6fc7f3d19101402665c7a958beb568 (diff) |
mm/vmscan: push lruvec pointer into get_scan_count()
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmscan.c | 25 |
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c index 6dbf2c2082e7..b139ad7f396e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -150,11 +150,6 @@ static bool global_reclaim(struct scan_control *sc) | |||
150 | } | 150 | } |
151 | #endif | 151 | #endif |
152 | 152 | ||
153 | static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz) | ||
154 | { | ||
155 | return &mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup)->reclaim_stat; | ||
156 | } | ||
157 | |||
158 | static unsigned long get_lruvec_size(struct lruvec *lruvec, enum lru_list lru) | 153 | static unsigned long get_lruvec_size(struct lruvec *lruvec, enum lru_list lru) |
159 | { | 154 | { |
160 | if (!mem_cgroup_disabled()) | 155 | if (!mem_cgroup_disabled()) |
@@ -1581,20 +1576,18 @@ static int vmscan_swappiness(struct scan_control *sc) | |||
1581 | * | 1576 | * |
1582 | * nr[0] = anon pages to scan; nr[1] = file pages to scan | 1577 | * nr[0] = anon pages to scan; nr[1] = file pages to scan |
1583 | */ | 1578 | */ |
1584 | static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc, | 1579 | static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, |
1585 | unsigned long *nr) | 1580 | unsigned long *nr) |
1586 | { | 1581 | { |
1587 | unsigned long anon, file, free; | 1582 | unsigned long anon, file, free; |
1588 | unsigned long anon_prio, file_prio; | 1583 | unsigned long anon_prio, file_prio; |
1589 | unsigned long ap, fp; | 1584 | unsigned long ap, fp; |
1590 | struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz); | 1585 | struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; |
1591 | u64 fraction[2], denominator; | 1586 | u64 fraction[2], denominator; |
1592 | enum lru_list lru; | 1587 | enum lru_list lru; |
1593 | int noswap = 0; | 1588 | int noswap = 0; |
1594 | bool force_scan = false; | 1589 | bool force_scan = false; |
1595 | struct lruvec *lruvec; | 1590 | struct zone *zone = lruvec_zone(lruvec); |
1596 | |||
1597 | lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup); | ||
1598 | 1591 | ||
1599 | /* | 1592 | /* |
1600 | * If the zone or memcg is small, nr[l] can be 0. This | 1593 | * If the zone or memcg is small, nr[l] can be 0. This |
@@ -1606,7 +1599,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc, | |||
1606 | * latencies, so it's better to scan a minimum amount there as | 1599 | * latencies, so it's better to scan a minimum amount there as |
1607 | * well. | 1600 | * well. |
1608 | */ | 1601 | */ |
1609 | if (current_is_kswapd() && mz->zone->all_unreclaimable) | 1602 | if (current_is_kswapd() && zone->all_unreclaimable) |
1610 | force_scan = true; | 1603 | force_scan = true; |
1611 | if (!global_reclaim(sc)) | 1604 | if (!global_reclaim(sc)) |
1612 | force_scan = true; | 1605 | force_scan = true; |
@@ -1626,10 +1619,10 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc, | |||
1626 | get_lruvec_size(lruvec, LRU_INACTIVE_FILE); | 1619 | get_lruvec_size(lruvec, LRU_INACTIVE_FILE); |
1627 | 1620 | ||
1628 | if (global_reclaim(sc)) { | 1621 | if (global_reclaim(sc)) { |
1629 | free = zone_page_state(mz->zone, NR_FREE_PAGES); | 1622 | free = zone_page_state(zone, NR_FREE_PAGES); |
1630 | /* If we have very few page cache pages, | 1623 | /* If we have very few page cache pages, |
1631 | force-scan anon pages. */ | 1624 | force-scan anon pages. */ |
1632 | if (unlikely(file + free <= high_wmark_pages(mz->zone))) { | 1625 | if (unlikely(file + free <= high_wmark_pages(zone))) { |
1633 | fraction[0] = 1; | 1626 | fraction[0] = 1; |
1634 | fraction[1] = 0; | 1627 | fraction[1] = 0; |
1635 | denominator = 1; | 1628 | denominator = 1; |
@@ -1655,7 +1648,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc, | |||
1655 | * | 1648 | * |
1656 | * anon in [0], file in [1] | 1649 | * anon in [0], file in [1] |
1657 | */ | 1650 | */ |
1658 | spin_lock_irq(&mz->zone->lru_lock); | 1651 | spin_lock_irq(&zone->lru_lock); |
1659 | if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { | 1652 | if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { |
1660 | reclaim_stat->recent_scanned[0] /= 2; | 1653 | reclaim_stat->recent_scanned[0] /= 2; |
1661 | reclaim_stat->recent_rotated[0] /= 2; | 1654 | reclaim_stat->recent_rotated[0] /= 2; |
@@ -1676,7 +1669,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc, | |||
1676 | 1669 | ||
1677 | fp = file_prio * (reclaim_stat->recent_scanned[1] + 1); | 1670 | fp = file_prio * (reclaim_stat->recent_scanned[1] + 1); |
1678 | fp /= reclaim_stat->recent_rotated[1] + 1; | 1671 | fp /= reclaim_stat->recent_rotated[1] + 1; |
1679 | spin_unlock_irq(&mz->zone->lru_lock); | 1672 | spin_unlock_irq(&zone->lru_lock); |
1680 | 1673 | ||
1681 | fraction[0] = ap; | 1674 | fraction[0] = ap; |
1682 | fraction[1] = fp; | 1675 | fraction[1] = fp; |
@@ -1794,7 +1787,7 @@ static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz, | |||
1794 | restart: | 1787 | restart: |
1795 | nr_reclaimed = 0; | 1788 | nr_reclaimed = 0; |
1796 | nr_scanned = sc->nr_scanned; | 1789 | nr_scanned = sc->nr_scanned; |
1797 | get_scan_count(mz, sc, nr); | 1790 | get_scan_count(lruvec, sc, nr); |
1798 | 1791 | ||
1799 | blk_start_plug(&plug); | 1792 | blk_start_plug(&plug); |
1800 | while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || | 1793 | while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || |