author     Johannes Weiner <jweiner@redhat.com>            2012-01-12 20:17:50 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:13:04 -0500
commit     89b5fae5368f6aec62fb09c8e19b6c61f1154603
tree       48ba5619b31b54941ad127acc1c11edfd9701fe9 /mm/vmscan.c
parent     9f3a0d0933de079665ec1b498947ffbf805b0018
mm: vmscan: distinguish global reclaim from global LRU scanning

The traditional zone reclaim code scans the per-zone LRU lists during
direct reclaim and kswapd, and the per-zone per-memory cgroup LRU
lists when reclaiming on behalf of a memory cgroup limit.

Subsequent patches will convert the traditional reclaim code to
reclaim exclusively from the per-memory cgroup LRU lists. As a
result, checking which LRU list is being scanned will no longer be a
reliable way to tell global reclaim from limit reclaim.

This patch adds a global_reclaim() predicate that distinguishes
direct/kswapd reclaim from memory cgroup limit reclaim, and
substitutes it in all places where scanning_global_lru() is currently
used for that purpose.
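
To make the distinction concrete: global reclaim enters vmscan with no
memory cgroup target (e.g. try_to_free_pages() leaves sc->mem_cgroup
NULL), while limit reclaim always carries one. Below is a minimal
userspace sketch of the new predicate's semantics (illustration only;
the struct definitions are stand-ins, not the kernel's):

	#include <stdbool.h>
	#include <stdio.h>

	struct mem_cgroup { int id; };		/* stand-in for the kernel type */

	struct scan_control {			/* only the field this patch tests */
		struct mem_cgroup *mem_cgroup;	/* NULL for global reclaim */
	};

	static bool global_reclaim(struct scan_control *sc)
	{
		return !sc->mem_cgroup;
	}

	int main(void)
	{
		struct mem_cgroup memcg = { .id = 1 };
		struct scan_control global_sc = { .mem_cgroup = NULL };
		struct scan_control limit_sc = { .mem_cgroup = &memcg };

		/* Prints "1 0": true for global reclaim, false for limit reclaim. */
		printf("%d %d\n", global_reclaim(&global_sc),
		       global_reclaim(&limit_sc));
		return 0;
	}
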
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')

-rw-r--r--	mm/vmscan.c	62
1 file changed, 37 insertions(+), 25 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 26f4a8a4e0c7..ee4a46b8ae33 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -153,9 +153,25 @@ static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
+static bool global_reclaim(struct scan_control *sc)
+{
+	return !sc->mem_cgroup;
+}
+
+static bool scanning_global_lru(struct scan_control *sc)
+{
+	return !sc->mem_cgroup;
+}
 #else
-#define scanning_global_lru(sc)	(1)
+static bool global_reclaim(struct scan_control *sc)
+{
+	return true;
+}
+
+static bool scanning_global_lru(struct scan_control *sc)
+{
+	return true;
+}
 #endif
 
 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
@@ -994,7 +1010,7 @@ keep_lumpy:
 	 * back off and wait for congestion to clear because further reclaim
 	 * will encounter the same problem
 	 */
-	if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
+	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
 		zone_set_flag(zone, ZONE_CONGESTED);
 
 	free_hot_cold_page_list(&free_pages, 1);
@@ -1313,7 +1329,7 @@ static int too_many_isolated(struct zone *zone, int file,
 	if (current_is_kswapd())
 		return 0;
 
-	if (!scanning_global_lru(sc))
+	if (!global_reclaim(sc))
 		return 0;
 
 	if (file) {
@@ -1491,6 +1507,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 	if (scanning_global_lru(sc)) {
 		nr_taken = isolate_pages_global(nr_to_scan, &page_list,
 			&nr_scanned, sc->order, reclaim_mode, zone, 0, file);
+	} else {
+		nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
+			&nr_scanned, sc->order, reclaim_mode, zone,
+			sc->mem_cgroup, 0, file);
+	}
+	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
 			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
@@ -1498,14 +1520,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 		else
 			__count_zone_vm_events(PGSCAN_DIRECT, zone,
 					       nr_scanned);
-	} else {
-		nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
-			&nr_scanned, sc->order, reclaim_mode, zone,
-			sc->mem_cgroup, 0, file);
-		/*
-		 * mem_cgroup_isolate_pages() keeps track of
-		 * scanned pages on its own.
-		 */
 	}
 
 	if (nr_taken == 0) {
@@ -1646,18 +1660,16 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 						&pgscanned, sc->order,
 						reclaim_mode, zone,
 						1, file);
-		zone->pages_scanned += pgscanned;
 	} else {
 		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
 						&pgscanned, sc->order,
 						reclaim_mode, zone,
 						sc->mem_cgroup, 1, file);
-		/*
-		 * mem_cgroup_isolate_pages() keeps track of
-		 * scanned pages on its own.
-		 */
 	}
 
+	if (global_reclaim(sc))
+		zone->pages_scanned += pgscanned;
+
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
@@ -1827,7 +1839,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 
 static int vmscan_swappiness(struct scan_control *sc)
 {
-	if (scanning_global_lru(sc))
+	if (global_reclaim(sc))
 		return vm_swappiness;
 	return mem_cgroup_swappiness(sc->mem_cgroup);
 }
@@ -1862,9 +1874,9 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	 * latencies, so it's better to scan a minimum amount there as
 	 * well.
 	 */
-	if (scanning_global_lru(sc) && current_is_kswapd())
+	if (current_is_kswapd())
 		force_scan = true;
-	if (!scanning_global_lru(sc))
+	if (!global_reclaim(sc))
 		force_scan = true;
 
 	/* If we have no swap space, do not bother scanning anon pages. */
@@ -1881,7 +1893,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
 		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
 
-	if (scanning_global_lru(sc)) {
+	if (global_reclaim(sc)) {
 		free  = zone_page_state(zone, NR_FREE_PAGES);
 		/* If we have very few page cache pages,
 		   force-scan anon pages. */
@@ -2114,7 +2126,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
 		 * Take care memory controller reclaiming has small influence
 		 * to global LRU.
 		 */
-		if (scanning_global_lru(sc)) {
+		if (global_reclaim(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
@@ -2212,7 +2224,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	get_mems_allowed();
 	delayacct_freepages_start();
 
-	if (scanning_global_lru(sc))
+	if (global_reclaim(sc))
 		count_vm_event(ALLOCSTALL);
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
@@ -2226,7 +2238,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
 		 */
-		if (scanning_global_lru(sc)) {
+		if (global_reclaim(sc)) {
 			unsigned long lru_pages = 0;
 			for_each_zone_zonelist(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask)) {
@@ -2288,7 +2300,7 @@ out:
 		return 0;
 
 	/* top priority shrink_zones still had more to do? don't OOM, then */
-	if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
+	if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
 		return 1;
 
 	return 0;