author    Johannes Weiner <jweiner@redhat.com>    2012-01-12 20:18:06 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-01-12 23:13:05 -0500
commit    b95a2f2d486d0d768a92879c023a03757b9c7e58 (patch)
tree      4f07a63d7587131bd82c49e0d9ac124d057539fd /mm/vmscan.c
parent    ad2b8e601099a23dffffb53f91c18d874fe98854 (diff)
mm: vmscan: convert global reclaim to per-memcg LRU lists
The global per-zone LRU lists are about to go away on memcg-enabled kernels, so global reclaim must be able to find its pages on the per-memcg LRU lists. Since the LRU pages of a zone are distributed over all existing memory cgroups, a scan target for a zone is complete only when all memory cgroups have been scanned for their proportional share of the zone's memory.

The forced scanning of small scan targets from kswapd is limited to zones marked unreclaimable; otherwise kswapd can quickly overreclaim by force-scanning the LRU lists of multiple memory cgroups.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
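To make the change concrete, here is a condensed sketch of what shrink_zone() looks like with this patch applied, pieced together from the hunks below. The declarations of root and the reclaim cookie are not part of this diff and are recalled from the surrounding code of the same series, so treat them as illustrative rather than authoritative:

static void shrink_zone(int priority, struct zone *zone,
			struct scan_control *sc)
{
	/*
	 * Assumed from surrounding code, not shown in this diff:
	 * root of the hierarchy to reclaim from (NULL for global
	 * reclaim) and the per-zone iteration cookie.
	 */
	struct mem_cgroup *root = sc->target_mem_cgroup;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = priority,
	};
	struct mem_cgroup *memcg;

	/* Walk every memcg that may hold pages on this zone's LRU lists. */
	memcg = mem_cgroup_iter(root, NULL, &reclaim);
	do {
		struct mem_cgroup_zone mz = {
			.mem_cgroup = memcg,
			.zone = zone,
		};

		shrink_mem_cgroup_zone(priority, &mz, sc);

		/*
		 * Limit reclaim stops after a single memcg; global
		 * reclaim (direct or kswapd) keeps iterating until the
		 * zone's overall scan target is covered.
		 */
		if (!global_reclaim(sc)) {
			mem_cgroup_iter_break(root, memcg);
			break;
		}
		memcg = mem_cgroup_iter(root, memcg, &reclaim);
	} while (memcg);
}

The point of the loop is visible in the shrink_zone() hunks below: the old global_reclaim() shortcut that scanned only the root LRU lists is removed, and global reclaim now takes the same per-memcg path as limit reclaim, differing only in that it does not break out after the first memcg.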
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--    mm/vmscan.c    39
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 136c7eb0ad88..024168cfdcb0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1886,7 +1886,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 	 * latencies, so it's better to scan a minimum amount there as
 	 * well.
 	 */
-	if (current_is_kswapd())
+	if (current_is_kswapd() && mz->zone->all_unreclaimable)
 		force_scan = true;
 	if (!global_reclaim(sc))
 		force_scan = true;
@@ -2111,16 +2111,6 @@ static void shrink_zone(int priority, struct zone *zone,
 	};
 	struct mem_cgroup *memcg;
 
-	if (global_reclaim(sc)) {
-		struct mem_cgroup_zone mz = {
-			.mem_cgroup = NULL,
-			.zone = zone,
-		};
-
-		shrink_mem_cgroup_zone(priority, &mz, sc);
-		return;
-	}
-
 	memcg = mem_cgroup_iter(root, NULL, &reclaim);
 	do {
 		struct mem_cgroup_zone mz = {
@@ -2134,6 +2124,10 @@ static void shrink_zone(int priority, struct zone *zone,
 		 * scanned it with decreasing priority levels until
 		 * nr_to_reclaim had been reclaimed. This priority
 		 * cycle is thus over after a single memcg.
+		 *
+		 * Direct reclaim and kswapd, on the other hand, have
+		 * to scan all memory cgroups to fulfill the overall
+		 * scan target for the zone.
 		 */
 		if (!global_reclaim(sc)) {
 			mem_cgroup_iter_break(root, memcg);
@@ -2478,13 +2472,24 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 static void age_active_anon(struct zone *zone, struct scan_control *sc,
 			    int priority)
 {
-	struct mem_cgroup_zone mz = {
-		.mem_cgroup = NULL,
-		.zone = zone,
-	};
+	struct mem_cgroup *memcg;
 
-	if (inactive_anon_is_low(&mz))
-		shrink_active_list(SWAP_CLUSTER_MAX, &mz, sc, priority, 0);
+	if (!total_swap_pages)
+		return;
+
+	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	do {
+		struct mem_cgroup_zone mz = {
+			.mem_cgroup = memcg,
+			.zone = zone,
+		};
+
+		if (inactive_anon_is_low(&mz))
+			shrink_active_list(SWAP_CLUSTER_MAX, &mz,
+					   sc, priority, 0);
+
+		memcg = mem_cgroup_iter(NULL, memcg, NULL);
+	} while (memcg);
 }
 
 /*