path: root/mm/memcontrol.c
author	Michal Hocko <mhocko@suse.cz>	2011-07-26 19:08:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-26 19:49:42 -0400
commit	d1a05b6973c7cb33144fa965d73facc708ffc37d (patch)
tree	463a8bfedd56b4972f273c816fa62a55d7bc4754 /mm/memcontrol.c
parent	82f9d486e59f588c7d100865c36510644abda356 (diff)
memcg: do not try to drain per-cpu caches without pages
drain_all_stock_async tries to optimize the work done on the work queue by
excluding any work for the current CPU, because it assumes that the context
we are called from has already tried to charge from that cache and failed,
so the cache must already be empty.

While the assumption is correct, we can optimize further by checking the
current number of pages in the cache.  This also reduces the work done on
other CPUs whose stock is empty.  For the current CPU we can simply call
drain_local_stock rather than deferring it to the work queue.

[kamezawa.hiroyu@jp.fujitsu.com: use drain_local_stock for current CPU optimization]
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	13
1 file changed, 7 insertions, 6 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 04e505bfd7dd..2f5534e1968c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2180,11 +2180,8 @@ static void drain_all_stock_async(struct mem_cgroup *root_mem)
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *mem;
 
-		if (cpu == curcpu)
-			continue;
-
 		mem = stock->cached;
-		if (!mem)
+		if (!mem || !stock->nr_pages)
 			continue;
 		if (mem != root_mem) {
 			if (!root_mem->use_hierarchy)
@@ -2193,8 +2190,12 @@ static void drain_all_stock_async(struct mem_cgroup *root_mem)
 			if (!css_is_ancestor(&mem->css, &root_mem->css))
 				continue;
 		}
-		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
-			schedule_work_on(cpu, &stock->work);
+		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
+			if (cpu == curcpu)
+				drain_local_stock(&stock->work);
+			else
+				schedule_work_on(cpu, &stock->work);
+		}
 	}
 	put_online_cpus();
 	mutex_unlock(&percpu_charge_mutex);
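
For readability, the sketch below consolidates the two hunks and shows how the per-CPU loop in drain_all_stock_async() reads with this patch applied. It is an illustration rather than a verbatim copy of mm/memcontrol.c: the setup (taking percpu_charge_mutex, get_online_cpus(), computing curcpu) and the symbols used (memcg_stock_pcp, drain_local_stock(), FLUSHING_CACHED_CHARGE, css_is_ancestor()) are assumed from the kernel tree at this commit, and the context lines between the two hunks are reconstructed, not taken from the diff.

/*
 * Sketch only: the per-CPU loop of drain_all_stock_async() with this
 * patch applied.  Locking/setup and the context between the hunks are
 * assumed from mm/memcontrol.c at this commit.
 */
for_each_online_cpu(cpu) {
	struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
	struct mem_cgroup *mem;

	mem = stock->cached;
	/* New: skip stocks that cache a memcg but hold no pre-charged pages. */
	if (!mem || !stock->nr_pages)
		continue;
	if (mem != root_mem) {
		if (!root_mem->use_hierarchy)
			continue;
		/* Only drain stocks charged against root_mem or its subtree. */
		if (!css_is_ancestor(&mem->css, &root_mem->css))
			continue;
	}
	if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
		if (cpu == curcpu)
			/* New: drain the current CPU's stock synchronously. */
			drain_local_stock(&stock->work);
		else
			schedule_work_on(cpu, &stock->work);
	}
}
put_online_cpus();
mutex_unlock(&percpu_charge_mutex);

The net effect is that an empty per-CPU stock no longer has a flush work item scheduled for it, and the calling CPU drains its own stock directly instead of deferring to the work queue.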