author    Michal Hocko <mhocko@suse.cz>    2011-07-26 19:08:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-07-26 19:49:42 -0400
commit    d38144b7a5f8d0a5e05d549177191374c6911009
tree      ca0737291d22791c66aad10ad1218afc3dac51fd /mm
parent    d1a05b6973c7cb33144fa965d73facc708ffc37d
memcg: unify sync and async per-cpu charge cache draining
Currently we have two ways to drain the per-CPU caches for charges. drain_all_stock_sync synchronously drains all caches, while drain_all_stock_async asynchronously drains only those that refer to a given memory cgroup or its subtree in the hierarchy. Targeted async draining was introduced by 26fe6168 (memcg: fix percpu cached charge draining frequency) to reduce the number of cpu workers.

Sync draining is currently triggered only from mem_cgroup_force_empty, which is invoked either from userspace (mem_cgroup_force_empty_write) or when a cgroup is removed (mem_cgroup_pre_destroy). Although these are not usually frequent operations, it still makes sense to do targeted draining there as well, especially if the box has many CPUs.

This patch unifies both methods into a single function (drain_all_stock) which relies on the original async implementation and, for the sync mode, merely adds flush_work to wait on all caches that are still being drained. The FLUSHING_CACHED_CHARGE bit check prevents waiting on work that we have not triggered. Note that both the sync and async functions are protected by percpu_charge_mutex, so we cannot race with other drainers.

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
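The drain pattern described above can be illustrated outside the kernel. The sketch below is a minimal userspace analogue in C with POSIX threads, not the memcontrol.c code itself: every name in it (worker_cache, drain_all, drain_local, the flushing flag) is invented for illustration, and per-CPU works, schedule_work_on and flush_work are approximated by one thread per cache that is joined only in the sync case.

/* Userspace sketch of the unified sync/async drain pattern (illustrative only).
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CACHES 4

/* One "per-CPU" charge cache; flushing mirrors the FLUSHING_CACHED_CHARGE idea. */
struct worker_cache {
	atomic_int flushing;
	int cached;		/* charges stocked in this cache */
	pthread_t thread;
	bool scheduled;		/* did this drain_all() call start a drain here? */
};

static struct worker_cache caches[NR_CACHES];
/* Serializes drainers, standing in for percpu_charge_mutex. */
static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Worker body: give the stocked charges back, then clear the flushing flag. */
static void *drain_local(void *arg)
{
	struct worker_cache *c = arg;

	c->cached = 0;
	atomic_store(&c->flushing, 0);
	return NULL;
}

/* Unified drain: start an async drain per cache; in sync mode wait only for
 * the drains this call actually put in flight. */
static void drain_all(bool sync)
{
	for (int i = 0; i < NR_CACHES; i++) {
		struct worker_cache *c = &caches[i];
		int expected = 0;

		c->scheduled = false;
		if (c->cached &&
		    atomic_compare_exchange_strong(&c->flushing, &expected, 1))
			c->scheduled = !pthread_create(&c->thread, NULL,
						       drain_local, c);
	}

	if (!sync)
		return;		/* async caller does not wait */

	for (int i = 0; i < NR_CACHES; i++)
		if (caches[i].scheduled)
			pthread_join(caches[i].thread, NULL);
}

int main(void)
{
	for (int i = 0; i < NR_CACHES; i++)
		caches[i].cached = 32;

	pthread_mutex_lock(&drain_mutex);
	drain_all(true);	/* sync mode: all caches drained on return */
	pthread_mutex_unlock(&drain_mutex);

	for (int i = 0; i < NR_CACHES; i++)
		printf("cache %d holds %d charges\n", i, caches[i].cached);
	return 0;
}

The flushing flag plays the role the commit message gives to FLUSHING_CACHED_CHARGE: the sync path waits only for drains that were actually started, and the single mutex mimics percpu_charge_mutex serializing concurrent drainers. The real kernel code instead queues a work item on each online CPU and waits with flush_work under get_online_cpus()/put_online_cpus(); the sketch mirrors the control flow, not that machinery.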
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	48
1 file changed, 34 insertions(+), 14 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2f5534e1968c..af920d0f9025 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2154,19 +2154,14 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
 }
 
 /*
- * Tries to drain stocked charges in other cpus. This function is asynchronous
- * and just put a work per cpu for draining localy on each cpu. Caller can
- * expects some charges will be back to res_counter later but cannot wait for
- * it.
+ * Drains all per-CPU charge caches for given root_mem resp. subtree
+ * of the hierarchy under it. sync flag says whether we should block
+ * until the work is done.
  */
-static void drain_all_stock_async(struct mem_cgroup *root_mem)
+static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 {
 	int cpu, curcpu;
-	/*
-	 * If someone calls draining, avoid adding more kworker runs.
-	 */
-	if (!mutex_trylock(&percpu_charge_mutex))
-		return;
+
 	/* Notify other cpus that system-wide "drain" is running */
 	get_online_cpus();
 	/*
@@ -2197,17 +2192,42 @@ static void drain_all_stock_async(struct mem_cgroup *root_mem)
 			schedule_work_on(cpu, &stock->work);
 		}
 	}
+
+	if (!sync)
+		goto out;
+
+	for_each_online_cpu(cpu) {
+		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+			flush_work(&stock->work);
+	}
+out:
 	put_online_cpus();
+}
+
+/*
+ * Tries to drain stocked charges in other cpus. This function is asynchronous
+ * and just put a work per cpu for draining localy on each cpu. Caller can
+ * expects some charges will be back to res_counter later but cannot wait for
+ * it.
+ */
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
+{
+	/*
+	 * If someone calls draining, avoid adding more kworker runs.
+	 */
+	if (!mutex_trylock(&percpu_charge_mutex))
+		return;
+	drain_all_stock(root_mem, false);
 	mutex_unlock(&percpu_charge_mutex);
-	/* We don't wait for flush_work */
 }
 
 /* This is a synchronous drain interface. */
-static void drain_all_stock_sync(void)
+static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 {
 	/* called when force_empty is called */
 	mutex_lock(&percpu_charge_mutex);
-	schedule_on_each_cpu(drain_local_stock);
+	drain_all_stock(root_mem, true);
 	mutex_unlock(&percpu_charge_mutex);
 }
 
@@ -3856,7 +3876,7 @@ move_account:
 			goto out;
 		/* This is for making all *used* pages to be on LRU. */
 		lru_add_drain_all();
-		drain_all_stock_sync();
+		drain_all_stock_sync(mem);
 		ret = 0;
 		mem_cgroup_start_move(mem);
 		for_each_node_state(node, N_HIGH_MEMORY) {