author    Michal Hocko <mhocko@suse.cz>  2011-07-26 19:08:29 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-07-26 19:49:43 -0400
commit    8521fc50d433507a7cdc96bec280f9e5888a54cc (patch)
tree      b89d7b2eb34ba80b52e1f89ca767a86847df59b8 /mm/memcontrol.c
parent    3e92041d68b40c47faa34c7dc08fc650a6c36adc (diff)
memcg: get rid of percpu_charge_mutex lock
percpu_charge_mutex protects against multiple simultaneous per-cpu charge
cache drainings, because we might otherwise end up with too many work items.
At least this was the case until commit 26fe61684449 ("memcg: fix percpu
cached charge draining frequency"), which introduced more targeted draining
for the async mode. Now that the sync draining is targeted as well, we can
safely remove the mutex, because we will never send more work items than the
current number of CPUs.

FLUSHING_CACHED_CHARGE protects against sending the same work multiple
times, and stock->nr_pages == 0 protects against pointlessly sending work
when there is obviously nothing to be done. This is of course racy, but we
can live with it: the race window is really small (we would have to see
FLUSHING_CACHED_CHARGE cleared while nr_pages is still non-zero).

The only remaining place where we can race is the synchronous mode, where we
rely on the FLUSHING_CACHED_CHARGE test, which might have been set by
another drainer on the same group, but we should wait in that case as well.

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
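[Editor's note] The scheduling scheme the changelog describes reduces to an
atomic flag bit plus a cheap nr_pages check per cpu. The sketch below is a
simplified rendering of what drain_all_stock() looks like after this patch,
not the patched source itself: the memcg hierarchy filtering and the
current-cpu fast path are elided, and the function name
drain_all_stock_sketch is invented for illustration.

	/*
	 * Sketch of the lockless drain scheduling described above.
	 * All helpers used here (for_each_online_cpu, per_cpu,
	 * test_and_set_bit, schedule_work_on) are the real kernel APIs.
	 */
	static void drain_all_stock_sketch(struct mem_cgroup *root_mem)
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);

			/* Cheap, racy filter: nothing cached, nothing to drain. */
			if (!stock->nr_pages)
				continue;
			/*
			 * The flag bit is the only "lock": the first caller to
			 * set it owns scheduling the work item, later callers
			 * skip it. The per-cpu worker clears the bit once the
			 * cache is drained.
			 */
			if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
				schedule_work_on(cpu, &stock->work);
		}
		put_online_cpus();
	}

Because each cpu's work item is guarded by its own flag bit, at most one
item per online CPU can be in flight, which is why the mutex throttling is
no longer needed.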
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 79f23a189941..5f84d2351ddb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2092,7 +2092,6 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	(0)
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-static DEFINE_MUTEX(percpu_charge_mutex);
 
 /*
  * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -2199,7 +2198,8 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+		if (mem_cgroup_same_or_subtree(root_mem, stock->cached) &&
+				test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
 			flush_work(&stock->work);
 	}
 out:
@@ -2214,22 +2214,14 @@ out:
  */
 static void drain_all_stock_async(struct mem_cgroup *root_mem)
 {
-	/*
-	 * If someone calls draining, avoid adding more kworker runs.
-	 */
-	if (!mutex_trylock(&percpu_charge_mutex))
-		return;
 	drain_all_stock(root_mem, false);
-	mutex_unlock(&percpu_charge_mutex);
 }
 
 /* This is a synchronous drain interface. */
 static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 {
 	/* called when force_empty is called */
-	mutex_lock(&percpu_charge_mutex);
 	drain_all_stock(root_mem, true);
-	mutex_unlock(&percpu_charge_mutex);
 }
 
 /*
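[Editor's note] For context, not part of this diff: FLUSHING_CACHED_CHARGE
is cleared by the per-cpu worker once the cache has been drained, which is
what the flush_work() path in drain_all_stock() waits on in sync mode.
Roughly, in the 3.0-era mm/memcontrol.c (reproduced from memory, so treat
it as a sketch rather than the exact source):

	static void drain_local_stock(struct work_struct *dummy)
	{
		struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);

		/* Return this cpu's cached charges to the res_counter... */
		drain_stock(stock);
		/* ...then allow the next drainer to schedule work here again. */
		clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
	}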