path: root/mm/memcontrol.c
author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>  2012-03-21 19:34:23 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2012-03-21 20:55:01 -0400
commit     619d094b5872a5af153f1af77a8b7f7326faf0d0 (patch)
tree       4de9ff8640d8cee092800cd722e621ea2db63640 /mm/memcontrol.c
parent     9e3357907c84517d9e07bc0b19265807f0264b43 (diff)
memcg: simplify move_account() check
In memcg, to avoid taking a lock with irqs disabled on every access to
page_cgroup, a "flag + rcu_read_lock()" scheme is used.  It works as follows:

        CPU-A                          CPU-B
                                       rcu_read_lock()
    set flag
                                       if (flag is set)
                                               take heavy lock
                                       do job.
    synchronize_rcu()                  rcu_read_unlock()
    take heavy lock.

In recent discussion, it was argued that using a per-cpu value for this flag
just complicates the code, because 'set flag' is very rare.

This patch changes the 'flag' implementation from percpu to atomic_t.
This is much simpler.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Ying Han <yinghan@google.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
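For illustration, a minimal sketch of the handshake described above, using the
atomic_t flag this patch introduces.  The struct and function names
(sketch_group, sketch_start_move, ...) are made up for the example and are not
the identifiers used in mm/memcontrol.c; the spinlock stands in for the
"heavy lock" of the diagram.

/* Illustrative only -- not the kernel's actual memcg code. */
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct sketch_group {
	atomic_t   moving_account;	/* > 0 while an account move is in flight */
	spinlock_t heavy_lock;		/* the "heavy lock" of the diagram */
};

/* CPU-A: raise the flag, then wait for a grace period so that every
 * reader that could have missed the new value has finished its RCU
 * read-side critical section before the move actually starts. */
static void sketch_start_move(struct sketch_group *grp)
{
	atomic_inc(&grp->moving_account);
	synchronize_rcu();
	/* ... start moving pages here ... */
}

static void sketch_end_move(struct sketch_group *grp)
{
	atomic_dec(&grp->moving_account);
}

/* CPU-B: only pay for the heavy lock when a move may be racing. */
static void sketch_do_job(struct sketch_group *grp)
{
	bool locked = false;

	rcu_read_lock();
	if (atomic_read(&grp->moving_account) > 0) {
		spin_lock(&grp->heavy_lock);
		locked = true;
	}
	/* ... do the job (e.g. update per-page statistics) ... */
	if (locked)
		spin_unlock(&grp->heavy_lock);
	rcu_read_unlock();
}

Compared with the percpu implementation removed below, starting and ending a
move no longer needs get_online_cpus() or the pcp_counter_lock: one atomic
increment plus synchronize_rcu() is enough, which is acceptable because
"set flag" is very rare.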
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  70
1 file changed, 30 insertions, 40 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index eba04a481e03..cfd2db08cfe1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -89,7 +89,6 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
 	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
 	MEM_CGROUP_STAT_DATA,		/* end of data requires synchronization */
-	MEM_CGROUP_ON_MOVE,		/* someone is moving account between groups */
 	MEM_CGROUP_STAT_NSTATS,
 };
 
@@ -298,6 +297,10 @@ struct mem_cgroup {
 	 */
 	unsigned long	move_charge_at_immigrate;
 	/*
+	 * set > 0 if pages under this cgroup are moving to other cgroup.
+	 */
+	atomic_t	moving_account;
+	/*
 	 * percpu counter.
 	 */
 	struct mem_cgroup_stat_cpu *stat;
@@ -1287,35 +1290,36 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 	return memcg->swappiness;
 }
 
+/*
+ * memcg->moving_account is used for checking possibility that some thread is
+ * calling move_account(). When a thread on CPU-A starts moving pages under
+ * a memcg, other threads should check memcg->moving_account under
+ * rcu_read_lock(), like this:
+ *
+ *				CPU-A				CPU-B
+ *						rcu_read_lock()
+ *	memcg->moving_account+1		if (memcg->moving_account)
+ *							take heavy locks.
+ *	synchronize_rcu()		update something.
+ *					rcu_read_unlock()
+ *	start move here.
+ */
 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
 {
-	int cpu;
-
-	get_online_cpus();
-	spin_lock(&memcg->pcp_counter_lock);
-	for_each_online_cpu(cpu)
-		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
-	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
-	spin_unlock(&memcg->pcp_counter_lock);
-	put_online_cpus();
-
+	atomic_inc(&memcg->moving_account);
 	synchronize_rcu();
 }
 
 static void mem_cgroup_end_move(struct mem_cgroup *memcg)
 {
-	int cpu;
-
-	if (!memcg)
-		return;
-	get_online_cpus();
-	spin_lock(&memcg->pcp_counter_lock);
-	for_each_online_cpu(cpu)
-		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
-	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
-	spin_unlock(&memcg->pcp_counter_lock);
-	put_online_cpus();
+	/*
+	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
+	 * We check NULL in callee rather than caller.
+	 */
+	if (memcg)
+		atomic_dec(&memcg->moving_account);
 }
+
 /*
  * 2 routines for checking "mem" is under move_account() or not.
  *
@@ -1331,7 +1335,7 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
 static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
 {
 	VM_BUG_ON(!rcu_read_lock_held());
-	return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
+	return atomic_read(&memcg->moving_account) > 0;
 }
 
 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
@@ -1882,8 +1886,8 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
  * by flags.
  *
  * Considering "move", this is an only case we see a race. To make the race
- * small, we check MEM_CGROUP_ON_MOVE percpu value and detect there are
- * possibility of race condition. If there is, we take a lock.
+ * small, we check memcg->moving_account and detect there is a possibility
+ * of a race condition. If there is, we take a lock.
  */
 
 void mem_cgroup_update_page_stat(struct page *page,
@@ -2100,17 +2104,6 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
 		per_cpu(memcg->stat->events[i], cpu) = 0;
 		memcg->nocpu_base.events[i] += x;
 	}
-	/* need to clear ON_MOVE value, works as a kind of lock. */
-	per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
-	spin_unlock(&memcg->pcp_counter_lock);
-}
-
-static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
-{
-	int idx = MEM_CGROUP_ON_MOVE;
-
-	spin_lock(&memcg->pcp_counter_lock);
-	per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
 	spin_unlock(&memcg->pcp_counter_lock);
 }
 
@@ -2122,11 +2115,8 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
 	struct memcg_stock_pcp *stock;
 	struct mem_cgroup *iter;
 
-	if ((action == CPU_ONLINE)) {
-		for_each_mem_cgroup(iter)
-			synchronize_mem_cgroup_on_move(iter, cpu);
+	if (action == CPU_ONLINE)
 		return NOTIFY_OK;
-	}
 
 	if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;
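For context, the reader side that mem_cgroup_stealed() protects is
mem_cgroup_update_page_stat(), whose body is not shown in the hunks above.
It follows roughly the pattern below.  This is a simplified sketch, not the
verbatim function: sketch_move_lock()/sketch_move_unlock() are illustrative
stand-ins for the per-page_cgroup move lock used in mm/memcontrol.c.

/* Sketch only -- not the verbatim mem_cgroup_update_page_stat(). */
void sketch_update_page_stat(struct page *page, int idx, int val)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	struct mem_cgroup *memcg;
	unsigned long flags = 0;
	bool locked = false;

	rcu_read_lock();
	memcg = pc->mem_cgroup;
	if (!memcg || !PageCgroupUsed(pc))
		goto out;
	/*
	 * mem_cgroup_stealed() reads memcg->moving_account: if a move may
	 * be in flight, pc->mem_cgroup is unstable, so take the heavy
	 * (per-page_cgroup move) lock before touching the counters.
	 */
	if (mem_cgroup_stealed(memcg)) {
		sketch_move_lock(pc, &flags);	/* illustrative helper */
		locked = true;
		memcg = pc->mem_cgroup;		/* re-read under the lock */
		if (!memcg || !PageCgroupUsed(pc))
			goto out;
	}
	this_cpu_add(memcg->stat->count[idx], val);
out:
	if (locked)
		sketch_move_unlock(pc, &flags);	/* illustrative helper */
	rcu_read_unlock();
}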