author		Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>	2009-04-02 19:57:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-02 22:04:56 -0400
commit		83aae4c737866da3280f51fd15da58eddd788397
tree		a4fdabfb02949e0d44743850b7f37ae52eaed757 /mm
parent		627991a20b3f4d504d20466ab405fe035cb1a20a
memcg: cleanup cache_charge
The current mem_cgroup_cache_charge() is a bit complicated, especially
in the shmem swap-in case.
This patch cleans it up by using try_charge_swapin and commit_charge_swapin.
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	60
1 file changed, 23 insertions(+), 37 deletions(-)
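
For orientation before the hunks, here is a condensed sketch of the shmem branch of mem_cgroup_cache_charge() and the new commit wrapper as they look after this cleanup. It is reconstructed from the patch itself and is an excerpt rather than drop-in code: the surrounding declarations, locking, and the plain page-cache path are elided, and the comments are editorial.

	/* shmem */
	if (PageSwapCache(page)) {
		/*
		 * Swap-cache page: charge against the memcg recorded for the
		 * swap entry (falling back to mm's memcg), then commit as a
		 * SHMEM charge.  The commit helper now does the LRU del/add
		 * around the commit and the memsw double-count fixup that
		 * used to be open-coded here.
		 */
		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
		if (!ret)
			__mem_cgroup_commit_charge_swapin(page, mem,
					MEM_CGROUP_CHARGE_TYPE_SHMEM);
	} else
		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
					MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);

	return ret;

	/* The old entry point becomes a thin wrapper that keeps the MAPPED type. */
	void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
	{
		__mem_cgroup_commit_charge_swapin(page, ptr,
						MEM_CGROUP_CHARGE_TYPE_MAPPED);
	}

The gain of the cleanup is visible here: the swap-cache handling that was open-coded in mem_cgroup_cache_charge() now lives in __mem_cgroup_commit_charge_swapin(), shared with the MAPPED swap-in path.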
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 55dea5968464..2fc6d6c48238 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1238,6 +1238,10 @@ int mem_cgroup_newpage_charge(struct page *page,
 					MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+					enum charge_type ctype);
+
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -1274,16 +1278,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 		unlock_page_cgroup(pc);
 	}
 
-	if (do_swap_account && PageSwapCache(page)) {
-		mem = try_get_mem_cgroup_from_swapcache(page);
-		if (mem)
-			mm = NULL;
-		else
-			mem = NULL;
-		/* SwapCache may be still linked to LRU now. */
-		mem_cgroup_lru_del_before_commit_swapcache(page);
-	}
-
 	if (unlikely(!mm && !mem))
 		mm = &init_mm;
 
@@ -1291,32 +1285,16 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 		return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
 
-	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
-	if (mem)
-		css_put(&mem->css);
-	if (PageSwapCache(page))
-		mem_cgroup_lru_add_after_commit_swapcache(page);
+	/* shmem */
+	if (PageSwapCache(page)) {
+		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+		if (!ret)
+			__mem_cgroup_commit_charge_swapin(page, mem,
+					MEM_CGROUP_CHARGE_TYPE_SHMEM);
+	} else
+		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+					MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
 
-	if (do_swap_account && !ret && PageSwapCache(page)) {
-		swp_entry_t ent = {.val = page_private(page)};
-		unsigned short id;
-		/* avoid double counting */
-		id = swap_cgroup_record(ent, 0);
-		rcu_read_lock();
-		mem = mem_cgroup_lookup(id);
-		if (mem) {
-			/*
-			 * We did swap-in. Then, this entry is doubly counted
-			 * both in mem and memsw. We uncharge it, here.
-			 * Recorded ID can be obsolete. We avoid calling
-			 * css_tryget()
-			 */
-			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-			mem_cgroup_put(mem);
-		}
-		rcu_read_unlock();
-	}
 	return ret;
 }
 
@@ -1359,7 +1337,9 @@ charge_cur_mm:
 	return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+					enum charge_type ctype)
 {
 	struct page_cgroup *pc;
 
@@ -1369,7 +1349,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 		return;
 	pc = lookup_page_cgroup(page);
 	mem_cgroup_lru_del_before_commit_swapcache(page);
-	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+	__mem_cgroup_commit_charge(ptr, pc, ctype);
 	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/*
 	 * Now swap is on-memory. This means this page may be
@@ -1400,6 +1380,12 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 
 }
 
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+	__mem_cgroup_commit_charge_swapin(page, ptr,
+					MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 {
 	if (mem_cgroup_disabled())