author    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>    2009-01-07 21:08:10 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>        2009-01-08 11:31:06 -0500
commit    2c26fdd70c3094fa3e84caf9ef434911933d5477 (patch)
tree      06a3bafc12f5f8fd91d9ed1fca5ea0a632ef2004
parent    887007561ae58628f03aa9046949747c04f63be8 (diff)
memcg: revert gfp mask fix
My patch, memcg-fix-gfp_mask-of-callers-of-charge.patch, changed the gfp_mask
of charge callers to GFP_HIGHUSER_MOVABLE to show what will happen at memory
reclaim. In recent discussion it was NACKed because it looks ugly. This patch
reverts it and cleans up the gfp_mask of charge callers. There is no behavior
change, but it needs review before hunks deeper in the patch queue are
regenerated on top of it.

This patch also adds an explanation of the meaning of the gfp_mask passed to
the charge functions in memcontrol.h.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
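In concrete terms, the convention this patch settles on has two cases,
sketched below with the 2.6.29-era prototypes that appear in this diff (a
sketch of the calling pattern, not a new API):

	/* A caller with no gfp context of its own charges with GFP_KERNEL. */
	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		goto oom;

	/* A caller that was handed a gfp_mask (e.g. the page-cache path)
	 * keeps only the bits that matter for reclaim. */
	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);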
-rw-r--r--  include/linux/memcontrol.h | 10
-rw-r--r--  mm/filemap.c               |  2
-rw-r--r--  mm/memcontrol.c            | 10
-rw-r--r--  mm/memory.c                | 10
-rw-r--r--  mm/shmem.c                 |  8
-rw-r--r--  mm/swapfile.c              |  3
6 files changed, 25 insertions, 18 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2fdd1380bf0a..59ac95a64508 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -26,6 +26,16 @@ struct page;
 struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions with gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
+ * allocate memory but reclaims memory from all available zones. So, the
+ * "where I want memory from" bits of gfp_mask have no meaning; any bits of
+ * that field would work, but having a rule avoids ambiguity: a charge
+ * function's gfp_mask should be set to GFP_KERNEL or to
+ * gfp_mask & GFP_RECLAIM_MASK.
+ * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
+ */
 
 extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f55a1e2baf7..ceba0bd03662 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	VM_BUG_ON(!PageLocked(page));
 
 	error = mem_cgroup_cache_charge(page, current->mm,
-					gfp_mask & ~__GFP_HIGHMEM);
+					gfp_mask & GFP_RECLAIM_MASK);
 	if (error)
 		goto out;
 
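For orientation, the gfp_mask reaching this function usually comes from the
mapping, and mapping masks commonly include placement bits such as
__GFP_HIGHMEM and __GFP_MOVABLE (i.e. GFP_HIGHUSER_MOVABLE), which is why the
filtering above matters. A hedged sketch of a typical 2.6.29-era call path (a
common pattern, not a quote from any particular filesystem):

	/* Read/readahead paths add freshly allocated pages to the page
	 * cache using the mapping's allocation mask: */
	error = add_to_page_cache_lru(page, mapping, index,
				      mapping_gfp_mask(mapping));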
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9bf5d7c8ede7..b9cd57b667d6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1248,7 +1248,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 	unlock_page_cgroup(pc);
 
 	if (mem) {
-		ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
+		ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
 		css_put(&mem->css);
 	}
 	*ptr = mem;
@@ -1378,7 +1378,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 			break;
 
 		progress = try_to_free_mem_cgroup_pages(memcg,
-						GFP_HIGHUSER_MOVABLE, false);
+						GFP_KERNEL, false);
 		if (!progress)			retry_count--;
 	}
 	return ret;
@@ -1418,7 +1418,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 			break;
 
 		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-		try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
+		try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
 		if (curusage >= oldusage)
 			retry_count--;
@@ -1464,7 +1464,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 		}
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-		ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
+		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
 		if (ret == -ENOMEM)
 			break;
 
@@ -1550,7 +1550,7 @@ try_to_free:
 		goto out;
 	}
 	progress = try_to_free_mem_cgroup_pages(mem,
-					GFP_HIGHUSER_MOVABLE, false);
+					GFP_KERNEL, false);
 	if (!progress) {
 		nr_retries--;
 		/* maybe some writeback is necessary */
diff --git a/mm/memory.c b/mm/memory.c
index 1358012ffa73..e5bfbe6b594c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2000,7 +2000,7 @@ gotten:
 	cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
-	if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
 	/*
@@ -2431,8 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-	if (mem_cgroup_try_charge_swapin(mm, page,
-				GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
+	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
 		ret = VM_FAULT_OOM;
 		unlock_page(page);
 		goto out;
@@ -2524,7 +2523,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto oom;
 	__SetPageUptodate(page);
 
-	if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 		goto oom_free_page;
 
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -2615,8 +2614,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		ret = VM_FAULT_OOM;
 		goto out;
 	}
-	if (mem_cgroup_newpage_charge(page,
-				mm, GFP_HIGHUSER_MOVABLE)) {
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 		ret = VM_FAULT_OOM;
 		page_cache_release(page);
 		goto out;
diff --git a/mm/shmem.c b/mm/shmem.c
index adf5c3eedbc9..bbb7b043c986 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -932,8 +932,8 @@ found:
 	 * Charge page using GFP_HIGHUSER_MOVABLE while we can wait.
 	 * charged back to the user(not to caller) when swap account is used.
 	 */
-	error = mem_cgroup_cache_charge_swapin(page,
-			current->mm, GFP_HIGHUSER_MOVABLE, true);
+	error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL,
+					true);
 	if (error)
 		goto out;
 	error = radix_tree_preload(GFP_KERNEL);
@@ -1275,7 +1275,7 @@ repeat:
 	 * charge against this swap cache here.
 	 */
 	if (mem_cgroup_cache_charge_swapin(swappage,
-				current->mm, gfp, false)) {
+				current->mm, gfp & GFP_RECLAIM_MASK, false)) {
 		page_cache_release(swappage);
 		error = -ENOMEM;
 		goto failed;
@@ -1393,7 +1393,7 @@ repeat:
 
 	/* Precharge page while we can wait, compensate after */
 	error = mem_cgroup_cache_charge(filepage, current->mm,
-					GFP_HIGHUSER_MOVABLE);
+					GFP_KERNEL);
 	if (error) {
 		page_cache_release(filepage);
 		shmem_unacct_blocks(info->flags, 1);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0579d9069b61..da422c47e2ee 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -698,8 +698,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_t *pte;
 	int ret = 1;
 
-	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
-				GFP_HIGHUSER_MOVABLE, &ptr))
+	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
 		ret = -ENOMEM;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);