diff options
author | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2009-01-07 21:07:49 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-08 11:31:04 -0500 |
commit | bced0520fe462bb94021dcabd32e99630c171be2 (patch) | |
tree | 6fa234f4a25bc8231742aea13e7cc2664b0a69a6 /mm/memory.c | |
parent | 7a81b88cb53e335ff7d019e6398c95792c817d93 (diff) |
memcg: fix gfp_mask of callers of charge
Fix misuse of GFP_KERNEL.
Now, most callers of the mem_cgroup_charge_xxx functions use GFP_KERNEL.
I think that this is from the fact that page_cgroup *was* dynamically
allocated.
But now, we allocate all page_cgroup at boot. And
mem_cgroup_try_to_free_pages() reclaim memory from GFP_HIGHUSER_MOVABLE +
specified GFP_RECLAIM_MASK.
* This is because we just want to reduce memory usage.
"Where we should reclaim from ?" is not a problem in memcg.
This patch modifies gfp masks to be GFP_HIGHUSER_MOVABLE if possible.
Note: This patch is not for fixing behavior but for showing sane information
in source code.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 9 |
1 files changed, 5 insertions, 4 deletions
diff --git a/mm/memory.c b/mm/memory.c index 7f210f160990..ba5189e322e6 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2000,7 +2000,7 @@ gotten: | |||
2000 | cow_user_page(new_page, old_page, address, vma); | 2000 | cow_user_page(new_page, old_page, address, vma); |
2001 | __SetPageUptodate(new_page); | 2001 | __SetPageUptodate(new_page); |
2002 | 2002 | ||
2003 | if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) | 2003 | if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE)) |
2004 | goto oom_free_new; | 2004 | goto oom_free_new; |
2005 | 2005 | ||
2006 | /* | 2006 | /* |
@@ -2431,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2431 | lock_page(page); | 2431 | lock_page(page); |
2432 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); | 2432 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); |
2433 | 2433 | ||
2434 | if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM) { | 2434 | if (mem_cgroup_try_charge(mm, GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) { |
2435 | ret = VM_FAULT_OOM; | 2435 | ret = VM_FAULT_OOM; |
2436 | unlock_page(page); | 2436 | unlock_page(page); |
2437 | goto out; | 2437 | goto out; |
@@ -2512,7 +2512,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2512 | goto oom; | 2512 | goto oom; |
2513 | __SetPageUptodate(page); | 2513 | __SetPageUptodate(page); |
2514 | 2514 | ||
2515 | if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) | 2515 | if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE)) |
2516 | goto oom_free_page; | 2516 | goto oom_free_page; |
2517 | 2517 | ||
2518 | entry = mk_pte(page, vma->vm_page_prot); | 2518 | entry = mk_pte(page, vma->vm_page_prot); |
@@ -2603,7 +2603,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2603 | ret = VM_FAULT_OOM; | 2603 | ret = VM_FAULT_OOM; |
2604 | goto out; | 2604 | goto out; |
2605 | } | 2605 | } |
2606 | if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { | 2606 | if (mem_cgroup_newpage_charge(page, |
2607 | mm, GFP_HIGHUSER_MOVABLE)) { | ||
2607 | ret = VM_FAULT_OOM; | 2608 | ret = VM_FAULT_OOM; |
2608 | page_cache_release(page); | 2609 | page_cache_release(page); |
2609 | goto out; | 2610 | goto out; |