path: root/mm
author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2009-01-07 21:07:49 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 11:31:04 -0500
commit	bced0520fe462bb94021dcabd32e99630c171be2 (patch)
tree	6fa234f4a25bc8231742aea13e7cc2664b0a69a6 /mm
parent	7a81b88cb53e335ff7d019e6398c95792c817d93 (diff)
memcg: fix gfp_mask of callers of charge
Fix misuse of GFP_KERNEL. Most callers of the mem_cgroup_charge_xxx functions currently pass GFP_KERNEL. This seems to date from when page_cgroup was allocated dynamically; now all page_cgroup structures are allocated at boot. try_to_free_mem_cgroup_pages() reclaims using GFP_HIGHUSER_MOVABLE combined with the caller-specified bits allowed by GFP_RECLAIM_MASK: the only goal is to reduce memory usage, so "where should we reclaim from?" is not a question memcg needs to answer.

This patch changes the gfp masks to GFP_HIGHUSER_MOVABLE where possible.

Note: this patch does not change behavior; it only makes the source code show sane information.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
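The reasoning above is easier to see as arithmetic on the masks: the memcg reclaim path keeps only the caller's bits that fall inside GFP_RECLAIM_MASK and takes the remaining bits from GFP_HIGHUSER_MOVABLE, so a GFP_KERNEL caller and a GFP_HIGHUSER_MOVABLE caller end up reclaiming identically. A minimal user-space sketch of that combination follows; the DEMO_* values and memcg_reclaim_gfp() are illustrative stand-ins, not the kernel's actual definitions or code.

#include <stdio.h>

/* Illustrative flag bits only -- not the kernel's real gfp values. */
#define DEMO_WAIT			0x01u
#define DEMO_IO				0x02u
#define DEMO_FS				0x04u
#define DEMO_HIGHMEM			0x08u
#define DEMO_MOVABLE			0x10u

#define DEMO_GFP_KERNEL			(DEMO_WAIT | DEMO_IO | DEMO_FS)
#define DEMO_GFP_HIGHUSER_MOVABLE	(DEMO_GFP_KERNEL | DEMO_HIGHMEM | DEMO_MOVABLE)
/* Bits that say "how hard may we reclaim", not "where may pages be placed". */
#define DEMO_GFP_RECLAIM_MASK		(DEMO_WAIT | DEMO_IO | DEMO_FS)

/* Combine a charge caller's mask with the memcg reclaim default. */
static unsigned int memcg_reclaim_gfp(unsigned int caller_gfp)
{
	return (caller_gfp & DEMO_GFP_RECLAIM_MASK) |
	       (DEMO_GFP_HIGHUSER_MOVABLE & ~DEMO_GFP_RECLAIM_MASK);
}

int main(void)
{
	/* Both callers end up with the same effective mask (0x1f here). */
	printf("GFP_KERNEL caller           -> %#x\n",
	       memcg_reclaim_gfp(DEMO_GFP_KERNEL));
	printf("GFP_HIGHUSER_MOVABLE caller -> %#x\n",
	       memcg_reclaim_gfp(DEMO_GFP_HIGHUSER_MOVABLE));
	return 0;
}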
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	8
-rw-r--r--	mm/memory.c	9
-rw-r--r--	mm/shmem.c	6
-rw-r--r--	mm/swapfile.c	2
4 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f568b1964551..c34eb52bdc3f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -808,8 +808,9 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
 	}
 	unlock_page_cgroup(pc);
 	if (mem) {
-		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
-						ctype, mem);
+		ret = mem_cgroup_charge_common(newpage, NULL,
+						GFP_HIGHUSER_MOVABLE,
+						ctype, mem);
 		css_put(&mem->css);
 	}
 	return ret;
@@ -889,7 +890,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 			ret = -EBUSY;
 			break;
 		}
-		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
+		progress = try_to_free_mem_cgroup_pages(memcg,
+							GFP_HIGHUSER_MOVABLE);
 		if (!progress)
 			retry_count--;
 	}
diff --git a/mm/memory.c b/mm/memory.c
index 7f210f160990..ba5189e322e6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2000,7 +2000,7 @@ gotten:
 	cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
-	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
+	if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
 		goto oom_free_new;
 
 	/*
@@ -2431,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-	if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM) {
+	if (mem_cgroup_try_charge(mm, GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
 		ret = VM_FAULT_OOM;
 		unlock_page(page);
 		goto out;
@@ -2512,7 +2512,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto oom;
 	__SetPageUptodate(page);
 
-	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
+	if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
 		goto oom_free_page;
 
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -2603,7 +2603,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
-		if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+		if (mem_cgroup_newpage_charge(page,
+					mm, GFP_HIGHUSER_MOVABLE)) {
 			ret = VM_FAULT_OOM;
 			page_cache_release(page);
 			goto out;
diff --git a/mm/shmem.c b/mm/shmem.c
index 5941f9801363..bd9b4ea307b2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -928,8 +928,8 @@ found:
 	error = 1;
 	if (!inode)
 		goto out;
-	/* Precharge page using GFP_KERNEL while we can wait */
-	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+	/* Charge page using GFP_HIGHUSER_MOVABLE while we can wait */
+	error = mem_cgroup_cache_charge(page, current->mm, GFP_HIGHUSER_MOVABLE);
 	if (error)
 		goto out;
 	error = radix_tree_preload(GFP_KERNEL);
@@ -1379,7 +1379,7 @@ repeat:
 
 	/* Precharge page while we can wait, compensate after */
 	error = mem_cgroup_cache_charge(filepage, current->mm,
-					gfp & ~__GFP_HIGHMEM);
+					GFP_HIGHUSER_MOVABLE);
 	if (error) {
 		page_cache_release(filepage);
 		shmem_unacct_blocks(info->flags, 1);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fb926efb5167..ddc6d92be2cb 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -695,7 +695,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_t *pte;
 	int ret = 1;
 
-	if (mem_cgroup_try_charge(vma->vm_mm, GFP_KERNEL, &ptr))
+	if (mem_cgroup_try_charge(vma->vm_mm, GFP_HIGHUSER_MOVABLE, &ptr))
 		ret = -ENOMEM;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);