author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>    2015-02-11 18:27:57 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org> 2015-02-11 20:06:06 -0500
commit     26bcd64aa9a4ded25f0dd1848759081422a14d80
tree       ca6049db2d65e2ca3ef55836b7172aeea0d75b42  /mm/memcontrol.c
parent     d85f4d6d3bfe3b82e2903ac51a2f837eab7115d7
memcg: cleanup preparation for page table walk
pagewalk.c can handle vma in itself, so we don't have to pass vma via
walk->private.  And both mem_cgroup_count_precharge() and
mem_cgroup_move_charge() run their own for-each-vma loops, but that is now
done in pagewalk.c, so let's clean them up.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
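For illustration, the calling pattern this cleanup moves to looks roughly
like the sketch below.  Everything prefixed example_ (and the nr_ranges
counter) is hypothetical; only struct mm_walk, walk->vma, walk->private,
and the walk_page_range(start, end, walk) signature of this era come from
the real pagewalk API.

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical per-walk state, carried via walk->private. */
struct example_state {
	unsigned long nr_ranges;
};

static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	/* pagewalk.c supplies the current VMA; no need to smuggle it in. */
	struct vm_area_struct *vma = walk->vma;
	struct example_state *state = walk->private;

	if (vma)
		state->nr_ranges++;
	return 0;	/* 0 keeps the walk going */
}

static void example_walk(struct mm_struct *mm)
{
	struct example_state state = { 0 };
	/* One mm_walk for the whole mm: no per-vma loop at the call site. */
	struct mm_walk walk = {
		.pmd_entry = example_pmd_entry,
		.mm = mm,
		.private = &state,
	};

	down_read(&mm->mmap_sem);
	/* [0, ~0UL) spans every VMA; pagewalk.c iterates them itself. */
	walk_page_range(0, ~0UL, &walk);
	up_read(&mm->mmap_sem);
}

A side effect worth noting: once pagewalk.c owns the VMA and hands it out
through walk->vma, the .private field is free again for genuine caller
state, which is exactly the separation this commit exploits.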
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 49 ++++++---------
1 file changed, 16 insertions(+), 33 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c7a9cb627180..095c1f96fbec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4839,7 +4839,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 					unsigned long addr, unsigned long end,
 					struct mm_walk *walk)
 {
-	struct vm_area_struct *vma = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte;
 	spinlock_t *ptl;
 
@@ -4865,20 +4865,13 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 {
 	unsigned long precharge;
-	struct vm_area_struct *vma;
 
+	struct mm_walk mem_cgroup_count_precharge_walk = {
+		.pmd_entry = mem_cgroup_count_precharge_pte_range,
+		.mm = mm,
+	};
 	down_read(&mm->mmap_sem);
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		struct mm_walk mem_cgroup_count_precharge_walk = {
-			.pmd_entry = mem_cgroup_count_precharge_pte_range,
-			.mm = mm,
-			.private = vma,
-		};
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		walk_page_range(vma->vm_start, vma->vm_end,
-					&mem_cgroup_count_precharge_walk);
-	}
+	walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
 	up_read(&mm->mmap_sem);
 
 	precharge = mc.precharge;
@@ -5011,7 +5004,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 					struct mm_walk *walk)
 {
 	int ret = 0;
-	struct vm_area_struct *vma = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte;
 	spinlock_t *ptl;
 	enum mc_target_type target_type;
@@ -5107,7 +5100,10 @@ put:			/* get_mctgt_type() gets the page */
 
 static void mem_cgroup_move_charge(struct mm_struct *mm)
 {
-	struct vm_area_struct *vma;
+	struct mm_walk mem_cgroup_move_charge_walk = {
+		.pmd_entry = mem_cgroup_move_charge_pte_range,
+		.mm = mm,
+	};
 
 	lru_add_drain_all();
 	/*
@@ -5130,24 +5126,11 @@ retry:
 		cond_resched();
 		goto retry;
 	}
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		int ret;
-		struct mm_walk mem_cgroup_move_charge_walk = {
-			.pmd_entry = mem_cgroup_move_charge_pte_range,
-			.mm = mm,
-			.private = vma,
-		};
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		ret = walk_page_range(vma->vm_start, vma->vm_end,
-					&mem_cgroup_move_charge_walk);
-		if (ret)
-			/*
-			 * means we have consumed all precharges and failed in
-			 * doing additional charge. Just abandon here.
-			 */
-			break;
-	}
+	/*
+	 * When we have consumed all precharges and failed in doing
+	 * additional charge, the page walk just aborts.
+	 */
+	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
 	up_read(&mm->mmap_sem);
 	atomic_dec(&mc.from->moving_account);
 }