path: root/fs/proc
author    Pavel Emelyanov <xemul@parallels.com>	2013-07-03 18:01:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 19:07:25 -0400
commit    af9de7eb180fa9b74c2cdc256349304a58c63c02 (patch)
tree      e67c5d5b89ffff70a96d45b3d95cb5f9df39e65b /fs/proc
parent    040fa02077de01c7e08fa75be6125e4ca5636011 (diff)
clear_refs: introduce private struct for mm_walk
In the next patch the clear-refs type will be required in the clear_refs_pte_range() function, so prepare walk->private to carry this info.

Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Glauber Costa <glommer@parallels.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
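For context, the pattern this patch introduces is the usual one for walkers that take an opaque private pointer: the caller packs whatever per-walk state the callback needs into a small struct, stores its address in the walker, and the callback casts walk->private back to that struct. A minimal user-space analogue is sketched below; the walk/walk_ctx types and walk_range() helper are invented for illustration only and are not kernel APIs.

#include <stdio.h>

/* Hypothetical stand-ins for struct mm_walk and its pmd_entry callback. */
struct walk {
	int (*entry)(unsigned long addr, void *private);
	void *private;		/* opaque context owned by the caller */
};

/* Caller-defined context, analogous to struct clear_refs_private. */
struct walk_ctx {
	const char *tag;	/* e.g. the current VMA; more fields can be added later */
};

static int print_entry(unsigned long addr, void *private)
{
	struct walk_ctx *ctx = private;	/* recover the caller's context */

	printf("%s: 0x%lx\n", ctx->tag, addr);
	return 0;
}

static void walk_range(struct walk *w, unsigned long start, unsigned long end)
{
	for (unsigned long a = start; a < end; a += 0x1000)
		w->entry(a, w->private);
}

int main(void)
{
	struct walk_ctx ctx = { .tag = "region A" };
	struct walk w = { .entry = print_entry, .private = &ctx };

	walk_range(&w, 0x1000, 0x4000);
	ctx.tag = "region B";	/* update per-iteration state, like cp.vma = vma below */
	walk_range(&w, 0x8000, 0xa000);
	return 0;
}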
Diffstat (limited to 'fs/proc')
-rw-r--r--	fs/proc/task_mmu.c	12
1 file changed, 10 insertions, 2 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dad0809db551..ef6f6c62dfee 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -695,10 +695,15 @@ enum clear_refs_types {
 	CLEAR_REFS_LAST,
 };
 
+struct clear_refs_private {
+	struct vm_area_struct *vma;
+};
+
 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
-	struct vm_area_struct *vma = walk->private;
+	struct clear_refs_private *cp = walk->private;
+	struct vm_area_struct *vma = cp->vma;
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
 	struct page *page;
@@ -753,13 +758,16 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		return -ESRCH;
 	mm = get_task_mm(task);
 	if (mm) {
+		struct clear_refs_private cp = {
+		};
 		struct mm_walk clear_refs_walk = {
 			.pmd_entry = clear_refs_pte_range,
 			.mm = mm,
+			.private = &cp,
 		};
 		down_read(&mm->mmap_sem);
 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
-			clear_refs_walk.private = vma;
+			cp.vma = vma;
 			if (is_vm_hugetlb_page(vma))
 				continue;
 			/*
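The commit message hints at why the struct is worth the churn: clear_refs_write() gains a place to pass the parsed clear-refs type down into the callback without changing the mm_walk API. The follow-up patch is not shown on this page, so the sketch below only illustrates the general idea as a user-space analogue; the enum values and the mapped-only semantics are invented for the example.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical operation selector, standing in for enum clear_refs_types. */
enum clear_op { CLEAR_ALL, CLEAR_MAPPED_ONLY };

/* The context struct grows a new field without touching the walker's callback signature. */
struct clear_ctx {
	const char *region;	/* stand-in for the current VMA */
	enum clear_op op;	/* stand-in for the clear-refs type */
};

static int clear_entry(unsigned long addr, bool mapped, void *private)
{
	struct clear_ctx *ctx = private;

	if (ctx->op == CLEAR_MAPPED_ONLY && !mapped)
		return 0;	/* skip: the kind of per-type decision the extra field enables */
	printf("%s: clearing 0x%lx\n", ctx->region, addr);
	return 0;
}

int main(void)
{
	struct clear_ctx ctx = { .region = "heap", .op = CLEAR_MAPPED_ONLY };

	/* A real walker would iterate page tables; two hand-rolled calls suffice here. */
	clear_entry(0x1000, true,  &ctx);
	clear_entry(0x2000, false, &ctx);
	return 0;
}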