author		Dave Hansen <dave@linux.vnet.ibm.com>	2008-06-12 18:21:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-06-12 21:05:41 -0400
commit		bcf8039ed45f56013c4afea5520bca7d909e5e61 (patch)
tree		2c3348eb300fdd910df9e012882bd3d2f263a390 /fs
parent		2165009bdf63f79716a36ad545df14c3cdf958b7 (diff)
pagemap: fix large pages in pagemap
We were walking right into huge page areas in the pagemap walker, where the huge pmds were being flagged by pmd_bad() and cleared.  That leaked huge pages.  Bad.

This patch at least works around that for now.  It ignores huge pages in the pagemap walker for the time being, and won't leak those pages.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
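The essence of the workaround, condensed from the hunk below: before dereferencing any PTE, look up the VMA covering the address and skip hugetlb VMAs entirely, so their addresses are reported as not present instead of having their pmds walked (and wrongly cleared as pmd_bad()). A simplified sketch of the per-address loop, with the kernel walker context omitted:

	/* Condensed from the hunk below (kernel context; not standalone code). */
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;	/* default for holes and huge pages */

		/* walked past the current VMA?  find the next one at or above addr */
		if (vma && addr >= vma->vm_end)
			vma = find_vma(walk->mm, addr);

		/* only dereference PTEs for a normal VMA that covers addr */
		if (vma && vma->vm_start <= addr && !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			pte_unmap(pte);		/* unmap before copying to userspace */
		}

		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}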
Diffstat (limited to 'fs')
-rw-r--r--	fs/proc/task_mmu.c	39
1 file changed, 30 insertions(+), 9 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f0df3109343d..ab8ccc9d14ff 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -553,24 +553,45 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
 	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 }
 
+static unsigned long pte_to_pagemap_entry(pte_t pte)
+{
+	unsigned long pme = 0;
+	if (is_swap_pte(pte))
+		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+	else if (pte_present(pte))
+		pme = PM_PFRAME(pte_pfn(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+	return pme;
+}
+
 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			     struct mm_walk *walk)
 {
+	struct vm_area_struct *vma;
 	struct pagemapread *pm = walk->private;
 	pte_t *pte;
 	int err = 0;
 
+	/* find the first VMA at or above 'addr' */
+	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
 		u64 pfn = PM_NOT_PRESENT;
-		pte = pte_offset_map(pmd, addr);
-		if (is_swap_pte(*pte))
-			pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
-		else if (pte_present(*pte))
-			pfn = PM_PFRAME(pte_pfn(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
-		/* unmap so we're not in atomic when we copy to userspace */
-		pte_unmap(pte);
+
+		/* check to see if we've left 'vma' behind
+		 * and need a new, higher one */
+		if (vma && (addr >= vma->vm_end))
+			vma = find_vma(walk->mm, addr);
+
+		/* check that 'vma' actually covers this address,
+		 * and that it isn't a huge page vma */
+		if (vma && (vma->vm_start <= addr) &&
+		    !is_vm_hugetlb_page(vma)) {
+			pte = pte_offset_map(pmd, addr);
+			pfn = pte_to_pagemap_entry(*pte);
+			/* unmap before userspace copy */
+			pte_unmap(pte);
+		}
 		err = add_to_pagemap(addr, pfn, pm);
 		if (err)
 			return err;
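For context, a hypothetical userspace reader of /proc/<pid>/pagemap that decodes entries the way the pte_to_pagemap_entry() helper above encodes them. The bit layout assumed here (bit 63 = present, bit 62 = swapped, bits 55-60 = page shift, low 55 bits = PFN) follows the PM_* macros in this era's fs/proc/task_mmu.c and is an assumption for illustration, not something stated in this commit; after this patch, addresses inside hugetlb VMAs simply read back as "not present".

/*
 * Hypothetical pagemap reader: prints whether one virtual page of a
 * process is present, swapped, or not present.  Bit positions below are
 * assumed from the 2008-era PM_* macros, not defined by this commit.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <hex-vaddr>\n", argv[0]);
		return 1;
	}

	char path[64];
	snprintf(path, sizeof(path), "/proc/%s/pagemap", argv[1]);

	int fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	unsigned long vaddr = strtoul(argv[2], NULL, 16);
	long pagesize = sysconf(_SC_PAGESIZE);
	uint64_t entry;

	/* one 64-bit entry per virtual page */
	off_t offset = (off_t)(vaddr / pagesize) * sizeof(entry);
	if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry)) {
		perror("pread");
		close(fd);
		return 1;
	}
	close(fd);

	if (entry & (1ULL << 63))		/* assumed PM_PRESENT bit */
		printf("present, pfn 0x%llx\n",
		       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	else if (entry & (1ULL << 62))		/* assumed PM_SWAP bit */
		printf("swapped\n");
	else
		printf("not present\n");
	return 0;
}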