author    Adam Litke <agl@us.ibm.com>  2005-09-03 18:55:00 -0400
committer Linus Torvalds <torvalds@evo.osdl.org>  2005-09-05 03:05:46 -0400
commit    7bf07f3d4b4358aa6d99a26d7a0165f1e91c3fcc
tree      150e1f1172e3a7912b37bef7b06a657d47bc1657
parent    32e51a8c976fc72c3e9bcece9767d9908816bf8e
[PATCH] hugetlb: move stale pte check into huge_pte_alloc()
Initial Post (Wed, 17 Aug 2005)

This patch moves the

	if (! pte_none(*pte))
		hugetlb_clean_stale_pgtable(pte);

logic into huge_pte_alloc() so all of its callers can be immune to the bug
described by Kenneth Chen at http://lkml.org/lkml/2004/6/16/246

> It turns out there is a bug in hugetlb_prefault(): with 3 level page table,
> huge_pte_alloc() might return a pmd that points to a PTE page.  It happens
> if the virtual address for hugetlb mmap is recycled from previously used
> normal page mmap.  free_pgtables() might not scrub the pmd entry on
> munmap and hugetlb_prefault skips on any pmd presence regardless what type
> it is.

Unless I am missing something, it seems more correct to place the check
inside huge_pte_alloc() to prevent the same bug wherever a huge pte is
allocated.  It also allows checking for this condition when lazily faulting
huge pages later in the series.

Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: <linux-mm@kvack.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
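[Editor's note: to make the recycling scenario concrete, here is a hedged
userspace sketch of the trigger, not part of the patch.  It assumes an i386
box with 4MB huge pages, reserved huge pages, and a hugetlbfs mount at the
hypothetical path /mnt/huge.  The normal mapping instantiates a PTE page,
munmap() may leave the pmd entry pointing at it, and the hugetlb mmap()
then prefaults through huge_pte_alloc() at the same virtual address.]

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define HPAGE_LEN (4UL << 20)	/* one 4MB i386 huge page */

	int main(void)
	{
		/* Over-reserve so a hugepage-aligned address can be carved out. */
		char *raw = mmap(NULL, 2 * HPAGE_LEN, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (raw == MAP_FAILED)
			return 1;
		char *addr = (char *)(((unsigned long)raw + HPAGE_LEN - 1)
				      & ~(HPAGE_LEN - 1));

		/* Touch the normal mapping so a PTE page is instantiated. */
		memset(addr, 0, HPAGE_LEN);
		/* free_pgtables() might not scrub the pmd entry here. */
		munmap(raw, 2 * HPAGE_LEN);

		/* Recycle the same virtual address for a hugetlb mapping;
		 * /mnt/huge is a hypothetical hugetlbfs mount point. */
		int fd = open("/mnt/huge/stale-test", O_CREAT | O_RDWR, 0600);
		if (fd < 0)
			return 1;
		char *huge = mmap(addr, HPAGE_LEN, PROT_READ | PROT_WRITE,
				  MAP_SHARED | MAP_FIXED, fd, 0);
		if (huge == MAP_FAILED)
			return 1;

		/* On this kernel the mmap() above prefaults the huge page
		 * through huge_pte_alloc(), hitting the recycled pmd slot. */
		huge[0] = 1;
		printf("hugetlb mapping recycled %p\n", (void *)huge);
		return 0;
	}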
 arch/i386/mm/hugetlbpage.c | 13 +++++++++++--
 mm/hugetlb.c               |  2 --
 2 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 3b099f32b948..57c486f0e896 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -22,12 +22,21 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd = NULL;
+	pmd_t *pmd;
+	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	pmd = pmd_alloc(mm, pud, addr);
-	return (pte_t *) pmd;
+
+	if (!pmd)
+		goto out;
+
+	pte = (pte_t *) pmd;
+	if (!pte_none(*pte) && !pte_huge(*pte))
+		hugetlb_clean_stale_pgtable(pte);
+out:
+	return pte;
 }
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6bf720bc662c..901ac523a1c3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -360,8 +360,6 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 		ret = -ENOMEM;
 		goto out;
 	}
-	if (! pte_none(*pte))
-		hugetlb_clean_stale_pgtable(pte);
 
 	idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
 		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
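[Editor's note: with the check folded into huge_pte_alloc(), the
mm/hugetlb.c hunk above shows the payoff: hugetlb_prefault() keeps only
its -ENOMEM handling, and any future caller gets the stale-pmd cleanup
for free.  A minimal sketch of the resulting calling convention, a
non-runnable fragment using only the names visible in this patch:]

	pte_t *pte = huge_pte_alloc(mm, addr);

	if (!pte) {
		/* pmd_alloc() failed somewhere down the walk. */
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * Otherwise pte is safe to use: if the slot still held a stale
	 * PTE page from a recycled normal mapping, huge_pte_alloc()
	 * already scrubbed it via hugetlb_clean_stale_pgtable().
	 */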