diff options
author | Chen, Kenneth W <kenneth.w.chen@intel.com> | 2005-09-03 18:55:02 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@evo.osdl.org> | 2005-09-05 03:05:46 -0400 |
commit | 0e5c9f39f64d8a55c5db37a5ea43e37d3422fd92 (patch) | |
tree | 2b7da9a3813f1ce475d276d55243b2675b90349b /arch/i386/mm/hugetlbpage.c | |
parent | 02b0ccef903e85673ead74ddb7c431f2f7ce183d (diff) |
[PATCH] remove hugetlb_clean_stale_pgtable() and fix huge_pte_alloc()
I don't think we need to call hugetlb_clean_stale_pgtable() anymore
in 2.6.13 because of the rework with free_pgtables(). It now collects
all the pte pages at the time of munmap. It used to collect page
table pages only when an entire pgd could be freed, leaving stale pte
pages behind. Not anymore with 2.6.13. This function will never be
called, so we should turn it into a BUG_ON.
I also spotted two problems here, not Adam's fault :-)
(1) in huge_pte_alloc(), it looks like a bug to me that pud is not
checked before calling pmd_alloc()
(2) in hugetlb_clean_stale_pgtable(), it is also missing a call to
pmd_free_tlb. I think a tlb flush is required to flush the mapping
for the page table itself when we clear out the pmd pointing to a
pte page. However, since hugetlb_clean_stale_pgtable() is never
called, it won't trigger the bug.
Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Cc: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386/mm/hugetlbpage.c')
-rw-r--r-- | arch/i386/mm/hugetlbpage.c | 23 |
1 files changed, 3 insertions, 20 deletions
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c index 24c8a536b588..d524127c9afc 100644 --- a/arch/i386/mm/hugetlbpage.c +++ b/arch/i386/mm/hugetlbpage.c | |||
@@ -22,20 +22,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | |||
22 | { | 22 | { |
23 | pgd_t *pgd; | 23 | pgd_t *pgd; |
24 | pud_t *pud; | 24 | pud_t *pud; |
25 | pmd_t *pmd; | ||
26 | pte_t *pte = NULL; | 25 | pte_t *pte = NULL; |
27 | 26 | ||
28 | pgd = pgd_offset(mm, addr); | 27 | pgd = pgd_offset(mm, addr); |
29 | pud = pud_alloc(mm, pgd, addr); | 28 | pud = pud_alloc(mm, pgd, addr); |
30 | pmd = pmd_alloc(mm, pud, addr); | 29 | if (pud) |
30 | pte = (pte_t *) pmd_alloc(mm, pud, addr); | ||
31 | BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); | ||
31 | 32 | ||
32 | if (!pmd) | ||
33 | goto out; | ||
34 | |||
35 | pte = (pte_t *) pmd; | ||
36 | if (!pte_none(*pte) && !pte_huge(*pte)) | ||
37 | hugetlb_clean_stale_pgtable(pte); | ||
38 | out: | ||
39 | return pte; | 33 | return pte; |
40 | } | 34 | } |
41 | 35 | ||
@@ -130,17 +124,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, | |||
130 | } | 124 | } |
131 | #endif | 125 | #endif |
132 | 126 | ||
133 | void hugetlb_clean_stale_pgtable(pte_t *pte) | ||
134 | { | ||
135 | pmd_t *pmd = (pmd_t *) pte; | ||
136 | struct page *page; | ||
137 | |||
138 | page = pmd_page(*pmd); | ||
139 | pmd_clear(pmd); | ||
140 | dec_page_state(nr_page_table_pages); | ||
141 | page_cache_release(page); | ||
142 | } | ||
143 | |||
144 | /* x86_64 also uses this file */ | 127 | /* x86_64 also uses this file */ |
145 | 128 | ||
146 | #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA | 129 | #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA |