author	Chris Metcalf <cmetcalf@tilera.com>	2013-08-07 11:00:45 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2013-08-13 16:25:52 -0400
commit	a0bd12d718b6f9a19f95bf892021cff9438044c4 (patch)
tree	08a280b13c2871e3e2edac5db1c71324d6cf9725
parent	6b940606d9919616338ede415b48917d4a5bcf73 (diff)
tile: fix some issues in hugepage support
First, in huge_pte_offset(), we were erroneously checking pgd_present(), which is always true, rather than pud_present(), which is the thing that tells us if there is a top-level (L0) PTE. Fixing this means we properly look up huge page entries only when the Present bit is actually set in the PTE.

Second, use the standard pte_alloc_map() instead of the hand-rolled pte_alloc_hugetlb() routine that basically was written to avoid worrying about CONFIG_HIGHPTE. However, we no longer plan to support HIGHPTE, so a separate routine was just unnecessary code duplication.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
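For reference, here is a condensed sketch of the lookup order the first fix establishes in huge_pte_offset(). The function name is hypothetical and the tail of the routine is elided; the authoritative change is the diff below.

/*
 * Minimal sketch of the corrected lookup order: the Present check moves
 * from the pgd, which is always present on tile, down to the pud, where
 * it actually distinguishes a populated top-level (L0) entry.
 */
pte_t *huge_pte_offset_sketch(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;

	/* Get the top-level page table entry (always "present" on tile). */
	pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

	/* Check presence one level down, where the bit is meaningful. */
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;

	/* ... L0/L1/L2 huge PTE checks continue as in the patched file ... */
	return NULL;
}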
-rw-r--r--	arch/tile/mm/hugetlbpage.c	38
1 file changed, 3 insertions(+), 35 deletions(-)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 650ccff8378c..e514899e1100 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -49,38 +49,6 @@ int huge_shift[HUGE_SHIFT_ENTRIES] = {
 #endif
 };
 
-/*
- * This routine is a hybrid of pte_alloc_map() and pte_alloc_kernel().
- * It assumes that L2 PTEs are never in HIGHMEM (we don't support that).
- * It locks the user pagetable, and bumps up the mm->nr_ptes field,
- * but otherwise allocate the page table using the kernel versions.
- */
-static pte_t *pte_alloc_hugetlb(struct mm_struct *mm, pmd_t *pmd,
-			       unsigned long address)
-{
-	pte_t *new;
-
-	if (pmd_none(*pmd)) {
-		new = pte_alloc_one_kernel(mm, address);
-		if (!new)
-			return NULL;
-
-		smp_wmb(); /* See comment in __pte_alloc */
-
-		spin_lock(&mm->page_table_lock);
-		if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
-			mm->nr_ptes++;
-			pmd_populate_kernel(mm, pmd, new);
-			new = NULL;
-		} else
-			VM_BUG_ON(pmd_trans_splitting(*pmd));
-		spin_unlock(&mm->page_table_lock);
-		if (new)
-			pte_free_kernel(mm, new);
-	}
-
-	return pte_offset_kernel(pmd, address);
-}
 #endif
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
@@ -109,7 +77,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 		else {
 			if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
 				panic("Unexpected page size %#lx\n", sz);
-			return pte_alloc_hugetlb(mm, pmd, addr);
+			return pte_alloc_map(mm, NULL, pmd, addr);
 		}
 	}
 #else
@@ -144,14 +112,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 
 	/* Get the top-level page table entry. */
 	pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
-	if (!pgd_present(*pgd))
-		return NULL;
 
 	/* We don't have four levels. */
 	pud = pud_offset(pgd, addr);
 #ifndef __PAGETABLE_PUD_FOLDED
 # error support fourth page table level
 #endif
+	if (!pud_present(*pud))
+		return NULL;
 
 	/* Check for an L0 huge PTE, if we have three levels. */
 #ifndef __PAGETABLE_PMD_FOLDED