about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/hugetlbpage.c
diff options
context:
space:
mode:
author: Becky Bruce <beckyb@kernel.crashing.org> 2011-10-10 06:50:39 -0400
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org> 2011-12-07 00:26:22 -0500
commita1cd54198811e3c35fcaabdb94767b307f7ad1db (patch)
tree025b16589cab83d330fb2ae436dcdf70da813eac /arch/powerpc/mm/hugetlbpage.c
parent8c1674de2b42d9d9f14a32445055aa525892c708 (diff)
powerpc: Update hugetlb huge_pte_alloc and tablewalk code for FSL BOOKE
This updates the hugetlb page table code to handle 64-bit FSL_BOOKE. The previous 32-bit work counted on the inner levels of the page table collapsing.

Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r-- arch/powerpc/mm/hugetlbpage.c | 48
1 file changed, 42 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6b1cf6478851..96178e8fb046 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -155,11 +155,28 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
155 hpdp->pd = 0; 155 hpdp->pd = 0;
156 kmem_cache_free(cachep, new); 156 kmem_cache_free(cachep, new);
157 } 157 }
158#else
159 if (!hugepd_none(*hpdp))
160 kmem_cache_free(cachep, new);
161 else
162 hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
158#endif 163#endif
159 spin_unlock(&mm->page_table_lock); 164 spin_unlock(&mm->page_table_lock);
160 return 0; 165 return 0;
161} 166}
162 167
168/*
169 * These macros define how to determine which level of the page table holds
170 * the hpdp.
171 */
172#ifdef CONFIG_PPC_FSL_BOOK3E
173#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
174#define HUGEPD_PUD_SHIFT PUD_SHIFT
175#else
176#define HUGEPD_PGD_SHIFT PUD_SHIFT
177#define HUGEPD_PUD_SHIFT PMD_SHIFT
178#endif
179
163pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) 180pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
164{ 181{
165 pgd_t *pg; 182 pgd_t *pg;
@@ -172,12 +189,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
172 addr &= ~(sz-1); 189 addr &= ~(sz-1);
173 190
174 pg = pgd_offset(mm, addr); 191 pg = pgd_offset(mm, addr);
175 if (pshift >= PUD_SHIFT) { 192
193 if (pshift >= HUGEPD_PGD_SHIFT) {
176 hpdp = (hugepd_t *)pg; 194 hpdp = (hugepd_t *)pg;
177 } else { 195 } else {
178 pdshift = PUD_SHIFT; 196 pdshift = PUD_SHIFT;
179 pu = pud_alloc(mm, pg, addr); 197 pu = pud_alloc(mm, pg, addr);
180 if (pshift >= PMD_SHIFT) { 198 if (pshift >= HUGEPD_PUD_SHIFT) {
181 hpdp = (hugepd_t *)pu; 199 hpdp = (hugepd_t *)pu;
182 } else { 200 } else {
183 pdshift = PMD_SHIFT; 201 pdshift = PMD_SHIFT;
@@ -453,14 +471,23 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
453 unsigned long start; 471 unsigned long start;
454 472
455 start = addr; 473 start = addr;
456 pmd = pmd_offset(pud, addr);
457 do { 474 do {
475 pmd = pmd_offset(pud, addr);
458 next = pmd_addr_end(addr, end); 476 next = pmd_addr_end(addr, end);
459 if (pmd_none(*pmd)) 477 if (pmd_none(*pmd))
460 continue; 478 continue;
479#ifdef CONFIG_PPC_FSL_BOOK3E
480 /*
481 * Increment next by the size of the huge mapping since
482 * there may be more than one entry at this level for a
483 * single hugepage, but all of them point to
484 * the same kmem cache that holds the hugepte.
485 */
486 next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
487#endif
461 free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, 488 free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
462 addr, next, floor, ceiling); 489 addr, next, floor, ceiling);
463 } while (pmd++, addr = next, addr != end); 490 } while (addr = next, addr != end);
464 491
465 start &= PUD_MASK; 492 start &= PUD_MASK;
466 if (start < floor) 493 if (start < floor)
@@ -487,8 +514,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
487 unsigned long start; 514 unsigned long start;
488 515
489 start = addr; 516 start = addr;
490 pud = pud_offset(pgd, addr);
491 do { 517 do {
518 pud = pud_offset(pgd, addr);
492 next = pud_addr_end(addr, end); 519 next = pud_addr_end(addr, end);
493 if (!is_hugepd(pud)) { 520 if (!is_hugepd(pud)) {
494 if (pud_none_or_clear_bad(pud)) 521 if (pud_none_or_clear_bad(pud))
@@ -496,10 +523,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
496 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, 523 hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
497 ceiling); 524 ceiling);
498 } else { 525 } else {
526#ifdef CONFIG_PPC_FSL_BOOK3E
527 /*
528 * Increment next by the size of the huge mapping since
529 * there may be more than one entry at this level for a
530 * single hugepage, but all of them point to
531 * the same kmem cache that holds the hugepte.
532 */
533 next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
534#endif
499 free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, 535 free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
500 addr, next, floor, ceiling); 536 addr, next, floor, ceiling);
501 } 537 }
502 } while (pud++, addr = next, addr != end); 538 } while (addr = next, addr != end);
503 539
504 start &= PGDIR_MASK; 540 start &= PGDIR_MASK;
505 if (start < floor) 541 if (start < floor)