about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJon Tollefson <kniht@linux.vnet.ibm.com>2008-07-24 00:27:55 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-07-24 13:47:19 -0400
commit91224346aa8c1cdaa660300a98e0b074a3a95030 (patch)
tree7b4d9604001afd55e9a3fdb6d1a414f5f2d15214
parent658013e93eb70494f7300bc90457b09a807232a4 (diff)
powerpc: define support for 16G hugepages
The huge page size is defined for 16G pages. If a hugepagesz of 16G is specified at boot-time then it becomes the huge page size instead of the default 16M.

The change in pgtable-64K.h is to the macro pte_iterate_hashed_subpages to make the increment to va (the 1 being shifted) be a long so that it is not shifted to 0. Otherwise it would create an infinite loop when the shift value is for a 16G page (when base page size is 64K).

Signed-off-by: Jon Tollefson <kniht@linux.vnet.ibm.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--arch/powerpc/mm/hugetlbpage.c62
-rw-r--r--include/asm-powerpc/pgtable-64k.h2
2 files changed, 45 insertions, 19 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index e2a650a9e533..19b1a9cec6d5 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -24,8 +24,9 @@
24#include <asm/cputable.h> 24#include <asm/cputable.h>
25#include <asm/spu.h> 25#include <asm/spu.h>
26 26
27#define HPAGE_SHIFT_64K 16 27#define PAGE_SHIFT_64K 16
28#define HPAGE_SHIFT_16M 24 28#define PAGE_SHIFT_16M 24
29#define PAGE_SHIFT_16G 34
29 30
30#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT) 31#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT)
31#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT) 32#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT)
@@ -95,7 +96,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
95static inline 96static inline
96pmd_t *hpmd_offset(pud_t *pud, unsigned long addr) 97pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
97{ 98{
98 if (HPAGE_SHIFT == HPAGE_SHIFT_64K) 99 if (HPAGE_SHIFT == PAGE_SHIFT_64K)
99 return pmd_offset(pud, addr); 100 return pmd_offset(pud, addr);
100 else 101 else
101 return (pmd_t *) pud; 102 return (pmd_t *) pud;
@@ -103,7 +104,7 @@ pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
103static inline 104static inline
104pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr) 105pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
105{ 106{
106 if (HPAGE_SHIFT == HPAGE_SHIFT_64K) 107 if (HPAGE_SHIFT == PAGE_SHIFT_64K)
107 return pmd_alloc(mm, pud, addr); 108 return pmd_alloc(mm, pud, addr);
108 else 109 else
109 return (pmd_t *) pud; 110 return (pmd_t *) pud;
@@ -260,7 +261,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
260 continue; 261 continue;
261 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); 262 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
262#else 263#else
263 if (HPAGE_SHIFT == HPAGE_SHIFT_64K) { 264 if (HPAGE_SHIFT == PAGE_SHIFT_64K) {
264 if (pud_none_or_clear_bad(pud)) 265 if (pud_none_or_clear_bad(pud))
265 continue; 266 continue;
266 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); 267 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
@@ -592,20 +593,40 @@ void set_huge_psize(int psize)
592{ 593{
593 /* Check that it is a page size supported by the hardware and 594 /* Check that it is a page size supported by the hardware and
594 * that it fits within pagetable limits. */ 595 * that it fits within pagetable limits. */
595 if (mmu_psize_defs[psize].shift && mmu_psize_defs[psize].shift < SID_SHIFT && 596 if (mmu_psize_defs[psize].shift &&
597 mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
596 (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT || 598 (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
597 mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) { 599 mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
600 mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
601 /* Return if huge page size is the same as the
602 * base page size. */
603 if (mmu_psize_defs[psize].shift == PAGE_SHIFT)
604 return;
605
598 HPAGE_SHIFT = mmu_psize_defs[psize].shift; 606 HPAGE_SHIFT = mmu_psize_defs[psize].shift;
599 mmu_huge_psize = psize; 607 mmu_huge_psize = psize;
600#ifdef CONFIG_PPC_64K_PAGES
601 hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
602#else
603 if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
604 hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
605 else
606 hugepte_shift = (PUD_SHIFT-HPAGE_SHIFT);
607#endif
608 608
609 switch (HPAGE_SHIFT) {
610 case PAGE_SHIFT_64K:
611 /* We only allow 64k hpages with 4k base page,
612 * which was checked above, and always put them
613 * at the PMD */
614 hugepte_shift = PMD_SHIFT;
615 break;
616 case PAGE_SHIFT_16M:
617 /* 16M pages can be at two different levels
618 * of pagestables based on base page size */
619 if (PAGE_SHIFT == PAGE_SHIFT_64K)
620 hugepte_shift = PMD_SHIFT;
621 else /* 4k base page */
622 hugepte_shift = PUD_SHIFT;
623 break;
624 case PAGE_SHIFT_16G:
625 /* 16G pages are always at PGD level */
626 hugepte_shift = PGDIR_SHIFT;
627 break;
628 }
629 hugepte_shift -= HPAGE_SHIFT;
609 } else 630 } else
610 HPAGE_SHIFT = 0; 631 HPAGE_SHIFT = 0;
611} 632}
@@ -621,17 +642,22 @@ static int __init hugepage_setup_sz(char *str)
621 shift = __ffs(size); 642 shift = __ffs(size);
622 switch (shift) { 643 switch (shift) {
623#ifndef CONFIG_PPC_64K_PAGES 644#ifndef CONFIG_PPC_64K_PAGES
624 case HPAGE_SHIFT_64K: 645 case PAGE_SHIFT_64K:
625 mmu_psize = MMU_PAGE_64K; 646 mmu_psize = MMU_PAGE_64K;
626 break; 647 break;
627#endif 648#endif
628 case HPAGE_SHIFT_16M: 649 case PAGE_SHIFT_16M:
629 mmu_psize = MMU_PAGE_16M; 650 mmu_psize = MMU_PAGE_16M;
630 break; 651 break;
652 case PAGE_SHIFT_16G:
653 mmu_psize = MMU_PAGE_16G;
654 break;
631 } 655 }
632 656
633 if (mmu_psize >=0 && mmu_psize_defs[mmu_psize].shift) 657 if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift) {
634 set_huge_psize(mmu_psize); 658 set_huge_psize(mmu_psize);
659 hugetlb_add_hstate(shift - PAGE_SHIFT);
660 }
635 else 661 else
636 printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size); 662 printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);
637 663
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index c5007712473f..7e54adb35596 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -138,7 +138,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
138 unsigned __split = (psize == MMU_PAGE_4K || \ 138 unsigned __split = (psize == MMU_PAGE_4K || \
139 psize == MMU_PAGE_64K_AP); \ 139 psize == MMU_PAGE_64K_AP); \
140 shift = mmu_psize_defs[psize].shift; \ 140 shift = mmu_psize_defs[psize].shift; \
141 for (index = 0; va < __end; index++, va += (1 << shift)) { \ 141 for (index = 0; va < __end; index++, va += (1L << shift)) { \
142 if (!__split || __rpte_sub_valid(rpte, index)) do { \ 142 if (!__split || __rpte_sub_valid(rpte, index)) do { \
143 143
144#define pte_iterate_hashed_end() } while(0); } } while(0) 144#define pte_iterate_hashed_end() } while(0); } } while(0)