about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Christophe Leroy <christophe.leroy@c-s.fr>	2018-11-29 09:07:19 -0500
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-12-04 03:45:01 -0500
commit3fb69c6a1a13afc5f64ef84e96d69a4be199d485 (patch)
treec2fad3fb4be61d5120dd7e7da6af2827ec77242e
parent22569b881d373153ce0a214f89920851918dd77d (diff)
powerpc/8xx: Enable 512k hugepage support with HW assistance
To use 512k pages with hardware assistance, the PTEs have to be spread every 128 bytes in the L2 table.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--	arch/powerpc/include/asm/hugetlb.h	| 4
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	| 10
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	| 3
3 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index dfb8bf236586..62a0ca02ca7d 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -74,7 +74,9 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
 	unsigned long idx = 0;
 
 	pte_t *dir = hugepd_page(hpd);
-#ifndef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_8xx
+	idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+#elif !defined(CONFIG_PPC_FSL_BOOK3E)
 	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
 #endif
 
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index bc97874d7c74..5b236621d302 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -65,6 +65,9 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (pshift >= pdshift) {
 		cachep = PGT_CACHE(PTE_T_ORDER);
 		num_hugepd = 1 << (pshift - pdshift);
+	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
+		cachep = PGT_CACHE(PTE_INDEX_SIZE);
+		num_hugepd = 1;
 	} else {
 		cachep = PGT_CACHE(pdshift - pshift);
 		num_hugepd = 1;
@@ -331,6 +334,9 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
+	else if (IS_ENABLED(CONFIG_PPC_8xx))
+		pgtable_free_tlb(tlb, hugepte,
+				 get_hugepd_cache_index(PTE_INDEX_SIZE));
 	else
 		pgtable_free_tlb(tlb, hugepte,
 				 get_hugepd_cache_index(pdshift - shift));
@@ -700,7 +706,9 @@ static int __init hugetlbpage_init(void)
 		 * if we have pdshift and shift value same, we don't
 		 * use pgt cache for hugepd.
 		 */
-		if (pdshift > shift)
+		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
+			pgtable_cache_add(PTE_INDEX_SIZE);
+		else if (pdshift > shift)
 			pgtable_cache_add(pdshift - shift);
 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
 		else
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 8ad7aab150b7..ae5d568e267f 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -97,6 +97,9 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 		.shift = 14,
 	},
 #endif
+	[MMU_PAGE_512K] = {
+		.shift = 19,
+	},
 	[MMU_PAGE_8M] = {
 		.shift = 23,
 	},