 arch/powerpc/configs/corenet32_smp_defconfig |   1
 arch/powerpc/configs/corenet64_smp_defconfig |   1
 arch/powerpc/configs/mpc85xx_defconfig       |   1
 arch/powerpc/configs/mpc85xx_smp_defconfig   |   1
 arch/powerpc/include/asm/hugetlb.h           |  38
 arch/powerpc/include/asm/mmu-book3e.h        |   7
 arch/powerpc/include/asm/page_64.h           |   2
 arch/powerpc/kernel/setup_64.c               |  10
 arch/powerpc/mm/hugetlbpage-book3e.c         |  21
 arch/powerpc/mm/hugetlbpage.c                | 116
 arch/powerpc/mm/mem.c                        |   2
 arch/powerpc/mm/tlb_low_64e.S                |  36
 arch/powerpc/mm/tlb_nohash.c                 |   2
 arch/powerpc/platforms/Kconfig.cputype       |   4
 14 files changed, 155 insertions(+), 87 deletions(-)
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index f087de6ec03..8ff630267b5 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -155,6 +155,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_CRAMFS=y
 CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 782822c32d1..53741f40207 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -81,6 +81,7 @@ CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_MAC_PARTITION=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index a1e5a178a4a..542eaa11397 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -182,6 +182,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 CONFIG_ADFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_HFS_FS=m
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index dd1e41386c4..c0a957429f9 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -183,6 +183,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
 CONFIG_ADFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_HFS_FS=m
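
The four defconfig hunks above only build the hugetlbfs filesystem in; huge pages still have to be reserved before anything can map them (for FSL parts this series does the reservation from the hugepagesz=/hugepages= boot parameters, see the setup_64.c hunk below). As a smoke test once that is in place, a minimal userspace sketch, assuming MAP_HUGETLB (Linux 2.6.32+) and at least one reserved huge page of the default size:

    /* hedged example: LENGTH must be a multiple of the reserved huge page size */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #define LENGTH (4UL * 1024 * 1024)

    int main(void)
    {
            /* anonymous mapping backed by the kernel-internal hugetlbfs mount */
            void *p = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
            if (p == MAP_FAILED) {
                    perror("mmap(MAP_HUGETLB)");
                    return 1;
            }
            memset(p, 0, LENGTH);   /* touch it so the huge page faults in */
            munmap(p, LENGTH);
            puts("huge page mapped and touched");
            return 0;
    }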
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 86004930a78..dfdb95bc59a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -5,7 +5,6 @@
 #include <asm/page.h>
 
 extern struct kmem_cache *hugepte_cache;
-extern void __init reserve_hugetlb_gpages(void);
 
 static inline pte_t *hugepd_page(hugepd_t hpd)
 {
@@ -22,14 +21,14 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
 			    unsigned pdshift)
 {
 	/*
-	 * On 32-bit, we have multiple higher-level table entries that point to
-	 * the same hugepte.  Just use the first one since they're all
+	 * On FSL BookE, we have multiple higher-level table entries that
+	 * point to the same hugepte.  Just use the first one since they're all
 	 * identical.  So for that case, idx=0.
 	 */
 	unsigned long idx = 0;
 
 	pte_t *dir = hugepd_page(*hpdp);
-#ifdef CONFIG_PPC64
+#ifndef CONFIG_PPC_FSL_BOOK3E
 	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
 #endif
 
@@ -53,7 +52,8 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 }
 #endif
 
-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte);
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+			    pte_t pte);
 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
@@ -124,7 +124,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
+#ifdef HUGETLB_NEED_PRELOAD
+	/*
+	 * The "return 1" forces a call of update_mmu_cache, which will write a
+	 * TLB entry.  Without this, platforms that don't do a write of the TLB
+	 * entry in the TLB miss handler asm will fault ad infinitum.
+	 */
+	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	return 1;
+#else
 	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+#endif
 }
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
@@ -142,14 +152,24 @@ static inline void arch_release_hugepage(struct page *page)
 }
 
 #else /* ! CONFIG_HUGETLB_PAGE */
-static inline void reserve_hugetlb_gpages(void)
-{
-	pr_err("Cannot reserve gpages without hugetlb enabled\n");
-}
 static inline void flush_hugetlb_page(struct vm_area_struct *vma,
 				      unsigned long vmaddr)
 {
 }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
+/*
+ * FSL Book3E platforms require special gpage handling - the gpages
+ * are reserved early in the boot process by memblock instead of via
+ * the .dts as on IBM platforms.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+extern void __init reserve_hugetlb_gpages(void);
+#else
+static inline void reserve_hugetlb_gpages(void)
+{
+}
 #endif
 
 #endif /* _ASM_POWERPC_HUGETLB_H */
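
The huge_ptep_set_access_flags() change is easiest to read from the caller's side: generic mm code only rewrites the TLB entry when the helper reports a change, and 64-bit Book3E has no other path that writes huge TLB entries. A simplified sketch of that contract, modeled on the mm/hugetlb.c fault path of this era (not quoted verbatim):

    /*
     * With HUGETLB_NEED_PRELOAD defined, the helper above always returns 1,
     * so update_mmu_cache() always runs and reaches book3e_hugetlb_preload()
     * to write the TLB entry from C.
     */
    if (huge_ptep_set_access_flags(vma, address, ptep, entry, dirty))
            update_mmu_cache(vma, address, ptep);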
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 50210b9b014..f5f89cafebd 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -258,6 +258,13 @@ extern int mmu_vmemmap_psize;
 
 #ifdef CONFIG_PPC64
 extern unsigned long linear_map_top;
+
+/*
+ * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+ * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to
+ * return 1, indicating that the tlb requires preloading.
+ */
+#define HUGETLB_NEED_PRELOAD
 #endif
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index fb40ede6bc0..fed85e6290e 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -130,7 +130,9 @@ do { \
 
 #ifdef CONFIG_HUGETLB_PAGE
 
+#ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index fb9bb46e7e8..4cb8f1e9d04 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -35,6 +35,8 @@
 #include <linux/pci.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>
+#include <linux/hugetlb.h>
+
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -64,6 +66,7 @@
 #include <asm/mmu_context.h>
 #include <asm/code-patching.h>
 #include <asm/kvm_ppc.h>
+#include <asm/hugetlb.h>
 
 #include "setup.h"
 
@@ -217,6 +220,13 @@ void __init early_setup(unsigned long dt_ptr)
 	/* Initialize the hash table or TLB handling */
 	early_init_mmu();
 
+	/*
+	 * Reserve any gigantic pages requested on the command line.
+	 * memblock needs to have been initialized by the time this is
+	 * called since this will reserve memory.
+	 */
+	reserve_hugetlb_gpages();
+
 	DBG(" <- early_setup()\n");
 }
 
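
The placement matters: early_setup() runs after memblock has been populated from the device tree but long before the buddy allocator exists, which is the only window in which a physically contiguous gigantic page can still be carved out. A hypothetical sketch of what such a reservation amounts to (the helper names are illustrative, not the code this series adds):

    /* illustrative only: carve npages blocks of page_size out of memblock */
    static void __init sketch_reserve_gpages(phys_addr_t page_size,
                                             unsigned int npages)
    {
            while (npages--) {
                    phys_addr_t pa = memblock_alloc_base(page_size, page_size,
                                            MEMBLOCK_ALLOC_ACCESSIBLE);
                    /* remember it until the huge_boot_pages list is usable */
                    add_gpage(pa, page_size, 1);
            }
    }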
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 343ad0b8726..3bc700655fc 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -37,31 +37,32 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
 	return found;
 }
 
-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte)
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+			    pte_t pte)
 {
 	unsigned long mas1, mas2;
 	u64 mas7_3;
 	unsigned long psize, tsize, shift;
 	unsigned long flags;
+	struct mm_struct *mm;
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
-	int index, lz, ncams;
-	struct vm_area_struct *vma;
+	int index, ncams;
 #endif
 
 	if (unlikely(is_kernel_addr(ea)))
 		return;
 
+	mm = vma->vm_mm;
+
 #ifdef CONFIG_PPC_MM_SLICES
-	psize = mmu_get_tsize(get_slice_psize(mm, ea));
-	tsize = mmu_get_psize(psize);
+	psize = get_slice_psize(mm, ea);
+	tsize = mmu_get_tsize(psize);
 	shift = mmu_psize_defs[psize].shift;
 #else
-	vma = find_vma(mm, ea);
-	psize = vma_mmu_pagesize(vma);	/* returns actual size in bytes */
-	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (psize));
-	shift = 31 - lz;
-	tsize = 21 - lz;
+	psize = vma_mmu_pagesize(vma);
+	shift = __ilog2(psize);
+	tsize = shift - 10;
 #endif
 
 	/*
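
The dropped cntlz sequence computed shift = 31 - lz and so silently assumed a 32-bit operand; __ilog2() yields the page shift on either word size, and tsize = shift - 10 then matches the power-of-two TSIZE encoding in which a TLB entry covers 2^TSIZE KB (an assumption worth checking against the core's manual). A standalone check of the arithmetic:

    #include <assert.h>
    #include <stdio.h>

    /* local stand-in for the kernel's __ilog2() */
    static unsigned int ilog2_ul(unsigned long x)
    {
            unsigned int r = 0;
            while (x >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned long psize = 4UL * 1024 * 1024;  /* 4 MB huge page */
            unsigned int shift = ilog2_ul(psize);     /* 22 */
            unsigned int tsize = shift - 10;          /* 12: 2^12 KB == 4 MB */

            assert((1UL << (tsize + 10)) == psize);
            printf("psize=%lu shift=%u tsize=%u\n", psize, shift, tsize);
            return 0;
    }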
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5964371303a..79c575d3dd6 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -28,22 +28,22 @@ unsigned int HPAGE_SHIFT;
 
 /*
  * Tracks gpages after the device tree is scanned and before the
- * huge_boot_pages list is ready.  On 64-bit implementations, this is
- * just used to track 16G pages and so is a single array.  32-bit
- * implementations may have more than one gpage size due to limitations
- * of the memory allocators, so we need multiple arrays
+ * huge_boot_pages list is ready.  On non-Freescale implementations, this is
+ * just used to track 16G pages and so is a single array.  FSL-based
+ * implementations may have more than one gpage size, so we need multiple
+ * arrays
  */
-#ifdef CONFIG_PPC64
-#define MAX_NUMBER_GPAGES	1024
-static u64 gpage_freearray[MAX_NUMBER_GPAGES];
-static unsigned nr_gpages;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define MAX_NUMBER_GPAGES	128
 struct psize_gpages {
 	u64 gpage_list[MAX_NUMBER_GPAGES];
 	unsigned int nr_gpages;
 };
 static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
+#else
+#define MAX_NUMBER_GPAGES	1024
+static u64 gpage_freearray[MAX_NUMBER_GPAGES];
+static unsigned nr_gpages;
 #endif
 
 static inline int shift_to_mmu_psize(unsigned int shift)
@@ -114,12 +114,12 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	struct kmem_cache *cachep;
 	pte_t *new;
 
-#ifdef CONFIG_PPC64
-	cachep = PGT_CACHE(pdshift - pshift);
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	int i;
 	int num_hugepd = 1 << (pshift - pdshift);
 	cachep = hugepte_cache;
+#else
+	cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
 	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
@@ -131,12 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		return -ENOMEM;
 
 	spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_PPC64
-	if (!hugepd_none(*hpdp))
-		kmem_cache_free(cachep, new);
-	else
-		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	/*
 	 * We have multiple higher-level entries that point to the same
 	 * actual pte location.  Fill in each as we go and backtrack on error.
@@ -155,11 +150,28 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 			hpdp->pd = 0;
 		kmem_cache_free(cachep, new);
 	}
+#else
+	if (!hugepd_none(*hpdp))
+		kmem_cache_free(cachep, new);
+	else
+		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 #endif
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }
 
+/*
+ * These macros define how to determine which level of the page table holds
+ * the hpdp.
+ */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
+#define HUGEPD_PUD_SHIFT PUD_SHIFT
+#else
+#define HUGEPD_PGD_SHIFT PUD_SHIFT
+#define HUGEPD_PUD_SHIFT PMD_SHIFT
+#endif
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	pgd_t *pg;
@@ -172,12 +184,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 	addr &= ~(sz-1);
 
 	pg = pgd_offset(mm, addr);
-	if (pshift >= PUD_SHIFT) {
+
+	if (pshift >= HUGEPD_PGD_SHIFT) {
 		hpdp = (hugepd_t *)pg;
 	} else {
 		pdshift = PUD_SHIFT;
 		pu = pud_alloc(mm, pg, addr);
-		if (pshift >= PMD_SHIFT) {
+		if (pshift >= HUGEPD_PUD_SHIFT) {
 			hpdp = (hugepd_t *)pu;
 		} else {
 			pdshift = PMD_SHIFT;
@@ -197,7 +210,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 	return hugepte_offset(hpdp, addr, pdshift);
 }
 
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
  */
@@ -317,7 +330,7 @@ void __init reserve_hugetlb_gpages(void)
 	}
 }
 
-#else /* PPC64 */
+#else /* !PPC_FSL_BOOK3E */
 
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
@@ -355,7 +368,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	return 0;
 }
 
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 #define HUGEPD_FREELIST_SIZE \
 	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
 
@@ -415,11 +428,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	unsigned long pdmask = ~((1UL << pdshift) - 1);
 	unsigned int num_hugepd = 1;
 
-#ifdef CONFIG_PPC64
-	unsigned int shift = hugepd_shift(*hpdp);
-#else
-	/* Note: On 32-bit the hpdp may be the first of several */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	/* Note: On fsl the hpdp may be the first of several */
 	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
+#else
+	unsigned int shift = hugepd_shift(*hpdp);
 #endif
 
 	start &= pdmask;
@@ -437,10 +450,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	hpdp->pd = 0;
 
 	tlb->need_flush = 1;
-#ifdef CONFIG_PPC64
-	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
-#else
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	hugepd_free(tlb, hugepte);
+#else
+	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
 #endif
 }
 
@@ -453,14 +467,23 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	unsigned long start;
 
 	start = addr;
-	pmd = pmd_offset(pud, addr);
 	do {
+		pmd = pmd_offset(pud, addr);
 		next = pmd_addr_end(addr, end);
 		if (pmd_none(*pmd))
 			continue;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+		/*
+		 * Increment next by the size of the huge mapping since
+		 * there may be more than one entry at this level for a
+		 * single hugepage, but all of them point to
+		 * the same kmem cache that holds the hugepte.
+		 */
+		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+#endif
 		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
 				  addr, next, floor, ceiling);
-	} while (pmd++, addr = next, addr != end);
+	} while (addr = next, addr != end);
 
 	start &= PUD_MASK;
 	if (start < floor)
@@ -487,8 +510,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	unsigned long start;
 
 	start = addr;
-	pud = pud_offset(pgd, addr);
 	do {
+		pud = pud_offset(pgd, addr);
 		next = pud_addr_end(addr, end);
 		if (!is_hugepd(pud)) {
 			if (pud_none_or_clear_bad(pud))
@@ -496,10 +519,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
 					       ceiling);
 		} else {
+#ifdef CONFIG_PPC_FSL_BOOK3E
+			/*
+			 * Increment next by the size of the huge mapping since
+			 * there may be more than one entry at this level for a
+			 * single hugepage, but all of them point to
+			 * the same kmem cache that holds the hugepte.
+			 */
+			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+#endif
 			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
 					  addr, next, floor, ceiling);
 		}
-	} while (pud++, addr = next, addr != end);
+	} while (addr = next, addr != end);
 
 	start &= PGDIR_MASK;
 	if (start < floor)
@@ -554,12 +586,12 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 				continue;
 			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 		} else {
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
 			/*
 			 * Increment next by the size of the huge mapping since
-			 * on 32-bit there may be more than one entry at the pgd
-			 * level for a single hugepage, but all of them point to
-			 * the same kmem cache that holds the hugepte.
+			 * there may be more than one entry at the pgd level
+			 * for a single hugepage, but all of them point to the
+			 * same kmem cache that holds the hugepte.
 			 */
 			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
 #endif
@@ -697,19 +729,17 @@ int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
 	return 1;
 }
 
+#ifdef CONFIG_PPC_MM_SLICES
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					unsigned long len, unsigned long pgoff,
 					unsigned long flags)
 {
-#ifdef CONFIG_PPC_MM_SLICES
 	struct hstate *hstate = hstate_file(file);
 	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
 
 	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
-#else
-	return get_unmapped_area(file, addr, len, pgoff, flags);
-#endif
 }
+#endif
 
 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 {
@@ -783,7 +813,7 @@ static int __init hugepage_setup_sz(char *str)
 }
 __setup("hugepagesz=", hugepage_setup_sz);
 
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 struct kmem_cache *hugepte_cache;
 static int __init hugetlbpage_init(void)
 {
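
Most of the churn in this file follows from one fact: on FSL Book3E a huge page can span several entries at the level where it is installed, so __hugepte_alloc() writes the same hugepte pointer into 1 << (pshift - pdshift) consecutive entries, and the free walks step by hugepd_shift() rather than by entry. A standalone sketch of the replication arithmetic (the shift values are illustrative, not from any particular config):

    #include <stdio.h>

    int main(void)
    {
            unsigned int pshift = 22;   /* 4 MB huge page */
            unsigned int pdshift = 21;  /* each table entry covers 2 MB */

            /* alloc side: consecutive entries sharing one hugepte */
            unsigned int num_hugepd = 1u << (pshift - pdshift);

            /* free side: advance past the whole mapping in one step */
            unsigned long next_step = 1ul << pshift;

            printf("%u entries share one hugepte; walk advances 0x%lx\n",
                   num_hugepd, next_step);
            return 0;
    }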
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 22563b9664c..83d819f3086 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -553,7 +553,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
 	&& defined(CONFIG_HUGETLB_PAGE)
 	if (is_vm_hugetlb_page(vma))
-		book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
+		book3e_hugetlb_preload(vma, address, *ptep);
 #endif
 }
 
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index dc4a5f385e4..ff672bd8fea 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -94,11 +94,11 @@
 
 	srdi	r15,r16,60		/* get region */
 	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
-	bne-	dtlb_miss_fault_bolted
+	bne-	dtlb_miss_fault_bolted	/* Bail if fault addr is invalid */
 
 	rlwinm	r10,r11,32-19,27,27
 	rlwimi	r10,r11,32-16,19,19
-	cmpwi	r15,0
+	cmpwi	r15,0			/* user vs kernel check */
 	ori	r10,r10,_PAGE_PRESENT
 	oris	r11,r10,_PAGE_ACCESSED@h
 
@@ -120,44 +120,38 @@ tlb_miss_common_bolted:
 	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
 	cmpldi	cr0,r14,0
 	clrrdi	r15,r15,3
-	beq	tlb_miss_fault_bolted
+	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */
 
 BEGIN_MMU_FTR_SECTION
 	/* Set the TLB reservation and search for existing entry. Then load
 	 * the entry.
 	 */
 	PPC_TLBSRX_DOT(0,r16)
-	ldx	r14,r14,r15
-	beq	normal_tlb_miss_done
+	ldx	r14,r14,r15		/* grab pgd entry */
+	beq	normal_tlb_miss_done	/* tlb exists already, bail */
 MMU_FTR_SECTION_ELSE
-	ldx	r14,r14,r15
+	ldx	r14,r14,r15		/* grab pgd entry */
 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
 
 #ifndef CONFIG_PPC_64K_PAGES
 	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-
-	ldx	r14,r14,r15
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
+	ldx	r14,r14,r15		/* grab pud entry */
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-
-	ldx	r14,r14,r15
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted
+	ldx	r14,r14,r15		/* Grab pmd entry */
 
 	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-
-	ldx	r14,r14,r15
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted
+	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */
 
 	/* Check if required permissions are met */
 	andc.	r15,r11,r14
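
The cmpdi/bge pairs replace the old unsigned null checks with a signed test that rejects two cases at once: a normal page-table pointer lives in the kernel's 0xc... region and is negative as a signed 64-bit value, while an empty entry is zero and a hugepd (top PD_HUGE bit cleared, as in the __hugepte_alloc() hunk above) is positive. Anything >= 0 therefore punts to the C fault path, which knows how to install huge TLB entries. A small C model of the test:

    #include <stdbool.h>
    #include <stdio.h>

    /* models the asm's "cmpdi cr0,r14,0; bge tlb_miss_fault_bolted" */
    static bool bolted_walk_bails(unsigned long long entry)
    {
            return (long long)entry >= 0;
    }

    int main(void)
    {
            printf("%d\n", bolted_walk_bails(0x0ULL));                 /* 1: empty */
            printf("%d\n", bolted_walk_bails(0xc000000002345000ULL));  /* 0: pgtable */
            printf("%d\n", bolted_walk_bails(0x0000000002345016ULL));  /* 1: hugepd */
            return 0;
    }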
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 4e13d6f9023..b2c65c66085 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -52,7 +52,7 @@
  *    indirect page table entries.
  */
 #ifdef CONFIG_PPC_BOOK3E_MMU
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	[MMU_PAGE_4K] = {
 		.shift	= 12,
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 836b44286b3..425db18580a 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -174,7 +174,6 @@ config BOOKE
 config FSL_BOOKE
 	bool
 	depends on (E200 || E500) && PPC32
-	select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT
 	default y
 
 # this is for common code between PPC32 & PPC64 FSL BOOKE
@@ -182,6 +181,7 @@ config PPC_FSL_BOOK3E
 	bool
 	select FSL_EMB_PERFMON
 	select PPC_SMP_MUXED_IPI
+	select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
 	default y if FSL_BOOKE
 
 config PTE_64BIT
@@ -309,7 +309,7 @@ config PPC_BOOK3E_MMU
 
 config PPC_MM_SLICES
 	bool
-	default y if (PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
+	default y if (!PPC_FSL_BOOK3E && PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
 	default n
 
 config VIRT_CPU_ACCOUNTING