Diffstat (limited to 'arch/powerpc/mm')
 -rw-r--r--  arch/powerpc/mm/init_64.c    | 55
 -rw-r--r--  arch/powerpc/mm/mmu_decl.h   |  7
 -rw-r--r--  arch/powerpc/mm/pgtable_64.c |  2
 -rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 11
 4 files changed, 65 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 68a821add28d..31582329cd67 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -205,6 +205,47 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
 	return 0;
 }
 
+/* On hash-based CPUs, the vmemmap is bolted in the hash table.
+ *
+ * On Book3E CPUs, the vmemmap is currently mapped in the top half of
+ * the vmalloc space using normal page tables, though the size of
+ * pages encoded in the PTEs can be different
+ */
+
+#ifdef CONFIG_PPC_BOOK3E
+static void __meminit vmemmap_create_mapping(unsigned long start,
+					     unsigned long page_size,
+					     unsigned long phys)
+{
+	/* Create a PTE encoding without page size */
+	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
+		_PAGE_KERNEL_RW;
+
+	/* PTEs only contain page size encodings up to 32M */
+	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);
+
+	/* Encode the size in the PTE */
+	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;
+
+	/* For each PTE for that area, map things. Note that we don't
+	 * increment phys because all PTEs are of the large size and
+	 * thus must have the low bits clear
+	 */
+	for (i = 0; i < page_size; i += PAGE_SIZE)
+		BUG_ON(map_kernel_page(start + i, phys, flags));
+}
+#else /* CONFIG_PPC_BOOK3E */
+static void __meminit vmemmap_create_mapping(unsigned long start,
+					     unsigned long page_size,
+					     unsigned long phys)
+{
+	int mapped = htab_bolt_mapping(start, start + page_size, phys,
+				       PAGE_KERNEL, mmu_vmemmap_psize,
+				       mmu_kernel_ssize);
+	BUG_ON(mapped < 0);
+}
+#endif /* CONFIG_PPC_BOOK3E */
+
 int __meminit vmemmap_populate(struct page *start_page,
 			       unsigned long nr_pages, int node)
 {
@@ -215,8 +256,11 @@ int __meminit vmemmap_populate(struct page *start_page,
 	/* Align to the page size of the linear mapping. */
 	start = _ALIGN_DOWN(start, page_size);
 
+	pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
+		 start_page, nr_pages, node);
+	pr_debug(" -> map %lx..%lx\n", start, end);
+
 	for (; start < end; start += page_size) {
-		int mapped;
 		void *p;
 
 		if (vmemmap_populated(start, page_size))
@@ -226,13 +270,10 @@ int __meminit vmemmap_populate(struct page *start_page,
 		if (!p)
 			return -ENOMEM;
 
-		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
-			 start, p, __pa(p));
+		pr_debug(" * %016lx..%016lx allocated at %p\n",
+			 start, start + page_size, p);
 
-		mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
-					   pgprot_val(PAGE_KERNEL),
-					   mmu_vmemmap_psize, mmu_kernel_ssize);
-		BUG_ON(mapped < 0);
+		vmemmap_create_mapping(start, page_size, __pa(p));
 	}
 
 	return 0;
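
The Book3E path above relies on two small pieces of arithmetic worth spelling out: the page-size encoding is a 4-bit value placed at PTE bits 8-11 (hence the "> 0xf" check and the "<< 8" shift), and phys is never advanced in the loop, so it must already be aligned to the large page, since every base-page PTE maps the same physical large page. Below is a standalone sketch of just that arithmetic, not kernel code; the constants (4K base pages, the 16M vmemmap page size the patch hard-wires, an invented encoding value) are assumptions for illustration only.

	/* Standalone illustration, not kernel code. */
	#include <assert.h>
	#include <stdio.h>

	#define BASE_PAGE_SIZE	0x1000UL	/* 4K base pages (assumed) */
	#define VMEMMAP_PAGE_SZ	0x1000000UL	/* 16M, as hard-wired by the patch */
	#define SIZE_ENC	0x7UL		/* invented 4-bit size encoding */

	int main(void)
	{
		unsigned long phys = 0x40000000UL;	/* example physical base */
		unsigned long flags = SIZE_ENC << 8;	/* encoding lands in bits 8-11 */
		unsigned long nptes = VMEMMAP_PAGE_SZ / BASE_PAGE_SIZE;

		assert(SIZE_ENC <= 0xf);			/* must fit the 4-bit field */
		assert((phys & (VMEMMAP_PAGE_SZ - 1)) == 0);	/* low bits must be clear */

		/* 4096 identical PTEs, one per 4K slot, all pointing at phys */
		printf("%lu PTEs, flags %#lx, phys %#lx\n", nptes, flags, phys);
		return 0;
	}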
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 5961c6b739dd..d2e5321d5ea6 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -121,7 +121,12 @@ extern unsigned int rtas_data, rtas_size;
 struct hash_pte;
 extern struct hash_pte *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;
-#endif
+
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+extern int map_kernel_page(unsigned long ea, unsigned long pa, int flags);
+#endif /* CONFIG_PPC64 */
 
 extern unsigned long ioremap_bot;
 extern unsigned long __max_low_memory;
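
With the prototype now visible in mmu_decl.h and the definition made non-static in pgtable_64.c below, other 64-bit mm code (here, the Book3E vmemmap path) can call map_kernel_page() directly. A minimal, hypothetical caller sketch, assuming the usual powerpc page-flag constants are in scope; the function name and addresses are invented for illustration.

	#include "mmu_decl.h"	/* for the map_kernel_page() prototype added above */

	/* Hypothetical example: bolt a single base page at a kernel virtual
	 * address, using the same flag combination as the Book3E vmemmap code
	 * minus the large-page size encoding.
	 */
	static int __init example_map_one_page(unsigned long ea, unsigned long pa)
	{
		return map_kernel_page(ea, pa,
				       _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW);
	}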
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 93ed1a3c8729..853d5565eed5 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -79,7 +79,7 @@ static void *early_alloc_pgtable(unsigned long size)
  * map_kernel_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-static int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
+int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
 {
 	pgd_t *pgdp;
 	pud_t *pudp;
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index d16100c9416a..2fbc680c2c71 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -93,6 +93,7 @@ static inline int mmu_get_tsize(int psize)
 
 int mmu_linear_psize;		/* Page size used for the linear mapping */
 int mmu_pte_psize;		/* Page size used for PTE pages */
+int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
 int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
 unsigned long linear_map_top;	/* Top of linear mapping */
 
@@ -356,10 +357,18 @@ static void __early_init_mmu(int boot_cpu)
 	unsigned int mas4;
 
 	/* XXX This will have to be decided at runtime, but right
-	 * now our boot and TLB miss code hard wires it
+	 * now our boot and TLB miss code hard wires it. Ideally
+	 * we should find out a suitable page size and patch the
+	 * TLB miss code (either that or use the PACA to store
+	 * the value we want)
 	 */
 	mmu_linear_psize = MMU_PAGE_1G;
 
+	/* XXX This should be decided at runtime based on supported
+	 * page sizes in the TLB, but for now let's assume 16M is
+	 * always there and a good fit (which it probably is)
+	 */
+	mmu_vmemmap_psize = MMU_PAGE_16M;
 
 	/* Check if HW tablewalk is present, and if yes, enable it by:
 	 *
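
Both XXX comments above flag that these page sizes are hard-wired for now. Purely as an illustration of the direction they point at, and not something this patch implements, a runtime choice could scan the supported page sizes and keep the largest one whose Book3E encoding fits the 4-bit PTE field. This sketch assumes mmu_psize_defs[] entries carry a zero shift when a size is unsupported, and that mmu_virtual_psize and MMU_PAGE_COUNT are available as on the other powerpc MMU flavours.

	/* Illustrative sketch only; the patch itself hard-wires MMU_PAGE_16M. */
	static int __init pick_vmemmap_psize(void)
	{
		int psize, best = mmu_virtual_psize;	/* fall back to base pages */

		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			if (!mmu_psize_defs[psize].shift)	/* size not supported */
				continue;
			if (mmu_psize_defs[psize].enc > 0xf)	/* won't fit in the PTE */
				continue;
			if (mmu_psize_defs[psize].shift > mmu_psize_defs[best].shift)
				best = psize;
		}
		return best;
	}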