author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-07-23 19:15:58 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-08-19 20:25:10 -0400
commit     32a74949b7337726e76d69f51c48715431126c6c (patch)
tree       22383b2b4d568c7fc651e1def000049dde7156c3 /arch
parent     25d21ad6e799cccd097b9df2a2fefe19a7e1dfcf (diff)
powerpc/mm: Add support for SPARSEMEM_VMEMMAP on 64-bit Book3E
The base TLB support didn't include support for SPARSEMEM_VMEMMAP: although we did carve out some virtual space for it, the necessary support code wasn't there. This implements it using 16M pages for now, though the page size could easily be changed at runtime if necessary.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
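As a rough illustration of what the 16M choice buys (not part of the patch; the struct page size and base page size below are assumptions, not values taken from this commit), one 16M vmemmap mapping describes on the order of a gigabyte of RAM:

/* Back-of-the-envelope sketch only -- the 64-byte struct page and 4K
 * PAGE_SIZE below are assumptions, not values taken from this patch.
 */
#include <stdio.h>

int main(void)
{
        const unsigned long vmemmap_chunk = 16UL << 20; /* one 16M vmemmap mapping */
        const unsigned long page_struct   = 64;         /* assumed sizeof(struct page) */
        const unsigned long base_page     = 4096;       /* assumed base PAGE_SIZE */

        unsigned long entries = vmemmap_chunk / page_struct; /* 262144 struct pages */
        unsigned long covered = entries * base_page;          /* 1 GiB of RAM */

        printf("one 16M vmemmap mapping holds %lu struct pages\n", entries);
        printf("which describes %lu MiB of RAM at 4K base pages\n", covered >> 20);
        return 0;
}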
Diffstat (limited to 'arch')
-rw-r--r--   arch/powerpc/include/asm/mmu-book3e.h       1
-rw-r--r--   arch/powerpc/include/asm/pgtable-ppc64.h    3
-rw-r--r--   arch/powerpc/mm/init_64.c                  55
-rw-r--r--   arch/powerpc/mm/mmu_decl.h                  7
-rw-r--r--   arch/powerpc/mm/pgtable_64.c                2
-rw-r--r--   arch/powerpc/mm/tlb_nohash.c               11
6 files changed, 68 insertions, 11 deletions
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 6ddbe48d07fa..d74580469361 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -196,6 +196,7 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 #endif
 
 extern int mmu_linear_psize;
+extern int mmu_vmemmap_psize;
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 7254c5a3187c..200ec2dfa034 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -46,6 +46,7 @@
 /*
  * The vmalloc space starts at the beginning of that region, and
  * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
  */
 #define VMALLOC_START   KERN_VIRT_START
 #ifdef CONFIG_PPC_BOOK3E
@@ -83,7 +84,7 @@
 
 #define VMALLOC_REGION_ID   (REGION_ID(VMALLOC_START))
 #define KERNEL_REGION_ID    (REGION_ID(PAGE_OFFSET))
-#define VMEMMAP_REGION_ID   (0xfUL)
+#define VMEMMAP_REGION_ID   (0xfUL)   /* Server only */
 #define USER_REGION_ID      (0UL)
 
 /*
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 68a821add28d..31582329cd67 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -205,6 +205,47 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
 	return 0;
 }
 
+/* On hash-based CPUs, the vmemmap is bolted in the hash table.
+ *
+ * On Book3E CPUs, the vmemmap is currently mapped in the top half of
+ * the vmalloc space using normal page tables, though the size of
+ * pages encoded in the PTEs can be different
+ */
+
+#ifdef CONFIG_PPC_BOOK3E
+static void __meminit vmemmap_create_mapping(unsigned long start,
+                                             unsigned long page_size,
+                                             unsigned long phys)
+{
+	/* Create a PTE encoding without page size */
+	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
+		_PAGE_KERNEL_RW;
+
+	/* PTEs only contain page size encodings up to 32M */
+	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);
+
+	/* Encode the size in the PTE */
+	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;
+
+	/* For each PTE for that area, map things. Note that we don't
+	 * increment phys because all PTEs are of the large size and
+	 * thus must have the low bits clear
+	 */
+	for (i = 0; i < page_size; i += PAGE_SIZE)
+		BUG_ON(map_kernel_page(start + i, phys, flags));
+}
+#else /* CONFIG_PPC_BOOK3E */
+static void __meminit vmemmap_create_mapping(unsigned long start,
+                                             unsigned long page_size,
+                                             unsigned long phys)
+{
+	int mapped = htab_bolt_mapping(start, start + page_size, phys,
+	                               PAGE_KERNEL, mmu_vmemmap_psize,
+	                               mmu_kernel_ssize);
+	BUG_ON(mapped < 0);
+}
+#endif /* CONFIG_PPC_BOOK3E */
+
 int __meminit vmemmap_populate(struct page *start_page,
                                unsigned long nr_pages, int node)
 {
@@ -215,8 +256,11 @@ int __meminit vmemmap_populate(struct page *start_page,
 	/* Align to the page size of the linear mapping. */
 	start = _ALIGN_DOWN(start, page_size);
 
+	pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
+	         start_page, nr_pages, node);
+	pr_debug(" -> map %lx..%lx\n", start, end);
+
 	for (; start < end; start += page_size) {
-		int mapped;
 		void *p;
 
 		if (vmemmap_populated(start, page_size))
@@ -226,13 +270,10 @@ int __meminit vmemmap_populate(struct page *start_page,
 		if (!p)
 			return -ENOMEM;
 
-		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
-		         start, p, __pa(p));
+		pr_debug(" * %016lx..%016lx allocated at %p\n",
+		         start, start + page_size, p);
 
-		mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
-		                           pgprot_val(PAGE_KERNEL),
-		                           mmu_vmemmap_psize, mmu_kernel_ssize);
-		BUG_ON(mapped < 0);
+		vmemmap_create_mapping(start, page_size, __pa(p));
 	}
 
 	return 0;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 5961c6b739dd..d2e5321d5ea6 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -121,7 +121,12 @@ extern unsigned int rtas_data, rtas_size;
 struct hash_pte;
 extern struct hash_pte *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;
-#endif
+
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+extern int map_kernel_page(unsigned long ea, unsigned long pa, int flags);
+#endif /* CONFIG_PPC64 */
 
 extern unsigned long ioremap_bot;
 extern unsigned long __max_low_memory;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 93ed1a3c8729..853d5565eed5 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -79,7 +79,7 @@ static void *early_alloc_pgtable(unsigned long size)
  * map_kernel_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-static int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
+int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
 {
 	pgd_t *pgdp;
 	pud_t *pudp;
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index d16100c9416a..2fbc680c2c71 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -93,6 +93,7 @@ static inline int mmu_get_tsize(int psize)
 
 int mmu_linear_psize;		/* Page size used for the linear mapping */
 int mmu_pte_psize;		/* Page size used for PTE pages */
+int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
 int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
 unsigned long linear_map_top;	/* Top of linear mapping */
 
@@ -356,10 +357,18 @@ static void __early_init_mmu(int boot_cpu)
 	unsigned int mas4;
 
 	/* XXX This will have to be decided at runtime, but right
-	 * now our boot and TLB miss code hard wires it
+	 * now our boot and TLB miss code hard wires it. Ideally
+	 * we should find out a suitable page size and patch the
+	 * TLB miss code (either that or use the PACA to store
+	 * the value we want)
 	 */
 	mmu_linear_psize = MMU_PAGE_1G;
 
+	/* XXX This should be decided at runtime based on supported
+	 * page sizes in the TLB, but for now let's assume 16M is
+	 * always there and a good fit (which it probably is)
+	 */
+	mmu_vmemmap_psize = MMU_PAGE_16M;
 
 	/* Check if HW tablewalk is present, and if yes, enable it by:
 	 *