author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-07-23 19:15:58 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-08-19 20:25:10 -0400
commit		32a74949b7337726e76d69f51c48715431126c6c (patch)
tree		22383b2b4d568c7fc651e1def000049dde7156c3 /arch/powerpc/mm/tlb_nohash.c
parent		25d21ad6e799cccd097b9df2a2fefe19a7e1dfcf (diff)
powerpc/mm: Add support for SPARSEMEM_VMEMMAP on 64-bit Book3E
The base TLB support didn't include support for SPARSEMEM_VMEMMAP: although we did carve out some virtual space for it, the necessary support code wasn't there. This implements it, using 16M pages for now, though the page size could easily be changed at runtime if necessary.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
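The XXX comments in the patch below sketch the intended follow-up: probe which page sizes the TLB actually supports and pick one at runtime rather than hard-wiring 16M. As a minimal sketch of that idea, assuming the usual powerpc mmu_psize_defs[] table (where a zero .shift marks an unsupported size) and that the MMU_PAGE_* constants ascend with page size; the helper itself is hypothetical and not part of this commit:

/* Hypothetical helper, not part of this commit: choose the vmemmap
 * page size at runtime instead of hard-wiring MMU_PAGE_16M.
 * Assumes mmu_psize_defs[psize].shift == 0 means "unsupported"
 * and that MMU_PAGE_* values ascend with page size.
 */
static int __init pick_vmemmap_psize(void)
{
	int psize;

	/* Prefer the largest supported size no bigger than 16M */
	for (psize = MMU_PAGE_16M; psize > MMU_PAGE_4K; psize--)
		if (mmu_psize_defs[psize].shift)
			return psize;

	return MMU_PAGE_4K;	/* the base page size is always present */
}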
Diffstat (limited to 'arch/powerpc/mm/tlb_nohash.c')
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index d16100c9416..2fbc680c2c7 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -93,6 +93,7 @@ static inline int mmu_get_tsize(int psize)
 
 int mmu_linear_psize;		/* Page size used for the linear mapping */
 int mmu_pte_psize;		/* Page size used for PTE pages */
+int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
 int book3e_htw_enabled;	/* Is HW tablewalk enabled ? */
 unsigned long linear_map_top;	/* Top of linear mapping */
 
@@ -356,10 +357,18 @@ static void __early_init_mmu(int boot_cpu)
 	unsigned int mas4;
 
 	/* XXX This will have to be decided at runtime, but right
-	 * now our boot and TLB miss code hard wires it
+	 * now our boot and TLB miss code hard wires it. Ideally
+	 * we should find out a suitable page size and patch the
+	 * TLB miss code (either that or use the PACA to store
+	 * the value we want)
 	 */
 	mmu_linear_psize = MMU_PAGE_1G;
 
+	/* XXX This should be decided at runtime based on supported
+	 * page sizes in the TLB, but for now let's assume 16M is
+	 * always there and a good fit (which it probably is)
+	 */
+	mmu_vmemmap_psize = MMU_PAGE_16M;
+
 	/* Check if HW tablewalk is present, and if yes, enable it by:
 	 *
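For context, the page size chosen here determines the granularity at which the virtual memmap is mapped elsewhere in the mm code. Purely as a hedged sketch (map_chunk() is a hypothetical stand-in for the actual Book3E mapping routine, which lives outside this file):

/* Hypothetical sketch of how mmu_vmemmap_psize is consumed when
 * populating the virtual memmap. map_chunk() is a stand-in for
 * the real Book3E mapping code and does not exist under that
 * name in the kernel.
 */
static void __init map_vmemmap_range(unsigned long start, unsigned long end)
{
	unsigned long size = 1UL << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long addr;

	for (addr = start & ~(size - 1); addr < end; addr += size)
		map_chunk(addr, size);	/* e.g. one 16M TLB entry per step */
}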