author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2010-07-09 00:57:43 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2010-07-14 00:13:53 -0400
commit     f2b26c923518e03959142715a2b7615cb161cd16 (patch)
tree       ffccea3aa71c68a143ef151dfa5673cb0aa3a9b4 /arch/powerpc/mm
parent     03247157f73912c98baa918cf46b98ee5483d7f8 (diff)
powerpc/book3e: Adjust the page sizes list based on MMU config
Use the MMU config registers to scan for available direct and indirect page sizes and print out the result. Will be needed for future hugetlbfs implementation.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
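For illustration, the direct-size scan described above comes down to testing one bit of TLB0PS per page size: bit (shift - 10) being set means the 2^shift-byte size is usable as a direct (single TLB entry) size. Below is a minimal stand-alone sketch of that decode; it is not part of the patch, and the TLB0PS value and shift table are made up for illustration.

#include <stdio.h>

int main(void)
{
        /* Hypothetical TLB0PS value: bits 2, 6, 14 and 18 set, i.e. 4K,
         * 64K, 16M and 256M direct page sizes supported. */
        unsigned int tlb0ps = (1u << 2) | (1u << 6) | (1u << 14) | (1u << 18);
        /* Same shifts as the mmu_psize_defs[] entries touched by the patch */
        static const unsigned int shifts[] = { 12, 14, 16, 20, 24, 28 };
        unsigned int i;

        for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++)
                if (tlb0ps & (1u << (shifts[i] - 10)))
                        printf("%8lu KB supported as direct page size\n",
                               1ul << (shifts[i] - 10));
        return 0;
}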
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c  136
1 file changed, 104 insertions(+), 32 deletions(-)
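The indirect-size scan in setup_page_sizes() below consumes SPRN_EPTCFG in 10-bit groups: the low 5 bits of each group encode the sub-page (direct) size and the next 5 bits the indirect page size, both as log2(size in bytes) - 10. A stand-alone sketch of that decode follows; the EPTCFG value is made up for illustration (two groups pairing 4K sub-pages with 1M indirect entries and 64K sub-pages with 256M indirect entries, matching the .ind values set in the patch below), and real values are implementation dependent.

#include <stdio.h>

int main(void)
{
        /* Made-up EPTCFG value: (sps = 2, ps = 10) and (sps = 6, ps = 18),
         * i.e. 4K-in-1M and 64K-in-256M, each field = log2(size) - 10. */
        unsigned int eptcfg = (18u << 15) | (6u << 10) | (10u << 5) | 2u;
        int i;

        for (i = 0; i < 3; i++) {
                unsigned int sps = eptcfg & 0x1f;  /* sub-page (direct) size */
                unsigned int ps;                   /* indirect page size */

                eptcfg >>= 5;
                ps = eptcfg & 0x1f;
                eptcfg >>= 5;
                if (!ps || !sps)
                        continue;
                printf("%lu KB pages inside %lu KB indirect entries\n",
                       1ul << sps, 1ul << ps);
        }
        return 0;
}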
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 2ce42bf1f67e..3b10f804b735 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -46,6 +46,7 @@
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
         [MMU_PAGE_4K] = {
                 .shift = 12,
+                .ind   = 20,
                 .enc   = BOOK3E_PAGESZ_4K,
         },
         [MMU_PAGE_16K] = {
@@ -54,6 +55,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
         },
         [MMU_PAGE_64K] = {
                 .shift = 16,
+                .ind   = 28,
                 .enc   = BOOK3E_PAGESZ_64K,
         },
         [MMU_PAGE_1M] = {
@@ -62,6 +64,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
         },
         [MMU_PAGE_16M] = {
                 .shift = 24,
+                .ind   = 36,
                 .enc   = BOOK3E_PAGESZ_16M,
         },
         [MMU_PAGE_256M] = {
@@ -344,16 +347,108 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
         }
 }
 
-/*
- * Early initialization of the MMU TLB code
- */
-static void __early_init_mmu(int boot_cpu)
+static void setup_page_sizes(void)
+{
+        unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+        unsigned int tlb0ps = mfspr(SPRN_TLB0PS);
+        unsigned int eptcfg = mfspr(SPRN_EPTCFG);
+        int i, psize;
+
+        /* Look for supported direct sizes */
+        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+                struct mmu_psize_def *def = &mmu_psize_defs[psize];
+
+                if (tlb0ps & (1U << (def->shift - 10)))
+                        def->flags |= MMU_PAGE_SIZE_DIRECT;
+        }
+
+        /* Indirect page sizes supported ? */
+        if ((tlb0cfg & TLBnCFG_IND) == 0)
+                goto no_indirect;
+
+        /* Now, we only deal with one IND page size for each
+         * direct size. Hopefully all implementations today are
+         * unambiguous, but we might want to be careful in the
+         * future.
+         */
+        for (i = 0; i < 3; i++) {
+                unsigned int ps, sps;
+
+                sps = eptcfg & 0x1f;
+                eptcfg >>= 5;
+                ps = eptcfg & 0x1f;
+                eptcfg >>= 5;
+                if (!ps || !sps)
+                        continue;
+                for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+                        struct mmu_psize_def *def = &mmu_psize_defs[psize];
+
+                        if (ps == (def->shift - 10))
+                                def->flags |= MMU_PAGE_SIZE_INDIRECT;
+                        if (sps == (def->shift - 10))
+                                def->ind = ps + 10;
+                }
+        }
+ no_indirect:
+
+        /* Cleanup array and print summary */
+        pr_info("MMU: Supported page sizes\n");
+        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+                struct mmu_psize_def *def = &mmu_psize_defs[psize];
+                const char *__page_type_names[] = {
+                        "unsupported",
+                        "direct",
+                        "indirect",
+                        "direct & indirect"
+                };
+                if (def->flags == 0) {
+                        def->shift = 0;
+                        continue;
+                }
+                pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
+                        __page_type_names[def->flags & 0x3]);
+        }
+}
+
+static void setup_mmu_htw(void)
 {
         extern unsigned int interrupt_base_book3e;
         extern unsigned int exc_data_tlb_miss_htw_book3e;
         extern unsigned int exc_instruction_tlb_miss_htw_book3e;
 
         unsigned int *ibase = &interrupt_base_book3e;
+
+        /* Check if HW tablewalk is present, and if yes, enable it by:
+         *
+         *  - patching the TLB miss handlers to branch to the
+         *    one dedicates to it
+         *
+         *  - setting the global book3e_htw_enabled
+         */
+        unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+
+        if ((tlb0cfg & TLBnCFG_IND) &&
+            (tlb0cfg & TLBnCFG_PT)) {
+                /* Our exceptions vectors start with a NOP and -then- a branch
+                 * to deal with single stepping from userspace which stops on
+                 * the second instruction. Thus we need to patch the second
+                 * instruction of the exception, not the first one
+                 */
+                patch_branch(ibase + (0x1c0 / 4) + 1,
+                             (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
+                patch_branch(ibase + (0x1e0 / 4) + 1,
+                             (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
+                book3e_htw_enabled = 1;
+        }
+        pr_info("MMU: Book3E Page Tables %s\n",
+                book3e_htw_enabled ? "Enabled" : "Disabled");
+}
+
+/*
+ * Early initialization of the MMU TLB code
+ */
+static void __early_init_mmu(int boot_cpu)
+{
         unsigned int mas4;
 
         /* XXX This will have to be decided at runtime, but right
@@ -370,40 +465,17 @@ static void __early_init_mmu(int boot_cpu)
          */
         mmu_vmemmap_psize = MMU_PAGE_16M;
 
-        /* Check if HW tablewalk is present, and if yes, enable it by:
-         *
-         *  - patching the TLB miss handlers to branch to the
-         *    one dedicates to it
-         *
-         *  - setting the global book3e_htw_enabled
-         *
-         *  - Set MAS4:INDD and default page size
-         */
-
         /* XXX This code only checks for TLB 0 capabilities and doesn't
          * check what page size combos are supported by the HW. It
          * also doesn't handle the case where a separate array holds
          * the IND entries from the array loaded by the PT.
          */
         if (boot_cpu) {
-                unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
-
-                /* Check if HW loader is supported */
-                if ((tlb0cfg & TLBnCFG_IND) &&
-                    (tlb0cfg & TLBnCFG_PT)) {
-                        /* Our exceptions vectors start with a NOP and -then- a branch
-                         * to deal with single stepping from userspace which stops on
-                         * the second instruction. Thus we need to patch the second
-                         * instruction of the exception, not the first one
-                         */
-                        patch_branch(ibase + (0x1c0 / 4) + 1,
-                                     (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
-                        patch_branch(ibase + (0x1e0 / 4) + 1,
-                                     (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
-                        book3e_htw_enabled = 1;
-                }
-                pr_info("MMU: Book3E Page Tables %s\n",
-                        book3e_htw_enabled ? "Enabled" : "Disabled");
+                /* Look for supported page sizes */
+                setup_page_sizes();
+
+                /* Look for HW tablewalk support */
+                setup_mmu_htw();
         }
 
         /* Set MAS4 based on page table setting */