author	David S. Miller <davem@sunset.davemloft.net>	2005-09-22 03:12:35 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-09-22 03:12:35 -0400
commit	405599bd98b01d648becb020efb503abf19f9c9f (patch)
tree	ccf791e5a40f8c550103b0f7db054dca2973ae79 /arch
parent	b206fc4c0997ee858bc3ed35f157d7c3cda54cfd (diff)
[SPARC64]: Break up inherit_prom_mappings() into its constituent parts.

This thing was just a huge monolithic mess, so chop it up.

Signed-off-by: David S. Miller <davem@davemloft.net>
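At a glance, the patch leaves inherit_prom_mappings() as a short driver over the new helpers it introduces. A condensed sketch of the resulting call sequence, assembled from the hunks below (comments added here for orientation only):

	static void inherit_prom_mappings(void)
	{
		int n;

		/* Read the OBP "translations" property into prom_trans[]. */
		n = read_obp_translations();

		/* Build the 8K-page table covering LOW_OBP_ADDRESS..HI_OBP_ADDRESS. */
		build_obp_pgtable(n);

		/* Fix up OBP's idea about where the kernel is really mapped. */
		prom_printf("Remapping the kernel... ");
		remap_kernel();

		/* Re-read and adjust the translations after the remap. */
		readjust_prom_translations();

		prom_printf("done.\n");
	}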
Diffstat (limited to 'arch')
-rw-r--r--	arch/sparc64/mm/init.c	| 301
1 file changed, 160 insertions(+), 141 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 141d4cc5fc53..11d2187990d4 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -362,84 +362,107 @@ unsigned long prom_virt_to_phys(unsigned long promva, int *error)
 	return(base + (promva & (BASE_PAGE_SIZE - 1)));
 }
 
-static void inherit_prom_mappings(void)
+static inline int in_obp_range(unsigned long vaddr)
 {
-	unsigned long phys_page, tte_vaddr, tte_data;
-	void (*remap_func)(unsigned long, unsigned long, int);
-	pmd_t *pmdp;
-	pte_t *ptep;
-	int node, n, i, tsz;
+	return (vaddr >= LOW_OBP_ADDRESS &&
+		vaddr < HI_OBP_ADDRESS);
+}
 
-	node = prom_finddevice("/virtual-memory");
-	n = prom_getproplen(node, "translations");
-	if (n == 0 || n == -1) {
-		prom_printf("prom_mappings: Couldn't get size.\n");
-		prom_halt();
-	}
-	n += 24 * sizeof(struct linux_prom_translation);
-	if (n > sizeof(prom_trans)) {
-		prom_printf("prom_mappings: prom_trans too small, "
-			    "need %Zd bytes\n", n);
-		prom_halt();
-	}
-	tsz = n;
-	if ((n = prom_getproperty(node, "translations",
-				  (char *)&prom_trans[0], tsz)) == -1) {
-		prom_printf("prom_mappings: Couldn't get property.\n");
-		prom_halt();
+/* The obp translations are saved based on 8k pagesize, since obp can
+ * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
+ * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
+ * scheme (also, see rant in inherit_locked_prom_mappings()).
+ */
+static void build_obp_range(unsigned long start, unsigned long end, unsigned long data)
+{
+	unsigned long vaddr;
+
+	for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
+		unsigned long val;
+		pmd_t *pmdp;
+		pte_t *ptep;
+
+		pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
+		if (pmd_none(*pmdp)) {
+			ptep = __alloc_bootmem(BASE_PAGE_SIZE,
+					       BASE_PAGE_SIZE,
+					       bootmap_base);
+			if (ptep == NULL)
+				early_pgtable_allocfail("pte");
+			memset(ptep, 0, BASE_PAGE_SIZE);
+			pmd_set(pmdp, ptep);
+		}
+		ptep = (pte_t *)__pmd_page(*pmdp) +
+			((vaddr >> 13) & 0x3ff);
+
+		val = data;
+
+		/* Clear diag TTE bits. */
+		if (tlb_type == spitfire)
+			val &= ~0x0003fe0000000000UL;
+
+		set_pte_at(&init_mm, vaddr,
+			   ptep, __pte(val | _PAGE_MODIFIED));
+		data += BASE_PAGE_SIZE;
 	}
-	n = n / sizeof(struct linux_prom_translation);
+}
 
-	/* The obp translations are saved based on 8k pagesize, since obp
-	 * can use a mixture of pagesizes. Misses to the 0xf0000000 ->
-	 * 0x100000000, ie obp range, are handled in entry.S and do not
-	 * use the vpte scheme (see rant: inherit_locked_prom_mappings).
-	 */
 #define OBP_PMD_SIZE 2048
-	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
+static void build_obp_pgtable(int prom_trans_ents)
+{
+	int i;
+
+	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE,
+				  bootmap_base);
 	if (prompmd == NULL)
 		early_pgtable_allocfail("pmd");
 	memset(prompmd, 0, OBP_PMD_SIZE);
-	for (i = 0; i < n; i++) {
-		unsigned long vaddr;
-
-		if (prom_trans[i].virt >= LOW_OBP_ADDRESS && prom_trans[i].virt < HI_OBP_ADDRESS) {
-			for (vaddr = prom_trans[i].virt;
-			     ((vaddr < prom_trans[i].virt + prom_trans[i].size) &&
-			      (vaddr < HI_OBP_ADDRESS));
-			     vaddr += BASE_PAGE_SIZE) {
-				unsigned long val;
-
-				pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
-				if (pmd_none(*pmdp)) {
-					ptep = __alloc_bootmem(BASE_PAGE_SIZE,
-							       BASE_PAGE_SIZE,
-							       bootmap_base);
-					if (ptep == NULL)
-						early_pgtable_allocfail("pte");
-					memset(ptep, 0, BASE_PAGE_SIZE);
-					pmd_set(pmdp, ptep);
-				}
-				ptep = (pte_t *)__pmd_page(*pmdp) +
-						((vaddr >> 13) & 0x3ff);
+	for (i = 0; i < prom_trans_ents; i++) {
+		unsigned long start, end;
 
-				val = prom_trans[i].data;
+		if (!in_obp_range(prom_trans[i].virt))
+			continue;
 
-				/* Clear diag TTE bits. */
-				if (tlb_type == spitfire)
-					val &= ~0x0003fe0000000000UL;
+		start = prom_trans[i].virt;
+		end = start + prom_trans[i].size;
+		if (end > HI_OBP_ADDRESS)
+			end = HI_OBP_ADDRESS;
 
-				set_pte_at(&init_mm, vaddr,
-					   ptep, __pte(val | _PAGE_MODIFIED));
-				prom_trans[i].data += BASE_PAGE_SIZE;
-			}
-		}
+		build_obp_range(start, end, prom_trans[i].data);
 	}
 	prom_pmd_phys = __pa(prompmd);
+}
 
-	/* Now fixup OBP's idea about where we really are mapped. */
-	prom_printf("Remapping the kernel... ");
+/* Read OBP translations property into 'prom_trans[]'.
+ * Return the number of entries.
+ */
+static int read_obp_translations(void)
+{
+	int n, node;
 
+	node = prom_finddevice("/virtual-memory");
+	n = prom_getproplen(node, "translations");
+	if (unlikely(n == 0 || n == -1)) {
+		prom_printf("prom_mappings: Couldn't get size.\n");
+		prom_halt();
+	}
+	if (unlikely(n > sizeof(prom_trans))) {
+		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
+		prom_halt();
+	}
+
+	if ((n = prom_getproperty(node, "translations",
+				  (char *)&prom_trans[0],
+				  sizeof(prom_trans))) == -1) {
+		prom_printf("prom_mappings: Couldn't get property.\n");
+		prom_halt();
+	}
+	n = n / sizeof(struct linux_prom_translation);
+	return n;
+}
+
+static inline void early_spitfire_errata32(void)
+{
 	/* Spitfire Errata #32 workaround */
 	/* NOTE: Using plain zero for the context value is
 	 * correct here, we are not using the Linux trap
@@ -449,23 +472,13 @@ static void inherit_prom_mappings(void)
 	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
 			     "flush %%g6"
 			     : /* No outputs */
-			     : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-	switch (tlb_type) {
-	default:
-	case spitfire:
-		phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
-		break;
-
-	case cheetah:
-	case cheetah_plus:
-		phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
-		break;
-	};
+			     : "r" (0), "r" (PRIMARY_CONTEXT),
+			       "i" (ASI_DMMU));
+}
 
-	phys_page &= _PAGE_PADDR;
-	phys_page += ((unsigned long)&prom_boot_page -
-		      (unsigned long)KERNBASE);
+static void lock_remap_func_page(unsigned long phys_page)
+{
+	unsigned long tte_data = (phys_page | pgprot_val(PAGE_KERNEL));
 
 	if (tlb_type == spitfire) {
 		/* Lock this into i/d tlb entry 59 */
@@ -478,13 +491,12 @@ static void inherit_prom_mappings(void)
478 "stxa %0, [%1] %6\n\t" 491 "stxa %0, [%1] %6\n\t"
479 "membar #Sync\n\t" 492 "membar #Sync\n\t"
480 "flush %%g6" 493 "flush %%g6"
481 : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP | 494 : /* no outputs */
482 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), 495 : "r" (tte_data), "r" (59 << 3), "r" (TLB_TAG_ACCESS),
483 "r" (59 << 3), "r" (TLB_TAG_ACCESS), 496 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
484 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), 497 "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
485 "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS) 498 : "memory");
486 : "memory"); 499 } else {
487 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
488 /* Lock this into i/d tlb-0 entry 11 */ 500 /* Lock this into i/d tlb-0 entry 11 */
489 __asm__ __volatile__( 501 __asm__ __volatile__(
490 "stxa %%g0, [%2] %3\n\t" 502 "stxa %%g0, [%2] %3\n\t"
@@ -495,87 +507,80 @@ static void inherit_prom_mappings(void)
495 "stxa %0, [%1] %6\n\t" 507 "stxa %0, [%1] %6\n\t"
496 "membar #Sync\n\t" 508 "membar #Sync\n\t"
497 "flush %%g6" 509 "flush %%g6"
498 : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP | 510 : /* no outputs */
499 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), 511 : "r" (tte_data), "r" ((0 << 16) | (11 << 3)),
500 "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS), 512 "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU),
501 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), 513 "i" (ASI_DTLB_DATA_ACCESS), "i" (ASI_IMMU),
502 "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS) 514 "i" (ASI_ITLB_DATA_ACCESS)
503 : "memory"); 515 : "memory");
504 } else {
505 /* Implement me :-) */
506 BUG();
507 } 516 }
517}
518
519static void remap_kernel(void)
520{
521 unsigned long phys_page, tte_vaddr, tte_data;
522 void (*remap_func)(unsigned long, unsigned long, int);
523 int tlb_ent = sparc64_highest_locked_tlbent();
524
525 early_spitfire_errata32();
526
527 if (tlb_type == spitfire)
528 phys_page = spitfire_get_dtlb_data(tlb_ent);
529 else
530 phys_page = cheetah_get_ldtlb_data(tlb_ent);
531
532 phys_page &= _PAGE_PADDR;
533 phys_page += ((unsigned long)&prom_boot_page -
534 (unsigned long)KERNBASE);
535
536 lock_remap_func_page(phys_page);
508 537
509 tte_vaddr = (unsigned long) KERNBASE; 538 tte_vaddr = (unsigned long) KERNBASE;
510 539
511 /* Spitfire Errata #32 workaround */ 540 early_spitfire_errata32();
512 /* NOTE: Using plain zero for the context value is
513 * correct here, we are not using the Linux trap
514 * tables yet so we should not use the special
515 * UltraSPARC-III+ page size encodings yet.
516 */
517 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
518 "flush %%g6"
519 : /* No outputs */
520 : "r" (0),
521 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
522 541
523 if (tlb_type == spitfire) 542 if (tlb_type == spitfire)
524 tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()); 543 tte_data = spitfire_get_dtlb_data(tlb_ent);
525 else 544 else
526 tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent()); 545 tte_data = cheetah_get_ldtlb_data(tlb_ent);
527 546
528 kern_locked_tte_data = tte_data; 547 kern_locked_tte_data = tte_data;
529 548
530 remap_func = (void *) ((unsigned long) &prom_remap - 549 remap_func = (void *) ((unsigned long) &prom_remap -
531 (unsigned long) &prom_boot_page); 550 (unsigned long) &prom_boot_page);
532 551
552 early_spitfire_errata32();
533 553
534 /* Spitfire Errata #32 workaround */ 554 phys_page = tte_data & _PAGE_PADDR;
535 /* NOTE: Using plain zero for the context value is 555 remap_func(phys_page, KERNBASE, prom_get_mmu_ihandle());
536 * correct here, we are not using the Linux trap
537 * tables yet so we should not use the special
538 * UltraSPARC-III+ page size encodings yet.
539 */
540 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
541 "flush %%g6"
542 : /* No outputs */
543 : "r" (0),
544 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
545
546 remap_func((tlb_type == spitfire ?
547 (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
548 (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
549 (unsigned long) KERNBASE,
550 prom_get_mmu_ihandle());
551
552 if (bigkernel) 556 if (bigkernel)
553 remap_func(((tte_data + 0x400000) & _PAGE_PADDR), 557 remap_func(phys_page + 0x400000,
554 (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle()); 558 KERNBASE + 0x400000,
559 prom_get_mmu_ihandle());
555 560
556 /* Flush out that temporary mapping. */ 561 /* Flush out that temporary mapping. */
557 spitfire_flush_dtlb_nucleus_page(0x0); 562 spitfire_flush_dtlb_nucleus_page(0x0);
558 spitfire_flush_itlb_nucleus_page(0x0); 563 spitfire_flush_itlb_nucleus_page(0x0);
559 564
560 /* Now lock us back into the TLBs via OBP. */ 565 /* Now lock us back into the TLBs via OBP. */
561 prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr); 566 prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
562 prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr); 567 prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
563 if (bigkernel) { 568 if (bigkernel) {
564 prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, 569 prom_dtlb_load(tlb_ent - 1,
565 tte_vaddr + 0x400000); 570 tte_data + 0x400000,
566 prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, 571 tte_vaddr + 0x400000);
567 tte_vaddr + 0x400000); 572 prom_itlb_load(tlb_ent - 1,
573 tte_data + 0x400000,
574 tte_vaddr + 0x400000);
568 } 575 }
576}
569 577
570 /* Re-read translations property. */ 578static void readjust_prom_translations(void)
571 if ((n = prom_getproperty(node, "translations", 579{
572 (char *)&prom_trans[0], tsz)) == -1) { 580 int nents, i;
573 prom_printf("prom_mappings: Can't reread prom_trans.\n");
574 prom_halt();
575 }
576 n = n / sizeof(struct linux_prom_translation);
577 581
578 for (i = 0; i < n; i++) { 582 nents = read_obp_translations();
583 for (i = 0; i < nents; i++) {
579 unsigned long vaddr = prom_trans[i].virt; 584 unsigned long vaddr = prom_trans[i].virt;
580 unsigned long size = prom_trans[i].size; 585 unsigned long size = prom_trans[i].size;
581 586
@@ -601,6 +606,20 @@ static void inherit_prom_mappings(void)
 			}
 		}
 	}
+}
+
+static void inherit_prom_mappings(void)
+{
+	int n;
+
+	n = read_obp_translations();
+	build_obp_pgtable(n);
+
+	/* Now fixup OBP's idea about where we really are mapped. */
+	prom_printf("Remapping the kernel... ");
+	remap_kernel();
+
+	readjust_prom_translations();
 
 	prom_printf("done.\n");
 
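The new build_obp_range() helper walks the OBP range in 8K steps, taking a pmd slot from bits 23-33 of the virtual address and a pte slot from bits 13-22, as the shifts and masks in the hunk above show. A self-contained sketch of that index arithmetic, with hypothetical helper names that are not part of the patch:

	#include <stdio.h>

	#define BASE_PAGE_SHIFT	13	/* 8K pages, matching BASE_PAGE_SIZE in the patch */

	/* Hypothetical helpers mirroring the index math in build_obp_range(). */
	static unsigned long obp_pmd_index(unsigned long vaddr)
	{
		return (vaddr >> 23) & 0x7ff;	/* same expression as "prompmd + ((vaddr >> 23) & 0x7ff)" */
	}

	static unsigned long obp_pte_index(unsigned long vaddr)
	{
		return (vaddr >> BASE_PAGE_SHIFT) & 0x3ff;	/* same as "((vaddr >> 13) & 0x3ff)" */
	}

	int main(void)
	{
		/* An address inside the 0xf0000000 -> 0x100000000 OBP range
		 * mentioned in the old comment removed by this patch. */
		unsigned long vaddr = 0xf0004000UL;

		printf("pmd slot %lu, pte slot %lu\n",
		       obp_pmd_index(vaddr), obp_pte_index(vaddr));
		return 0;
	}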