author		David S. Miller <davem@sunset.davemloft.net>	2005-09-22 23:11:33 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-09-22 23:11:33 -0400
commit		bff06d552240ba7f5b49482a4865871d7bc03dc2 (patch)
tree		ee760e252023bec338921296b12bb54987bedcac /arch/sparc64/mm
parent		40fd3533c93f0062b6d1d8540961ef70fc8ab750 (diff)
[SPARC64]: Rewrite bootup sequence.
Instead of all of this cpu-specific code to remap the kernel to the
correct location, use portable firmware calls to do this instead.

What we do now is the following in position independent assembler:

	chosen_node = prom_finddevice("/chosen");
	prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu");
	vaddr = 4MB_ALIGN(current_text_addr());
	prom_translate(vaddr, &paddr_high, &paddr_low, &mode);
	prom_boot_mapping_mode = mode;
	prom_boot_mapping_phys_high = paddr_high;
	prom_boot_mapping_phys_low = paddr_low;
	prom_map(-1, 8 * 1024 * 1024, KERNBASE, paddr_low);

and that replaces the massive amount of by-hand TLB probing and
programming we used to do here.

The new code should also properly handle the case where the kernel
is already mapped at the correct address (think: future kexec
support).

Consequently, the bulk of remap_kernel() dies, as does the entirety
of arch/sparc64/prom/map.S.

We try to share some strings in the PROM library with the ones used
at bootup, and while we're here we mark input strings to oplib.h
routines with "const" where appropriate.

There are many more simplifications now possible.  For one thing, we
can consolidate the two copies we now have of a lot of cpu setup code
sitting in head.S and trampoline.S.

This is a significant step towards CONFIG_DEBUG_PAGEALLOC support.

Signed-off-by: David S. Miller <davem@davemloft.net>
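The 4MB_ALIGN step above, and the equivalent
"(prom_boot_mapping_phys_low >> 22UL) << 22UL" expression in the new
remap_kernel() below, are plain shift-based rounding. Here is a
minimal, self-contained userspace C sketch of that arithmetic; the
PAGE_SHIFT_4MB constant and the sample address are hypothetical,
chosen only for illustration:

	#include <stdio.h>

	/* A 4MB page covers 22 bits of address. */
	#define PAGE_SHIFT_4MB	22UL

	/* Round an address down to the 4MB boundary containing it,
	 * the same operation as 4MB_ALIGN in the commit message.
	 */
	static unsigned long align_4mb(unsigned long addr)
	{
		return (addr >> PAGE_SHIFT_4MB) << PAGE_SHIFT_4MB;
	}

	int main(void)
	{
		/* Hypothetical physical address as might be returned
		 * by prom_translate() for the kernel text mapping.
		 */
		unsigned long paddr_low = 0x40412345UL;

		printf("paddr_low:   %#lx\n", paddr_low);
		printf("4MB aligned: %#lx\n", align_4mb(paddr_low));
		return 0;
	}

Whatever offset within the 4MB page the firmware reports, the
rounded-down value is the page frame from which the locked TTE is
built in the new remap_kernel().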
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--	arch/sparc64/mm/init.c	98
1 file changed, 5 insertions(+), 93 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index ec47de494c1f..e0b9eebf21ce 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -505,108 +505,20 @@ static int read_obp_translations(void)
 	return n;
 }
 
-static inline void early_spitfire_errata32(void)
-{
-	/* Spitfire Errata #32 workaround */
-	/* NOTE: Using plain zero for the context value is
-	 * correct here, we are not using the Linux trap
-	 * tables yet so we should not use the special
-	 * UltraSPARC-III+ page size encodings yet.
-	 */
-	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
-			     "flush %%g6"
-			     : /* No outputs */
-			     : "r" (0), "r" (PRIMARY_CONTEXT),
-			       "i" (ASI_DMMU));
-}
-
-static void lock_remap_func_page(unsigned long phys_page)
-{
-	unsigned long tte_data = (phys_page | pgprot_val(PAGE_KERNEL));
-
-	if (tlb_type == spitfire) {
-		/* Lock this into i/d tlb entry 59 */
-		__asm__ __volatile__(
-			"stxa %%g0, [%2] %3\n\t"
-			"stxa %0, [%1] %4\n\t"
-			"membar #Sync\n\t"
-			"flush %%g6\n\t"
-			"stxa %%g0, [%2] %5\n\t"
-			"stxa %0, [%1] %6\n\t"
-			"membar #Sync\n\t"
-			"flush %%g6"
-			: /* no outputs */
-			: "r" (tte_data), "r" (59 << 3), "r" (TLB_TAG_ACCESS),
-			  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
-			  "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
-			: "memory");
-	} else {
-		/* Lock this into i/d tlb-0 entry 11 */
-		__asm__ __volatile__(
-			"stxa %%g0, [%2] %3\n\t"
-			"stxa %0, [%1] %4\n\t"
-			"membar #Sync\n\t"
-			"flush %%g6\n\t"
-			"stxa %%g0, [%2] %5\n\t"
-			"stxa %0, [%1] %6\n\t"
-			"membar #Sync\n\t"
-			"flush %%g6"
-			: /* no outputs */
-			: "r" (tte_data), "r" ((0 << 16) | (11 << 3)),
-			  "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU),
-			  "i" (ASI_DTLB_DATA_ACCESS), "i" (ASI_IMMU),
-			  "i" (ASI_ITLB_DATA_ACCESS)
-			: "memory");
-	}
-}
-
 static void remap_kernel(void)
 {
 	unsigned long phys_page, tte_vaddr, tte_data;
-	void (*remap_func)(unsigned long, unsigned long, int);
 	int tlb_ent = sparc64_highest_locked_tlbent();
 
-	early_spitfire_errata32();
-
-	if (tlb_type == spitfire)
-		phys_page = spitfire_get_dtlb_data(tlb_ent);
-	else
-		phys_page = cheetah_get_ldtlb_data(tlb_ent);
-
-	phys_page &= _PAGE_PADDR;
-	phys_page += ((unsigned long)&prom_boot_page -
-		      (unsigned long)KERNBASE);
-
-	lock_remap_func_page(phys_page);
-
 	tte_vaddr = (unsigned long) KERNBASE;
-
-	early_spitfire_errata32();
-
-	if (tlb_type == spitfire)
-		tte_data = spitfire_get_dtlb_data(tlb_ent);
-	else
-		tte_data = cheetah_get_ldtlb_data(tlb_ent);
+	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
+				 _PAGE_CP | _PAGE_CV | _PAGE_P |
+				 _PAGE_L | _PAGE_W));
 
 	kern_locked_tte_data = tte_data;
 
-	remap_func = (void *) ((unsigned long) &prom_remap -
-			       (unsigned long) &prom_boot_page);
-
-	early_spitfire_errata32();
-
-	phys_page = tte_data & _PAGE_PADDR;
-	remap_func(phys_page, KERNBASE, prom_get_mmu_ihandle());
-	if (bigkernel)
-		remap_func(phys_page + 0x400000,
-			   KERNBASE + 0x400000,
-			   prom_get_mmu_ihandle());
-
-	/* Flush out that temporary mapping. */
-	spitfire_flush_dtlb_nucleus_page(0x0);
-	spitfire_flush_itlb_nucleus_page(0x0);
-
-	/* Now lock us back into the TLBs via OBP. */
+	/* Now lock us into the TLBs via OBP. */
 	prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
 	prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
 	if (bigkernel) {