Diffstat (limited to 'arch/sparc')
 arch/sparc/mm/srmmu.c | 132 ++++++++++++++++++++++++++----------------------
 1 file changed, 64 insertions(+), 68 deletions(-)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 533143be9711..4b00f6982a97 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -136,8 +136,8 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
         }
 }
 
 /* Find an entry in the third-level page table.. */
-pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
+pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
 {
         void *pte;
 
@@ -166,7 +166,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
         BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
 
         offset = bit_map_string_get(&srmmu_nocache_map,
-                                size >> SRMMU_NOCACHE_BITMAP_SHIFT,
-                                align >> SRMMU_NOCACHE_BITMAP_SHIFT);
+                                    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
+                                    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
         if (offset == -1) {
                 printk("srmmu: out of nocache %d: %d/%d\n",
@@ -199,7 +199,7 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
                     vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
                 BUG();
         }
-        if (vaddr+size > srmmu_nocache_end) {
+        if (vaddr + size > srmmu_nocache_end) {
                 printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
                     vaddr, srmmu_nocache_end);
                 BUG();
@@ -212,7 +212,7 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
                 printk("Size 0x%x is too small\n", size);
                 BUG();
         }
-        if (vaddr & (size-1)) {
+        if (vaddr & (size - 1)) {
                 printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
                 BUG();
         }
@@ -330,7 +330,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 
         if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
                 return NULL;
-        page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
+        page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
         pgtable_page_ctor(page);
         return page;
 }
@@ -355,7 +355,7 @@ static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
         struct ctx_list *ctxp;
 
         ctxp = ctx_free.next;
-        if(ctxp != &ctx_free) {
+        if (ctxp != &ctx_free) {
                 remove_from_ctx_list(ctxp);
                 add_to_used_ctxlist(ctxp);
                 mm->context = ctxp->ctx_number;
@@ -363,9 +363,9 @@ static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
                 return;
         }
         ctxp = ctx_used.next;
-        if(ctxp->ctx_mm == old_mm)
+        if (ctxp->ctx_mm == old_mm)
                 ctxp = ctxp->next;
-        if(ctxp == &ctx_used)
+        if (ctxp == &ctx_used)
                 panic("out of mmu contexts");
         flush_cache_mm(ctxp->ctx_mm);
         flush_tlb_mm(ctxp->ctx_mm);
@@ -389,7 +389,7 @@ static inline void free_context(int context)
 void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
                struct task_struct *tsk)
 {
-        if(mm->context == NO_CONTEXT) {
+        if (mm->context == NO_CONTEXT) {
                 spin_lock(&srmmu_context_spinlock);
                 alloc_context(old_mm, mm);
                 spin_unlock(&srmmu_context_spinlock);
@@ -407,7 +407,7 @@ void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
 
 /* Low level IO area allocation on the SRMMU. */
 static inline void srmmu_mapioaddr(unsigned long physaddr,
-                unsigned long virt_addr, int bus_type)
+                                   unsigned long virt_addr, int bus_type)
 {
         pgd_t *pgdp;
         pmd_t *pmdp;
@@ -420,8 +420,7 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
         ptep = pte_offset_kernel(pmdp, virt_addr);
         tmp = (physaddr >> 4) | SRMMU_ET_PTE;
 
-        /*
-         * I need to test whether this is consistent over all
+        /* I need to test whether this is consistent over all
          * sun4m's. The bus_type represents the upper 4 bits of
          * 36-bit physical address on the I/O space lines...
          */
@@ -591,7 +590,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
         pmd_t *pmdp;
         pte_t *ptep;
 
-        while(start < end) {
+        while (start < end) {
                 pgdp = pgd_offset_k(start);
                 if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
                         pmdp = (pmd_t *) __srmmu_get_nocache(
@@ -602,7 +601,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
                         pgd_set(__nocache_fix(pgdp), pmdp);
                 }
                 pmdp = pmd_offset(__nocache_fix(pgdp), start);
-                if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+                if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
                         ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
                         if (ptep == NULL)
                                 early_pgtable_allocfail("pte");
@@ -622,7 +621,7 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
         pmd_t *pmdp;
         pte_t *ptep;
 
-        while(start < end) {
+        while (start < end) {
                 pgdp = pgd_offset_k(start);
                 if (pgd_none(*pgdp)) {
                         pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
@@ -632,7 +631,7 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
                         pgd_set(pgdp, pmdp);
                 }
                 pmdp = pmd_offset(pgdp, start);
-                if(srmmu_pmd_none(*pmdp)) {
+                if (srmmu_pmd_none(*pmdp)) {
                         ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
                                                              PTE_SIZE);
                         if (ptep == NULL)
@@ -677,32 +676,32 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
         int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
         unsigned long prompte;
 
-        while(start <= end) {
+        while (start <= end) {
                 if (start == 0)
                         break; /* probably wrap around */
-                if(start == 0xfef00000)
+                if (start == 0xfef00000)
                         start = KADB_DEBUGGER_BEGVM;
-                if(!(prompte = srmmu_probe(start))) {
+                if (!(prompte = srmmu_probe(start))) {
                         start += PAGE_SIZE;
                         continue;
                 }
 
                 /* A red snapper, see what it really is. */
                 what = 0;
 
-                if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
-                        if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
+                if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
+                        if (srmmu_probe((start - PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
                                 what = 1;
                 }
 
-                if(!(start & ~(SRMMU_PGDIR_MASK))) {
-                        if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
+                if (!(start & ~(SRMMU_PGDIR_MASK))) {
+                        if (srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
                             prompte)
                                 what = 2;
                 }
 
                 pgdp = pgd_offset_k(start);
-                if(what == 2) {
+                if (what == 2) {
                         *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
                         start += SRMMU_PGDIR_SIZE;
                         continue;
@@ -715,17 +714,15 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                         pgd_set(__nocache_fix(pgdp), pmdp);
                 }
                 pmdp = pmd_offset(__nocache_fix(pgdp), start);
-                if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
-                        ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
-                                                             PTE_SIZE);
+                if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+                        ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
                         if (ptep == NULL)
                                 early_pgtable_allocfail("pte");
                         memset(__nocache_fix(ptep), 0, PTE_SIZE);
                         pmd_set(__nocache_fix(pmdp), ptep);
                 }
-                if(what == 1) {
-                        /*
-                         * We bend the rule where all 16 PTPs in a pmd_t point
+                if (what == 1) {
+                        /* We bend the rule where all 16 PTPs in a pmd_t point
                          * inside the same PTE page, and we leak a perfectly
                          * good hardware PTE piece. Alternatives seem worse.
                          */
@@ -765,11 +762,11 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
 
         if (vstart < min_vaddr || vstart >= max_vaddr)
                 return vstart;
 
         if (vend > max_vaddr || vend < min_vaddr)
                 vend = max_vaddr;
 
-        while(vstart < vend) {
+        while (vstart < vend) {
                 do_large_mapping(vstart, pstart);
                 vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
         }
@@ -814,9 +811,9 @@ void __init srmmu_paging_init(void)
                 /* Find the number of contexts on the srmmu. */
                 cpunode = prom_getchild(prom_root_node);
                 num_contexts = 0;
-                while(cpunode != 0) {
+                while (cpunode != 0) {
                         prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
-                        if(!strcmp(node_str, "cpu")) {
+                        if (!strcmp(node_str, "cpu")) {
                                 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
                                 break;
                         }
@@ -824,7 +821,7 @@ void __init srmmu_paging_init(void)
                 }
         }
 
-        if(!num_contexts) {
+        if (!num_contexts) {
                 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
                 prom_halt();
         }
@@ -834,14 +831,14 @@ void __init srmmu_paging_init(void)
 
         srmmu_nocache_calcsize();
         srmmu_nocache_init();
-        srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
+        srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM-PAGE_SIZE));
         map_kernel();
 
         /* ctx table has to be physically aligned to its size */
         srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
         srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
 
-        for(i = 0; i < num_contexts; i++)
+        for (i = 0; i < num_contexts; i++)
                 srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
 
         flush_cache_all();
@@ -897,7 +894,7 @@ void __init srmmu_paging_init(void)
 
 void mmu_info(struct seq_file *m)
 {
-        seq_printf(m, 
+        seq_printf(m,
                    "MMU type\t: %s\n"
                    "contexts\t: %d\n"
                    "nocache total\t: %ld\n"
@@ -911,7 +908,7 @@ void mmu_info(struct seq_file *m)
 void destroy_context(struct mm_struct *mm)
 {
 
-        if(mm->context != NO_CONTEXT) {
+        if (mm->context != NO_CONTEXT) {
                 flush_cache_mm(mm);
                 srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
                 flush_tlb_mm(mm);
@@ -941,13 +938,12 @@ static void __init init_vac_layout(void)
 #endif
 
         nd = prom_getchild(prom_root_node);
-        while((nd = prom_getsibling(nd)) != 0) {
+        while ((nd = prom_getsibling(nd)) != 0) {
                 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
-                if(!strcmp(node_str, "cpu")) {
+                if (!strcmp(node_str, "cpu")) {
                         vac_line_size = prom_getint(nd, "cache-line-size");
                         if (vac_line_size == -1) {
-                                prom_printf("can't determine cache-line-size, "
-                                        "halting.\n");
+                                prom_printf("can't determine cache-line-size, halting.\n");
                                 prom_halt();
                         }
                         cache_lines = prom_getint(nd, "cache-nlines");
@@ -958,9 +954,9 @@ static void __init init_vac_layout(void)
 
                         vac_cache_size = cache_lines * vac_line_size;
 #ifdef CONFIG_SMP
-                        if(vac_cache_size > max_size)
+                        if (vac_cache_size > max_size)
                                 max_size = vac_cache_size;
-                        if(vac_line_size < min_line_size)
+                        if (vac_line_size < min_line_size)
                                 min_line_size = vac_line_size;
                         //FIXME: cpus not contiguous!!
                         cpu++;
@@ -971,7 +967,7 @@ static void __init init_vac_layout(void)
 #endif
                 }
         }
-        if(nd == 0) {
+        if (nd == 0) {
                 prom_printf("No CPU nodes found, halting.\n");
                 prom_halt();
         }
@@ -1082,7 +1078,7 @@ static void __init init_swift(void)
1082 "=r" (swift_rev) : 1078 "=r" (swift_rev) :
1083 "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS)); 1079 "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
1084 srmmu_name = "Fujitsu Swift"; 1080 srmmu_name = "Fujitsu Swift";
1085 switch(swift_rev) { 1081 switch (swift_rev) {
1086 case 0x11: 1082 case 0x11:
1087 case 0x20: 1083 case 0x20:
1088 case 0x23: 1084 case 0x23:
@@ -1222,10 +1218,11 @@ static void __cpuinit poke_turbosparc(void)
 
         /* Clear any crap from the cache or else... */
         turbosparc_flush_cache_all();
-        mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
+        /* Temporarily disable I & D caches */
+        mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
         mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */
         srmmu_set_mmureg(mreg);
 
         ccreg = turbosparc_get_ccreg();
 
 #ifdef TURBOSPARC_WRITEBACK
@@ -1248,7 +1245,7 @@ static void __cpuinit poke_turbosparc(void)
         default:
                 ccreg |= (TURBOSPARC_SCENABLE);
         }
-        turbosparc_set_ccreg (ccreg);
+        turbosparc_set_ccreg(ccreg);
 
         mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
         mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */
@@ -1342,7 +1339,7 @@ static void __cpuinit poke_viking(void)
         unsigned long bpreg;
 
         mreg &= ~(VIKING_TCENABLE);
-        if(smp_catch++) {
+        if (smp_catch++) {
                 /* Must disable mixed-cmd mode here for other cpu's. */
                 bpreg = viking_get_bpreg();
                 bpreg &= ~(VIKING_ACTION_MIX);
@@ -1411,7 +1408,7 @@ static void __init init_viking(void)
         unsigned long mreg = srmmu_get_mmureg();
 
         /* Ahhh, the viking. SRMMU VLSI abortion number two... */
-        if(mreg & VIKING_MMODE) {
+        if (mreg & VIKING_MMODE) {
                 srmmu_name = "TI Viking";
                 viking_mxcc_present = 0;
                 msi_set_sync();
@@ -1467,8 +1464,8 @@ static void __init get_srmmu_type(void)
         }
 
         /* Second, check for HyperSparc or Cypress. */
-        if(mod_typ == 1) {
-                switch(mod_rev) {
+        if (mod_typ == 1) {
+                switch (mod_rev) {
                 case 7:
                         /* UP or MP Hypersparc */
                         init_hypersparc();
@@ -1488,9 +1485,8 @@ static void __init get_srmmu_type(void)
                 }
                 return;
         }
 
-        /*
-         * Now Fujitsu TurboSparc. It might happen that it is
+        /* Now Fujitsu TurboSparc. It might happen that it is
          * in Swift emulation mode, so we will check later...
          */
         if (psr_typ == 0 && psr_vers == 5) {
@@ -1499,15 +1495,15 @@ static void __init get_srmmu_type(void)
         }
 
         /* Next check for Fujitsu Swift. */
-        if(psr_typ == 0 && psr_vers == 4) {
+        if (psr_typ == 0 && psr_vers == 4) {
                 phandle cpunode;
                 char node_str[128];
 
                 /* Look if it is not a TurboSparc emulating Swift... */
                 cpunode = prom_getchild(prom_root_node);
-                while((cpunode = prom_getsibling(cpunode)) != 0) {
+                while ((cpunode = prom_getsibling(cpunode)) != 0) {
                         prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
-                        if(!strcmp(node_str, "cpu")) {
+                        if (!strcmp(node_str, "cpu")) {
                                 if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
                                     prom_getintdefault(cpunode, "psr-version", 1) == 5) {
                                         init_turbosparc();
@@ -1516,13 +1512,13 @@ static void __init get_srmmu_type(void)
                                 break;
                         }
                 }
 
                 init_swift();
                 return;
         }
 
         /* Now the Viking family of srmmu. */
-        if(psr_typ == 4 &&
+        if (psr_typ == 4 &&
            ((psr_vers == 0) ||
             ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
                 init_viking();
@@ -1530,7 +1526,7 @@ static void __init get_srmmu_type(void)
         }
 
         /* Finally the Tsunami. */
-        if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
+        if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
                 init_tsunami();
                 return;
         }