author     Jeff Garzik <jgarzik@pobox.com>   2005-10-03 19:46:45 -0400
committer  Jeff Garzik <jgarzik@pobox.com>   2005-10-03 19:46:45 -0400
commit     2b235826098bb653982894dfc3f70fd029f6c2e4
tree       ec525ddba74f58017a3f145bb68cf94376648c1c /arch/sparc64/kernel
parent     b4b52db71529bbe46da914eda772fb574914c94d
parent     c77054e518d9163578cfcad09826d7b959f95ece
Merge branch 'master'
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/cpu.c           |   4
-rw-r--r--  arch/sparc64/kernel/devices.c       |  22
-rw-r--r--  arch/sparc64/kernel/dtlb_backend.S  |  13
-rw-r--r--  arch/sparc64/kernel/dtlb_base.S     |   8
-rw-r--r--  arch/sparc64/kernel/entry.S         | 180
-rw-r--r--  arch/sparc64/kernel/head.S          | 563
-rw-r--r--  arch/sparc64/kernel/ktlb.S          | 198
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c    |   2
-rw-r--r--  arch/sparc64/kernel/ptrace.c        |  14
-rw-r--r--  arch/sparc64/kernel/setup.c         |  50
-rw-r--r--  arch/sparc64/kernel/smp.c           |  21
-rw-r--r--  arch/sparc64/kernel/sys32.S         | 170
-rw-r--r--  arch/sparc64/kernel/trampoline.S    |  16
-rw-r--r--  arch/sparc64/kernel/traps.c         | 100
-rw-r--r--  arch/sparc64/kernel/una_asm.S       |  65
-rw-r--r--  arch/sparc64/kernel/unaligned.c     |  53
-rw-r--r--  arch/sparc64/kernel/us3_cpufreq.c   |   5
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S   |   3
18 files changed, 668 insertions(+), 819 deletions(-)
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 48756958116b..77ef5df4e5a7 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -39,6 +39,8 @@ struct cpu_fp_info linux_sparc_fpu[] = {
39 | { 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"}, | 39 | { 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"}, |
40 | { 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"}, | 40 | { 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"}, |
41 | { 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"}, | 41 | { 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"}, |
42 | { 0x3e, 0x19, 0, "UltraSparc IV+ integrated FPU"}, | ||
43 | { 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU"}, | ||
42 | }; | 44 | }; |
43 | 45 | ||
44 | #define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info)) | 46 | #define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info)) |
@@ -53,6 +55,8 @@ struct cpu_iu_info linux_sparc_chips[] = {
53 | { 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"}, | 55 | { 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"}, |
54 | { 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"}, | 56 | { 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"}, |
55 | { 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"}, | 57 | { 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"}, |
58 | { 0x3e, 0x19, "TI UltraSparc IV+ (Panther)"}, | ||
59 | { 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)"}, | ||
56 | }; | 60 | }; |
57 | 61 | ||
58 | #define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info)) | 62 | #define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info)) |
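The cpu.c hunks above only add (manufacturer, implementation) rows for Panther and Serrano to the existing identification tables; the probe code that walks them is untouched by this merge. For orientation, such tables are typically consumed by a linear scan keyed on the values read from the CPU's VER register. The C sketch below illustrates that pattern only; the field names, helper name, and main() driver are assumptions for the example, not code from cpu.c.

/* Illustrative sketch, not kernel code: field and function names are assumed. */
#include <stdio.h>
#include <stddef.h>

struct cpu_iu_info {
	short manuf;		/* manufacturer code, e.g. 0x3e */
	short impl;		/* implementation code, e.g. 0x19 (Panther) */
	char *cpu_name;
};

static struct cpu_iu_info linux_sparc_chips[] = {
	{ 0x3e, 0x18, "TI UltraSparc IV (Jaguar)" },
	{ 0x3e, 0x19, "TI UltraSparc IV+ (Panther)" },		/* added above */
	{ 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)" },	/* added above */
};

#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))

static const char *sparc_cpu_name(short manuf, short impl)
{
	size_t i;

	for (i = 0; i < NSPARCCHIPS; i++)
		if (linux_sparc_chips[i].manuf == manuf &&
		    linux_sparc_chips[i].impl == impl)
			return linux_sparc_chips[i].cpu_name;
	return "Unknown CPU";
}

int main(void)
{
	printf("%s\n", sparc_cpu_name(0x3e, 0x19));	/* prints the Panther entry */
	return 0;
}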
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index d710274e516b..df9a1ca8fd77 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -135,6 +135,28 @@ void __init device_scan(void)
135 | cpu_data(0).clock_tick = prom_getintdefault(cpu_node, | 135 | cpu_data(0).clock_tick = prom_getintdefault(cpu_node, |
136 | "clock-frequency", | 136 | "clock-frequency", |
137 | 0); | 137 | 0); |
138 | cpu_data(0).dcache_size = prom_getintdefault(cpu_node, | ||
139 | "dcache-size", | ||
140 | 16 * 1024); | ||
141 | cpu_data(0).dcache_line_size = | ||
142 | prom_getintdefault(cpu_node, "dcache-line-size", 32); | ||
143 | cpu_data(0).icache_size = prom_getintdefault(cpu_node, | ||
144 | "icache-size", | ||
145 | 16 * 1024); | ||
146 | cpu_data(0).icache_line_size = | ||
147 | prom_getintdefault(cpu_node, "icache-line-size", 32); | ||
148 | cpu_data(0).ecache_size = prom_getintdefault(cpu_node, | ||
149 | "ecache-size", | ||
150 | 4 * 1024 * 1024); | ||
151 | cpu_data(0).ecache_line_size = | ||
152 | prom_getintdefault(cpu_node, "ecache-line-size", 64); | ||
153 | printk("CPU[0]: Caches " | ||
154 | "D[sz(%d):line_sz(%d)] " | ||
155 | "I[sz(%d):line_sz(%d)] " | ||
156 | "E[sz(%d):line_sz(%d)]\n", | ||
157 | cpu_data(0).dcache_size, cpu_data(0).dcache_line_size, | ||
158 | cpu_data(0).icache_size, cpu_data(0).icache_line_size, | ||
159 | cpu_data(0).ecache_size, cpu_data(0).ecache_line_size); | ||
138 | } | 160 | } |
139 | #endif | 161 | #endif |
140 | 162 | ||
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
index 538522848ad4..acc889a7f9c1 100644
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -9,17 +9,7 @@
9 | #include <asm/pgtable.h> | 9 | #include <asm/pgtable.h> |
10 | #include <asm/mmu.h> | 10 | #include <asm/mmu.h> |
11 | 11 | ||
12 | #if PAGE_SHIFT == 13 | 12 | #define VALID_SZ_BITS (_PAGE_VALID | _PAGE_SZBITS) |
13 | #define SZ_BITS _PAGE_SZ8K | ||
14 | #elif PAGE_SHIFT == 16 | ||
15 | #define SZ_BITS _PAGE_SZ64K | ||
16 | #elif PAGE_SHIFT == 19 | ||
17 | #define SZ_BITS _PAGE_SZ512K | ||
18 | #elif PAGE_SHIFT == 22 | ||
19 | #define SZ_BITS _PAGE_SZ4MB | ||
20 | #endif | ||
21 | |||
22 | #define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS) | ||
23 | 13 | ||
24 | #define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P ) | 14 | #define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P ) |
25 | #define VPTE_SHIFT (PAGE_SHIFT - 3) | 15 | #define VPTE_SHIFT (PAGE_SHIFT - 3) |
@@ -163,7 +153,6 @@ sparc64_vpte_continue:
163 | stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS | 153 | stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS |
164 | retry ! Load PTE once again | 154 | retry ! Load PTE once again |
165 | 155 | ||
166 | #undef SZ_BITS | ||
167 | #undef VALID_SZ_BITS | 156 | #undef VALID_SZ_BITS |
168 | #undef VPTE_SHIFT | 157 | #undef VPTE_SHIFT |
169 | #undef VPTE_BITS | 158 | #undef VPTE_BITS |
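The dtlb_backend.S change above replaces the file-local #if ladder that picked SZ_BITS from PAGE_SHIFT with a single _PAGE_SZBITS macro. That only works if an equivalent, centralized definition exists elsewhere (presumably in the asm-sparc64 pgtable.h that the file already includes). The preprocessor sketch below shows the kind of definition being assumed; it is reconstructed from the ladder removed above, not quoted from that header.

/* Assumed centralized definition (illustration only -- not quoted from pgtable.h). */
#if PAGE_SHIFT == 13
#define _PAGE_SZBITS	_PAGE_SZ8K
#elif PAGE_SHIFT == 16
#define _PAGE_SZBITS	_PAGE_SZ64K
#elif PAGE_SHIFT == 19
#define _PAGE_SZBITS	_PAGE_SZ512K
#elif PAGE_SHIFT == 22
#define _PAGE_SZBITS	_PAGE_SZ4MB
#else
#error Unsupported PAGE_SHIFT
#endif

/* With that in place, dtlb_backend.S can build its TTE template directly: */
#define VALID_SZ_BITS	(_PAGE_VALID | _PAGE_SZBITS)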
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
index ded2fed23fcc..702d349c1e88 100644
--- a/arch/sparc64/kernel/dtlb_base.S
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -71,7 +71,7 @@
71 | from_tl1_trap: | 71 | from_tl1_trap: |
72 | rdpr %tl, %g5 ! For TL==3 test | 72 | rdpr %tl, %g5 ! For TL==3 test |
73 | CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset | 73 | CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset |
74 | be,pn %xcc, 3f ! Yep, special processing | 74 | be,pn %xcc, kvmap ! Yep, special processing |
75 | CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset | 75 | CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset |
76 | cmp %g5, 4 ! Last trap level? | 76 | cmp %g5, 4 ! Last trap level? |
77 | be,pn %xcc, longpath ! Yep, cannot risk VPTE miss | 77 | be,pn %xcc, longpath ! Yep, cannot risk VPTE miss |
@@ -83,9 +83,9 @@ from_tl1_trap:
83 | nop ! Delay-slot | 83 | nop ! Delay-slot |
84 | 9: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB | 84 | 9: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB |
85 | retry ! Trap return | 85 | retry ! Trap return |
86 | 3: brlz,pt %g4, 9b ! Kernel virtual map? | 86 | nop |
87 | xor %g2, %g4, %g5 ! Finish bit twiddles | 87 | nop |
88 | ba,a,pt %xcc, kvmap ! Yep, go check for obp/vmalloc | 88 | nop |
89 | 89 | ||
90 | /* DTLB ** ICACHE line 3: winfixups+real_faults */ | 90 | /* DTLB ** ICACHE line 3: winfixups+real_faults */ |
91 | longpath: | 91 | longpath: |
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index b48349527853..2879b1072921 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -30,159 +30,6 @@
30 | .text | 30 | .text |
31 | .align 32 | 31 | .align 32 |
32 | 32 | ||
33 | .globl sparc64_vpte_patchme1 | ||
34 | .globl sparc64_vpte_patchme2 | ||
35 | /* | ||
36 | * On a second level vpte miss, check whether the original fault is to the OBP | ||
37 | * range (note that this is only possible for instruction miss, data misses to | ||
38 | * obp range do not use vpte). If so, go back directly to the faulting address. | ||
39 | * This is because we want to read the tpc, otherwise we have no way of knowing | ||
40 | * the 8k aligned faulting address if we are using >8k kernel pagesize. This | ||
41 | * also ensures no vpte range addresses are dropped into tlb while obp is | ||
42 | * executing (see inherit_locked_prom_mappings() rant). | ||
43 | */ | ||
44 | sparc64_vpte_nucleus: | ||
45 | /* Note that kvmap below has verified that the address is | ||
46 | * in the range MODULES_VADDR --> VMALLOC_END already. So | ||
47 | * here we need only check if it is an OBP address or not. | ||
48 | */ | ||
49 | sethi %hi(LOW_OBP_ADDRESS), %g5 | ||
50 | cmp %g4, %g5 | ||
51 | blu,pn %xcc, sparc64_vpte_patchme1 | ||
52 | mov 0x1, %g5 | ||
53 | sllx %g5, 32, %g5 | ||
54 | cmp %g4, %g5 | ||
55 | blu,pn %xcc, obp_iaddr_patch | ||
56 | nop | ||
57 | |||
58 | /* These two instructions are patched by paginig_init(). */ | ||
59 | sparc64_vpte_patchme1: | ||
60 | sethi %hi(0), %g5 | ||
61 | sparc64_vpte_patchme2: | ||
62 | or %g5, %lo(0), %g5 | ||
63 | |||
64 | /* With kernel PGD in %g5, branch back into dtlb_backend. */ | ||
65 | ba,pt %xcc, sparc64_kpte_continue | ||
66 | andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */ | ||
67 | |||
68 | vpte_noent: | ||
69 | /* Restore previous TAG_ACCESS, %g5 is zero, and we will | ||
70 | * skip over the trap instruction so that the top level | ||
71 | * TLB miss handler will thing this %g5 value is just an | ||
72 | * invalid PTE, thus branching to full fault processing. | ||
73 | */ | ||
74 | mov TLB_SFSR, %g1 | ||
75 | stxa %g4, [%g1 + %g1] ASI_DMMU | ||
76 | done | ||
77 | |||
78 | .globl obp_iaddr_patch | ||
79 | obp_iaddr_patch: | ||
80 | /* These two instructions patched by inherit_prom_mappings(). */ | ||
81 | sethi %hi(0), %g5 | ||
82 | or %g5, %lo(0), %g5 | ||
83 | |||
84 | /* Behave as if we are at TL0. */ | ||
85 | wrpr %g0, 1, %tl | ||
86 | rdpr %tpc, %g4 /* Find original faulting iaddr */ | ||
87 | srlx %g4, 13, %g4 /* Throw out context bits */ | ||
88 | sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */ | ||
89 | |||
90 | /* Restore previous TAG_ACCESS. */ | ||
91 | mov TLB_SFSR, %g1 | ||
92 | stxa %g4, [%g1 + %g1] ASI_IMMU | ||
93 | |||
94 | /* Get PMD offset. */ | ||
95 | srlx %g4, 23, %g6 | ||
96 | and %g6, 0x7ff, %g6 | ||
97 | sllx %g6, 2, %g6 | ||
98 | |||
99 | /* Load PMD, is it valid? */ | ||
100 | lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
101 | brz,pn %g5, longpath | ||
102 | sllx %g5, 11, %g5 | ||
103 | |||
104 | /* Get PTE offset. */ | ||
105 | srlx %g4, 13, %g6 | ||
106 | and %g6, 0x3ff, %g6 | ||
107 | sllx %g6, 3, %g6 | ||
108 | |||
109 | /* Load PTE. */ | ||
110 | ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
111 | brgez,pn %g5, longpath | ||
112 | nop | ||
113 | |||
114 | /* TLB load and return from trap. */ | ||
115 | stxa %g5, [%g0] ASI_ITLB_DATA_IN | ||
116 | retry | ||
117 | |||
118 | .globl obp_daddr_patch | ||
119 | obp_daddr_patch: | ||
120 | /* These two instructions patched by inherit_prom_mappings(). */ | ||
121 | sethi %hi(0), %g5 | ||
122 | or %g5, %lo(0), %g5 | ||
123 | |||
124 | /* Get PMD offset. */ | ||
125 | srlx %g4, 23, %g6 | ||
126 | and %g6, 0x7ff, %g6 | ||
127 | sllx %g6, 2, %g6 | ||
128 | |||
129 | /* Load PMD, is it valid? */ | ||
130 | lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
131 | brz,pn %g5, longpath | ||
132 | sllx %g5, 11, %g5 | ||
133 | |||
134 | /* Get PTE offset. */ | ||
135 | srlx %g4, 13, %g6 | ||
136 | and %g6, 0x3ff, %g6 | ||
137 | sllx %g6, 3, %g6 | ||
138 | |||
139 | /* Load PTE. */ | ||
140 | ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
141 | brgez,pn %g5, longpath | ||
142 | nop | ||
143 | |||
144 | /* TLB load and return from trap. */ | ||
145 | stxa %g5, [%g0] ASI_DTLB_DATA_IN | ||
146 | retry | ||
147 | |||
148 | /* | ||
149 | * On a first level data miss, check whether this is to the OBP range (note | ||
150 | * that such accesses can be made by prom, as well as by kernel using | ||
151 | * prom_getproperty on "address"), and if so, do not use vpte access ... | ||
152 | * rather, use information saved during inherit_prom_mappings() using 8k | ||
153 | * pagesize. | ||
154 | */ | ||
155 | .align 32 | ||
156 | kvmap: | ||
157 | sethi %hi(MODULES_VADDR), %g5 | ||
158 | cmp %g4, %g5 | ||
159 | blu,pn %xcc, longpath | ||
160 | mov (VMALLOC_END >> 24), %g5 | ||
161 | sllx %g5, 24, %g5 | ||
162 | cmp %g4, %g5 | ||
163 | bgeu,pn %xcc, longpath | ||
164 | nop | ||
165 | |||
166 | kvmap_check_obp: | ||
167 | sethi %hi(LOW_OBP_ADDRESS), %g5 | ||
168 | cmp %g4, %g5 | ||
169 | blu,pn %xcc, kvmap_vmalloc_addr | ||
170 | mov 0x1, %g5 | ||
171 | sllx %g5, 32, %g5 | ||
172 | cmp %g4, %g5 | ||
173 | blu,pn %xcc, obp_daddr_patch | ||
174 | nop | ||
175 | |||
176 | kvmap_vmalloc_addr: | ||
177 | /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */ | ||
178 | ldxa [%g3 + %g6] ASI_N, %g5 | ||
179 | brgez,pn %g5, longpath | ||
180 | nop | ||
181 | |||
182 | /* PTE is valid, load into TLB and return from trap. */ | ||
183 | stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB | ||
184 | retry | ||
185 | |||
186 | /* This is trivial with the new code... */ | 33 | /* This is trivial with the new code... */ |
187 | .globl do_fpdis | 34 | .globl do_fpdis |
188 | do_fpdis: | 35 | do_fpdis: |
@@ -525,14 +372,13 @@ cheetah_plus_patch_fpdis:
525 | * | 372 | * |
526 | * DATA 0: [low 32-bits] Address of function to call, jmp to this | 373 | * DATA 0: [low 32-bits] Address of function to call, jmp to this |
527 | * [high 32-bits] MMU Context Argument 0, place in %g5 | 374 | * [high 32-bits] MMU Context Argument 0, place in %g5 |
528 | * DATA 1: Address Argument 1, place in %g6 | 375 | * DATA 1: Address Argument 1, place in %g1 |
529 | * DATA 2: Address Argument 2, place in %g7 | 376 | * DATA 2: Address Argument 2, place in %g7 |
530 | * | 377 | * |
531 | * With this method we can do most of the cross-call tlb/cache | 378 | * With this method we can do most of the cross-call tlb/cache |
532 | * flushing very quickly. | 379 | * flushing very quickly. |
533 | * | 380 | * |
534 | * Current CPU's IRQ worklist table is locked into %g1, | 381 | * Current CPU's IRQ worklist table is locked into %g6, don't touch. |
535 | * don't touch. | ||
536 | */ | 382 | */ |
537 | .text | 383 | .text |
538 | .align 32 | 384 | .align 32 |
@@ -1006,13 +852,14 @@ cheetah_plus_dcpe_trap_vector:
1006 | nop | 852 | nop |
1007 | 853 | ||
1008 | do_cheetah_plus_data_parity: | 854 | do_cheetah_plus_data_parity: |
1009 | ba,pt %xcc, etrap | 855 | rdpr %pil, %g2 |
856 | wrpr %g0, 15, %pil | ||
857 | ba,pt %xcc, etrap_irq | ||
1010 | rd %pc, %g7 | 858 | rd %pc, %g7 |
1011 | mov 0x0, %o0 | 859 | mov 0x0, %o0 |
1012 | call cheetah_plus_parity_error | 860 | call cheetah_plus_parity_error |
1013 | add %sp, PTREGS_OFF, %o1 | 861 | add %sp, PTREGS_OFF, %o1 |
1014 | ba,pt %xcc, rtrap | 862 | ba,a,pt %xcc, rtrap_irq |
1015 | clr %l6 | ||
1016 | 863 | ||
1017 | cheetah_plus_dcpe_trap_vector_tl1: | 864 | cheetah_plus_dcpe_trap_vector_tl1: |
1018 | membar #Sync | 865 | membar #Sync |
@@ -1036,13 +883,14 @@ cheetah_plus_icpe_trap_vector:
1036 | nop | 883 | nop |
1037 | 884 | ||
1038 | do_cheetah_plus_insn_parity: | 885 | do_cheetah_plus_insn_parity: |
1039 | ba,pt %xcc, etrap | 886 | rdpr %pil, %g2 |
887 | wrpr %g0, 15, %pil | ||
888 | ba,pt %xcc, etrap_irq | ||
1040 | rd %pc, %g7 | 889 | rd %pc, %g7 |
1041 | mov 0x1, %o0 | 890 | mov 0x1, %o0 |
1042 | call cheetah_plus_parity_error | 891 | call cheetah_plus_parity_error |
1043 | add %sp, PTREGS_OFF, %o1 | 892 | add %sp, PTREGS_OFF, %o1 |
1044 | ba,pt %xcc, rtrap | 893 | ba,a,pt %xcc, rtrap_irq |
1045 | clr %l6 | ||
1046 | 894 | ||
1047 | cheetah_plus_icpe_trap_vector_tl1: | 895 | cheetah_plus_icpe_trap_vector_tl1: |
1048 | membar #Sync | 896 | membar #Sync |
@@ -1075,6 +923,10 @@ do_dcpe_tl1:
1075 | nop | 923 | nop |
1076 | wrpr %g1, %tl ! Restore original trap level | 924 | wrpr %g1, %tl ! Restore original trap level |
1077 | do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ | 925 | do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ |
926 | sethi %hi(dcache_parity_tl1_occurred), %g2 | ||
927 | lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1 | ||
928 | add %g1, 1, %g1 | ||
929 | stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)] | ||
1078 | /* Reset D-cache parity */ | 930 | /* Reset D-cache parity */ |
1079 | sethi %hi(1 << 16), %g1 ! D-cache size | 931 | sethi %hi(1 << 16), %g1 ! D-cache size |
1080 | mov (1 << 5), %g2 ! D-cache line size | 932 | mov (1 << 5), %g2 ! D-cache line size |
@@ -1121,6 +973,10 @@ do_icpe_tl1:
1121 | nop | 973 | nop |
1122 | wrpr %g1, %tl ! Restore original trap level | 974 | wrpr %g1, %tl ! Restore original trap level |
1123 | do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ | 975 | do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ |
976 | sethi %hi(icache_parity_tl1_occurred), %g2 | ||
977 | lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1 | ||
978 | add %g1, 1, %g1 | ||
979 | stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)] | ||
1124 | /* Flush I-cache */ | 980 | /* Flush I-cache */ |
1125 | sethi %hi(1 << 15), %g1 ! I-cache size | 981 | sethi %hi(1 << 15), %g1 ! I-cache size |
1126 | mov (1 << 5), %g2 ! I-cache line size | 982 | mov (1 << 5), %g2 ! I-cache line size |
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 1fa06c4e3bdb..89406f9649a9 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -80,15 +80,165 @@ sparc_ramdisk_image64:
80 | .xword 0 | 80 | .xword 0 |
81 | .word _end | 81 | .word _end |
82 | 82 | ||
83 | /* We must be careful, 32-bit OpenBOOT will get confused if it | 83 | /* PROM cif handler code address is in %o4. */ |
84 | * tries to save away a register window to a 64-bit kernel | 84 | sparc64_boot: |
85 | * stack address. Flush all windows, disable interrupts, | 85 | 1: rd %pc, %g7 |
86 | * remap if necessary, jump onto kernel trap table, then kernel | 86 | set 1b, %g1 |
87 | * stack, or else we die. | 87 | cmp %g1, %g7 |
88 | be,pn %xcc, sparc64_boot_after_remap | ||
89 | mov %o4, %l7 | ||
90 | |||
91 | /* We need to remap the kernel. Use position independant | ||
92 | * code to remap us to KERNBASE. | ||
88 | * | 93 | * |
89 | * PROM entry point is on %o4 | 94 | * SILO can invoke us with 32-bit address masking enabled, |
95 | * so make sure that's clear. | ||
90 | */ | 96 | */ |
91 | sparc64_boot: | 97 | rdpr %pstate, %g1 |
98 | andn %g1, PSTATE_AM, %g1 | ||
99 | wrpr %g1, 0x0, %pstate | ||
100 | ba,a,pt %xcc, 1f | ||
101 | |||
102 | .globl prom_finddev_name, prom_chosen_path | ||
103 | .globl prom_getprop_name, prom_mmu_name | ||
104 | .globl prom_callmethod_name, prom_translate_name | ||
105 | .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache | ||
106 | .globl prom_boot_mapped_pc, prom_boot_mapping_mode | ||
107 | .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low | ||
108 | prom_finddev_name: | ||
109 | .asciz "finddevice" | ||
110 | prom_chosen_path: | ||
111 | .asciz "/chosen" | ||
112 | prom_getprop_name: | ||
113 | .asciz "getprop" | ||
114 | prom_mmu_name: | ||
115 | .asciz "mmu" | ||
116 | prom_callmethod_name: | ||
117 | .asciz "call-method" | ||
118 | prom_translate_name: | ||
119 | .asciz "translate" | ||
120 | prom_map_name: | ||
121 | .asciz "map" | ||
122 | prom_unmap_name: | ||
123 | .asciz "unmap" | ||
124 | .align 4 | ||
125 | prom_mmu_ihandle_cache: | ||
126 | .word 0 | ||
127 | prom_boot_mapped_pc: | ||
128 | .word 0 | ||
129 | prom_boot_mapping_mode: | ||
130 | .word 0 | ||
131 | .align 8 | ||
132 | prom_boot_mapping_phys_high: | ||
133 | .xword 0 | ||
134 | prom_boot_mapping_phys_low: | ||
135 | .xword 0 | ||
136 | 1: | ||
137 | rd %pc, %l0 | ||
138 | mov (1b - prom_finddev_name), %l1 | ||
139 | mov (1b - prom_chosen_path), %l2 | ||
140 | mov (1b - prom_boot_mapped_pc), %l3 | ||
141 | sub %l0, %l1, %l1 | ||
142 | sub %l0, %l2, %l2 | ||
143 | sub %l0, %l3, %l3 | ||
144 | stw %l0, [%l3] | ||
145 | sub %sp, (192 + 128), %sp | ||
146 | |||
147 | /* chosen_node = prom_finddevice("/chosen") */ | ||
148 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice" | ||
149 | mov 1, %l3 | ||
150 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 | ||
151 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
152 | stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/chosen" | ||
153 | stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 | ||
154 | call %l7 | ||
155 | add %sp, (2047 + 128), %o0 ! argument array | ||
156 | |||
157 | ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node | ||
158 | |||
159 | mov (1b - prom_getprop_name), %l1 | ||
160 | mov (1b - prom_mmu_name), %l2 | ||
161 | mov (1b - prom_mmu_ihandle_cache), %l5 | ||
162 | sub %l0, %l1, %l1 | ||
163 | sub %l0, %l2, %l2 | ||
164 | sub %l0, %l5, %l5 | ||
165 | |||
166 | /* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */ | ||
167 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" | ||
168 | mov 4, %l3 | ||
169 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 | ||
170 | mov 1, %l3 | ||
171 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
172 | stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node | ||
173 | stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu" | ||
174 | stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache | ||
175 | mov 4, %l3 | ||
176 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3) | ||
177 | stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 | ||
178 | call %l7 | ||
179 | add %sp, (2047 + 128), %o0 ! argument array | ||
180 | |||
181 | mov (1b - prom_callmethod_name), %l1 | ||
182 | mov (1b - prom_translate_name), %l2 | ||
183 | sub %l0, %l1, %l1 | ||
184 | sub %l0, %l2, %l2 | ||
185 | lduw [%l5], %l5 ! prom_mmu_ihandle_cache | ||
186 | |||
187 | stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method" | ||
188 | mov 3, %l3 | ||
189 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3 | ||
190 | mov 5, %l3 | ||
191 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5 | ||
192 | stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate" | ||
193 | stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache | ||
194 | srlx %l0, 22, %l3 | ||
195 | sllx %l3, 22, %l3 | ||
196 | stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC | ||
197 | stx %g0, [%sp + 2047 + 128 + 0x30] ! res1 | ||
198 | stx %g0, [%sp + 2047 + 128 + 0x38] ! res2 | ||
199 | stx %g0, [%sp + 2047 + 128 + 0x40] ! res3 | ||
200 | stx %g0, [%sp + 2047 + 128 + 0x48] ! res4 | ||
201 | stx %g0, [%sp + 2047 + 128 + 0x50] ! res5 | ||
202 | call %l7 | ||
203 | add %sp, (2047 + 128), %o0 ! argument array | ||
204 | |||
205 | ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode | ||
206 | mov (1b - prom_boot_mapping_mode), %l4 | ||
207 | sub %l0, %l4, %l4 | ||
208 | stw %l1, [%l4] | ||
209 | mov (1b - prom_boot_mapping_phys_high), %l4 | ||
210 | sub %l0, %l4, %l4 | ||
211 | ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high | ||
212 | stx %l2, [%l4 + 0x0] | ||
213 | ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low | ||
214 | stx %l3, [%l4 + 0x8] | ||
215 | |||
216 | /* Leave service as-is, "call-method" */ | ||
217 | mov 7, %l3 | ||
218 | stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7 | ||
219 | mov 1, %l3 | ||
220 | stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 | ||
221 | mov (1b - prom_map_name), %l3 | ||
222 | sub %l0, %l3, %l3 | ||
223 | stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map" | ||
224 | /* Leave arg2 as-is, prom_mmu_ihandle_cache */ | ||
225 | mov -1, %l3 | ||
226 | stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default) | ||
227 | sethi %hi(8 * 1024 * 1024), %l3 | ||
228 | stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: size (8MB) | ||
229 | sethi %hi(KERNBASE), %l3 | ||
230 | stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE) | ||
231 | stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty | ||
232 | mov (1b - prom_boot_mapping_phys_low), %l3 | ||
233 | sub %l0, %l3, %l3 | ||
234 | ldx [%l3], %l3 | ||
235 | stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr | ||
236 | call %l7 | ||
237 | add %sp, (2047 + 128), %o0 ! argument array | ||
238 | |||
239 | add %sp, (192 + 128), %sp | ||
240 | |||
241 | sparc64_boot_after_remap: | ||
92 | BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) | 242 | BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) |
93 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) | 243 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) |
94 | ba,pt %xcc, spitfire_boot | 244 | ba,pt %xcc, spitfire_boot |
@@ -125,185 +275,7 @@ cheetah_generic_boot:
125 | stxa %g0, [%g3] ASI_IMMU | 275 | stxa %g0, [%g3] ASI_IMMU |
126 | membar #Sync | 276 | membar #Sync |
127 | 277 | ||
128 | wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate | 278 | ba,a,pt %xcc, jump_to_sun4u_init |
129 | wr %g0, 0, %fprs | ||
130 | |||
131 | /* Just like for Spitfire, we probe itlb-2 for a mapping which | ||
132 | * matches our current %pc. We take the physical address in | ||
133 | * that mapping and use it to make our own. | ||
134 | */ | ||
135 | |||
136 | /* %g5 holds the tlb data */ | ||
137 | sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5 | ||
138 | sllx %g5, 32, %g5 | ||
139 | or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5 | ||
140 | |||
141 | /* Put PADDR tlb data mask into %g3. */ | ||
142 | sethi %uhi(_PAGE_PADDR), %g3 | ||
143 | or %g3, %ulo(_PAGE_PADDR), %g3 | ||
144 | sllx %g3, 32, %g3 | ||
145 | sethi %hi(_PAGE_PADDR), %g7 | ||
146 | or %g7, %lo(_PAGE_PADDR), %g7 | ||
147 | or %g3, %g7, %g3 | ||
148 | |||
149 | set 2 << 16, %l0 /* TLB entry walker. */ | ||
150 | set 0x1fff, %l2 /* Page mask. */ | ||
151 | rd %pc, %l3 | ||
152 | andn %l3, %l2, %g2 /* vaddr comparator */ | ||
153 | |||
154 | 1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1 | ||
155 | membar #Sync | ||
156 | andn %g1, %l2, %g1 | ||
157 | cmp %g1, %g2 | ||
158 | be,pn %xcc, cheetah_got_tlbentry | ||
159 | nop | ||
160 | and %l0, (127 << 3), %g1 | ||
161 | cmp %g1, (127 << 3) | ||
162 | blu,pt %xcc, 1b | ||
163 | add %l0, (1 << 3), %l0 | ||
164 | |||
165 | /* Search the small TLB. OBP never maps us like that but | ||
166 | * newer SILO can. | ||
167 | */ | ||
168 | clr %l0 | ||
169 | |||
170 | 1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1 | ||
171 | membar #Sync | ||
172 | andn %g1, %l2, %g1 | ||
173 | cmp %g1, %g2 | ||
174 | be,pn %xcc, cheetah_got_tlbentry | ||
175 | nop | ||
176 | cmp %l0, (15 << 3) | ||
177 | blu,pt %xcc, 1b | ||
178 | add %l0, (1 << 3), %l0 | ||
179 | |||
180 | /* BUG() if we get here... */ | ||
181 | ta 0x5 | ||
182 | |||
183 | cheetah_got_tlbentry: | ||
184 | ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0 | ||
185 | ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1 | ||
186 | membar #Sync | ||
187 | and %g1, %g3, %g1 | ||
188 | set 0x5fff, %l0 | ||
189 | andn %g1, %l0, %g1 | ||
190 | or %g5, %g1, %g5 | ||
191 | |||
192 | /* Clear out any KERNBASE area entries. */ | ||
193 | set 2 << 16, %l0 | ||
194 | sethi %hi(KERNBASE), %g3 | ||
195 | sethi %hi(KERNBASE<<1), %g7 | ||
196 | mov TLB_TAG_ACCESS, %l7 | ||
197 | |||
198 | /* First, check ITLB */ | ||
199 | 1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1 | ||
200 | membar #Sync | ||
201 | andn %g1, %l2, %g1 | ||
202 | cmp %g1, %g3 | ||
203 | blu,pn %xcc, 2f | ||
204 | cmp %g1, %g7 | ||
205 | bgeu,pn %xcc, 2f | ||
206 | nop | ||
207 | stxa %g0, [%l7] ASI_IMMU | ||
208 | membar #Sync | ||
209 | stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS | ||
210 | membar #Sync | ||
211 | |||
212 | 2: and %l0, (127 << 3), %g1 | ||
213 | cmp %g1, (127 << 3) | ||
214 | blu,pt %xcc, 1b | ||
215 | add %l0, (1 << 3), %l0 | ||
216 | |||
217 | /* Next, check DTLB */ | ||
218 | set 2 << 16, %l0 | ||
219 | 1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1 | ||
220 | membar #Sync | ||
221 | andn %g1, %l2, %g1 | ||
222 | cmp %g1, %g3 | ||
223 | blu,pn %xcc, 2f | ||
224 | cmp %g1, %g7 | ||
225 | bgeu,pn %xcc, 2f | ||
226 | nop | ||
227 | stxa %g0, [%l7] ASI_DMMU | ||
228 | membar #Sync | ||
229 | stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS | ||
230 | membar #Sync | ||
231 | |||
232 | 2: and %l0, (511 << 3), %g1 | ||
233 | cmp %g1, (511 << 3) | ||
234 | blu,pt %xcc, 1b | ||
235 | add %l0, (1 << 3), %l0 | ||
236 | |||
237 | /* On Cheetah+, have to check second DTLB. */ | ||
238 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f) | ||
239 | ba,pt %xcc, 9f | ||
240 | nop | ||
241 | |||
242 | 2: set 3 << 16, %l0 | ||
243 | 1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1 | ||
244 | membar #Sync | ||
245 | andn %g1, %l2, %g1 | ||
246 | cmp %g1, %g3 | ||
247 | blu,pn %xcc, 2f | ||
248 | cmp %g1, %g7 | ||
249 | bgeu,pn %xcc, 2f | ||
250 | nop | ||
251 | stxa %g0, [%l7] ASI_DMMU | ||
252 | membar #Sync | ||
253 | stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS | ||
254 | membar #Sync | ||
255 | |||
256 | 2: and %l0, (511 << 3), %g1 | ||
257 | cmp %g1, (511 << 3) | ||
258 | blu,pt %xcc, 1b | ||
259 | add %l0, (1 << 3), %l0 | ||
260 | |||
261 | 9: | ||
262 | |||
263 | /* Now lock the TTE we created into ITLB-0 and DTLB-0, | ||
264 | * entry 15 (and maybe 14 too). | ||
265 | */ | ||
266 | sethi %hi(KERNBASE), %g3 | ||
267 | set (0 << 16) | (15 << 3), %g7 | ||
268 | stxa %g3, [%l7] ASI_DMMU | ||
269 | membar #Sync | ||
270 | stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS | ||
271 | membar #Sync | ||
272 | stxa %g3, [%l7] ASI_IMMU | ||
273 | membar #Sync | ||
274 | stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS | ||
275 | membar #Sync | ||
276 | flush %g3 | ||
277 | membar #Sync | ||
278 | sethi %hi(_end), %g3 /* Check for bigkernel case */ | ||
279 | or %g3, %lo(_end), %g3 | ||
280 | srl %g3, 23, %g3 /* Check if _end > 8M */ | ||
281 | brz,pt %g3, 1f | ||
282 | sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */ | ||
283 | sethi %hi(0x400000), %g3 | ||
284 | or %g3, %lo(0x400000), %g3 | ||
285 | add %g5, %g3, %g5 /* New tte data */ | ||
286 | andn %g5, (_PAGE_G), %g5 | ||
287 | sethi %hi(KERNBASE+0x400000), %g3 | ||
288 | or %g3, %lo(KERNBASE+0x400000), %g3 | ||
289 | set (0 << 16) | (14 << 3), %g7 | ||
290 | stxa %g3, [%l7] ASI_DMMU | ||
291 | membar #Sync | ||
292 | stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS | ||
293 | membar #Sync | ||
294 | stxa %g3, [%l7] ASI_IMMU | ||
295 | membar #Sync | ||
296 | stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS | ||
297 | membar #Sync | ||
298 | flush %g3 | ||
299 | membar #Sync | ||
300 | sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */ | ||
301 | ba,pt %xcc, 1f | ||
302 | nop | ||
303 | |||
304 | 1: set sun4u_init, %g2 | ||
305 | jmpl %g2 + %g0, %g0 | ||
306 | nop | ||
307 | 279 | ||
308 | spitfire_boot: | 280 | spitfire_boot: |
309 | /* Typically PROM has already enabled both MMU's and both on-chip | 281 | /* Typically PROM has already enabled both MMU's and both on-chip |
@@ -313,6 +285,7 @@ spitfire_boot:
313 | stxa %g1, [%g0] ASI_LSU_CONTROL | 285 | stxa %g1, [%g0] ASI_LSU_CONTROL |
314 | membar #Sync | 286 | membar #Sync |
315 | 287 | ||
288 | jump_to_sun4u_init: | ||
316 | /* | 289 | /* |
317 | * Make sure we are in privileged mode, have address masking, | 290 | * Make sure we are in privileged mode, have address masking, |
318 | * using the ordinary globals and have enabled floating | 291 | * using the ordinary globals and have enabled floating |
@@ -324,151 +297,6 @@ spitfire_boot:
324 | wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate | 297 | wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate |
325 | wr %g0, 0, %fprs | 298 | wr %g0, 0, %fprs |
326 | 299 | ||
327 | spitfire_create_mappings: | ||
328 | /* %g5 holds the tlb data */ | ||
329 | sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5 | ||
330 | sllx %g5, 32, %g5 | ||
331 | or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5 | ||
332 | |||
333 | /* Base of physical memory cannot reliably be assumed to be | ||
334 | * at 0x0! Figure out where it happens to be. -DaveM | ||
335 | */ | ||
336 | |||
337 | /* Put PADDR tlb data mask into %g3. */ | ||
338 | sethi %uhi(_PAGE_PADDR_SF), %g3 | ||
339 | or %g3, %ulo(_PAGE_PADDR_SF), %g3 | ||
340 | sllx %g3, 32, %g3 | ||
341 | sethi %hi(_PAGE_PADDR_SF), %g7 | ||
342 | or %g7, %lo(_PAGE_PADDR_SF), %g7 | ||
343 | or %g3, %g7, %g3 | ||
344 | |||
345 | /* Walk through entire ITLB, looking for entry which maps | ||
346 | * our %pc currently, stick PADDR from there into %g5 tlb data. | ||
347 | */ | ||
348 | clr %l0 /* TLB entry walker. */ | ||
349 | set 0x1fff, %l2 /* Page mask. */ | ||
350 | rd %pc, %l3 | ||
351 | andn %l3, %l2, %g2 /* vaddr comparator */ | ||
352 | 1: | ||
353 | /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */ | ||
354 | ldxa [%l0] ASI_ITLB_TAG_READ, %g1 | ||
355 | nop | ||
356 | nop | ||
357 | nop | ||
358 | andn %g1, %l2, %g1 /* Get vaddr */ | ||
359 | cmp %g1, %g2 | ||
360 | be,a,pn %xcc, spitfire_got_tlbentry | ||
361 | ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1 | ||
362 | cmp %l0, (63 << 3) | ||
363 | blu,pt %xcc, 1b | ||
364 | add %l0, (1 << 3), %l0 | ||
365 | |||
366 | /* BUG() if we get here... */ | ||
367 | ta 0x5 | ||
368 | |||
369 | spitfire_got_tlbentry: | ||
370 | /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */ | ||
371 | nop | ||
372 | nop | ||
373 | nop | ||
374 | and %g1, %g3, %g1 /* Mask to just get paddr bits. */ | ||
375 | set 0x5fff, %l3 /* Mask offset to get phys base. */ | ||
376 | andn %g1, %l3, %g1 | ||
377 | |||
378 | /* NOTE: We hold on to %g1 paddr base as we need it below to lock | ||
379 | * NOTE: the PROM cif code into the TLB. | ||
380 | */ | ||
381 | |||
382 | or %g5, %g1, %g5 /* Or it into TAG being built. */ | ||
383 | |||
384 | clr %l0 /* TLB entry walker. */ | ||
385 | sethi %hi(KERNBASE), %g3 /* 4M lower limit */ | ||
386 | sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */ | ||
387 | mov TLB_TAG_ACCESS, %l7 | ||
388 | 1: | ||
389 | /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */ | ||
390 | ldxa [%l0] ASI_ITLB_TAG_READ, %g1 | ||
391 | nop | ||
392 | nop | ||
393 | nop | ||
394 | andn %g1, %l2, %g1 /* Get vaddr */ | ||
395 | cmp %g1, %g3 | ||
396 | blu,pn %xcc, 2f | ||
397 | cmp %g1, %g7 | ||
398 | bgeu,pn %xcc, 2f | ||
399 | nop | ||
400 | stxa %g0, [%l7] ASI_IMMU | ||
401 | stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS | ||
402 | membar #Sync | ||
403 | 2: | ||
404 | cmp %l0, (63 << 3) | ||
405 | blu,pt %xcc, 1b | ||
406 | add %l0, (1 << 3), %l0 | ||
407 | |||
408 | nop; nop; nop | ||
409 | |||
410 | clr %l0 /* TLB entry walker. */ | ||
411 | 1: | ||
412 | /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */ | ||
413 | ldxa [%l0] ASI_DTLB_TAG_READ, %g1 | ||
414 | nop | ||
415 | nop | ||
416 | nop | ||
417 | andn %g1, %l2, %g1 /* Get vaddr */ | ||
418 | cmp %g1, %g3 | ||
419 | blu,pn %xcc, 2f | ||
420 | cmp %g1, %g7 | ||
421 | bgeu,pn %xcc, 2f | ||
422 | nop | ||
423 | stxa %g0, [%l7] ASI_DMMU | ||
424 | stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS | ||
425 | membar #Sync | ||
426 | 2: | ||
427 | cmp %l0, (63 << 3) | ||
428 | blu,pt %xcc, 1b | ||
429 | add %l0, (1 << 3), %l0 | ||
430 | |||
431 | nop; nop; nop | ||
432 | |||
433 | |||
434 | /* PROM never puts any TLB entries into the MMU with the lock bit | ||
435 | * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too. | ||
436 | */ | ||
437 | |||
438 | sethi %hi(KERNBASE), %g3 | ||
439 | mov (63 << 3), %g7 | ||
440 | stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */ | ||
441 | stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */ | ||
442 | membar #Sync | ||
443 | stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */ | ||
444 | stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */ | ||
445 | membar #Sync | ||
446 | flush %g3 | ||
447 | membar #Sync | ||
448 | sethi %hi(_end), %g3 /* Check for bigkernel case */ | ||
449 | or %g3, %lo(_end), %g3 | ||
450 | srl %g3, 23, %g3 /* Check if _end > 8M */ | ||
451 | brz,pt %g3, 2f | ||
452 | sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */ | ||
453 | sethi %hi(0x400000), %g3 | ||
454 | or %g3, %lo(0x400000), %g3 | ||
455 | add %g5, %g3, %g5 /* New tte data */ | ||
456 | andn %g5, (_PAGE_G), %g5 | ||
457 | sethi %hi(KERNBASE+0x400000), %g3 | ||
458 | or %g3, %lo(KERNBASE+0x400000), %g3 | ||
459 | mov (62 << 3), %g7 | ||
460 | stxa %g3, [%l7] ASI_DMMU | ||
461 | stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS | ||
462 | membar #Sync | ||
463 | stxa %g3, [%l7] ASI_IMMU | ||
464 | stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS | ||
465 | membar #Sync | ||
466 | flush %g3 | ||
467 | membar #Sync | ||
468 | sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */ | ||
469 | 2: ba,pt %xcc, 1f | ||
470 | nop | ||
471 | 1: | ||
472 | set sun4u_init, %g2 | 300 | set sun4u_init, %g2 |
473 | jmpl %g2 + %g0, %g0 | 301 | jmpl %g2 + %g0, %g0 |
474 | nop | 302 | nop |
@@ -483,38 +311,12 @@ sun4u_init:
483 | stxa %g0, [%g7] ASI_DMMU | 311 | stxa %g0, [%g7] ASI_DMMU |
484 | membar #Sync | 312 | membar #Sync |
485 | 313 | ||
486 | /* We are now safely (we hope) in Nucleus context (0), rewrite | ||
487 | * the KERNBASE TTE's so they no longer have the global bit set. | ||
488 | * Don't forget to setup TAG_ACCESS first 8-) | ||
489 | */ | ||
490 | mov TLB_TAG_ACCESS, %g2 | ||
491 | stxa %g3, [%g2] ASI_IMMU | ||
492 | stxa %g3, [%g2] ASI_DMMU | ||
493 | membar #Sync | ||
494 | |||
495 | BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup) | 314 | BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup) |
496 | 315 | ||
497 | ba,pt %xcc, spitfire_tlb_fixup | 316 | ba,pt %xcc, spitfire_tlb_fixup |
498 | nop | 317 | nop |
499 | 318 | ||
500 | cheetah_tlb_fixup: | 319 | cheetah_tlb_fixup: |
501 | set (0 << 16) | (15 << 3), %g7 | ||
502 | ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0 | ||
503 | ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1 | ||
504 | andn %g1, (_PAGE_G), %g1 | ||
505 | stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS | ||
506 | membar #Sync | ||
507 | |||
508 | ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0 | ||
509 | ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1 | ||
510 | andn %g1, (_PAGE_G), %g1 | ||
511 | stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS | ||
512 | membar #Sync | ||
513 | |||
514 | /* Kill instruction prefetch queues. */ | ||
515 | flush %g3 | ||
516 | membar #Sync | ||
517 | |||
518 | mov 2, %g2 /* Set TLB type to cheetah+. */ | 320 | mov 2, %g2 /* Set TLB type to cheetah+. */ |
519 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) | 321 | BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) |
520 | 322 | ||
@@ -551,21 +353,6 @@ cheetah_tlb_fixup:
551 | nop | 353 | nop |
552 | 354 | ||
553 | spitfire_tlb_fixup: | 355 | spitfire_tlb_fixup: |
554 | mov (63 << 3), %g7 | ||
555 | ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1 | ||
556 | andn %g1, (_PAGE_G), %g1 | ||
557 | stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS | ||
558 | membar #Sync | ||
559 | |||
560 | ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1 | ||
561 | andn %g1, (_PAGE_G), %g1 | ||
562 | stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS | ||
563 | membar #Sync | ||
564 | |||
565 | /* Kill instruction prefetch queues. */ | ||
566 | flush %g3 | ||
567 | membar #Sync | ||
568 | |||
569 | /* Set TLB type to spitfire. */ | 356 | /* Set TLB type to spitfire. */ |
570 | mov 0, %g2 | 357 | mov 0, %g2 |
571 | sethi %hi(tlb_type), %g1 | 358 | sethi %hi(tlb_type), %g1 |
@@ -578,24 +365,6 @@ tlb_fixup_done:
578 | mov %sp, %l6 | 365 | mov %sp, %l6 |
579 | mov %o4, %l7 | 366 | mov %o4, %l7 |
580 | 367 | ||
581 | #if 0 /* We don't do it like this anymore, but for historical hack value | ||
582 | * I leave this snippet here to show how crazy we can be sometimes. 8-) | ||
583 | */ | ||
584 | |||
585 | /* Setup "Linux Current Register", thanks Sun 8-) */ | ||
586 | wr %g0, 0x1, %pcr | ||
587 | |||
588 | /* Blackbird errata workaround. See commentary in | ||
589 | * smp.c:smp_percpu_timer_interrupt() for more | ||
590 | * information. | ||
591 | */ | ||
592 | ba,pt %xcc, 99f | ||
593 | nop | ||
594 | .align 64 | ||
595 | 99: wr %g6, %g0, %pic | ||
596 | rd %pic, %g0 | ||
597 | #endif | ||
598 | |||
599 | wr %g0, ASI_P, %asi | 368 | wr %g0, ASI_P, %asi |
600 | mov 1, %g1 | 369 | mov 1, %g1 |
601 | sllx %g1, THREAD_SHIFT, %g1 | 370 | sllx %g1, THREAD_SHIFT, %g1 |
@@ -756,12 +525,7 @@ bootup_user_stack_end:
756 | 525 | ||
757 | #include "ttable.S" | 526 | #include "ttable.S" |
758 | #include "systbls.S" | 527 | #include "systbls.S" |
759 | 528 | #include "ktlb.S" | |
760 | .align 1024 | ||
761 | .globl swapper_pg_dir | ||
762 | swapper_pg_dir: | ||
763 | .word 0 | ||
764 | |||
765 | #include "etrap.S" | 529 | #include "etrap.S" |
766 | #include "rtrap.S" | 530 | #include "rtrap.S" |
767 | #include "winfixup.S" | 531 | #include "winfixup.S" |
@@ -776,8 +540,11 @@ swapper_pg_dir:
776 | prom_tba: .xword 0 | 540 | prom_tba: .xword 0 |
777 | tlb_type: .word 0 /* Must NOT end up in BSS */ | 541 | tlb_type: .word 0 /* Must NOT end up in BSS */ |
778 | .section ".fixup",#alloc,#execinstr | 542 | .section ".fixup",#alloc,#execinstr |
779 | .globl __ret_efault | 543 | |
544 | .globl __ret_efault, __retl_efault | ||
780 | __ret_efault: | 545 | __ret_efault: |
781 | ret | 546 | ret |
782 | restore %g0, -EFAULT, %o0 | 547 | restore %g0, -EFAULT, %o0 |
783 | 548 | __retl_efault: | |
549 | retl | ||
550 | mov -EFAULT, %o0 | ||
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
new file mode 100644
index 000000000000..7796b37f478c
--- /dev/null
+++ b/arch/sparc64/kernel/ktlb.S
@@ -0,0 +1,198 @@
1 | /* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling. | ||
2 | * | ||
3 | * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net> | ||
4 | * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) | ||
5 | * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) | ||
6 | * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
7 | */ | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <asm/head.h> | ||
11 | #include <asm/asi.h> | ||
12 | #include <asm/page.h> | ||
13 | #include <asm/pgtable.h> | ||
14 | |||
15 | .text | ||
16 | .align 32 | ||
17 | |||
18 | /* | ||
19 | * On a second level vpte miss, check whether the original fault is to the OBP | ||
20 | * range (note that this is only possible for instruction miss, data misses to | ||
21 | * obp range do not use vpte). If so, go back directly to the faulting address. | ||
22 | * This is because we want to read the tpc, otherwise we have no way of knowing | ||
23 | * the 8k aligned faulting address if we are using >8k kernel pagesize. This | ||
24 | * also ensures no vpte range addresses are dropped into tlb while obp is | ||
25 | * executing (see inherit_locked_prom_mappings() rant). | ||
26 | */ | ||
27 | sparc64_vpte_nucleus: | ||
28 | /* Note that kvmap below has verified that the address is | ||
29 | * in the range MODULES_VADDR --> VMALLOC_END already. So | ||
30 | * here we need only check if it is an OBP address or not. | ||
31 | */ | ||
32 | sethi %hi(LOW_OBP_ADDRESS), %g5 | ||
33 | cmp %g4, %g5 | ||
34 | blu,pn %xcc, kern_vpte | ||
35 | mov 0x1, %g5 | ||
36 | sllx %g5, 32, %g5 | ||
37 | cmp %g4, %g5 | ||
38 | blu,pn %xcc, vpte_insn_obp | ||
39 | nop | ||
40 | |||
41 | /* These two instructions are patched by paginig_init(). */ | ||
42 | kern_vpte: | ||
43 | sethi %hi(swapper_pgd_zero), %g5 | ||
44 | lduw [%g5 + %lo(swapper_pgd_zero)], %g5 | ||
45 | |||
46 | /* With kernel PGD in %g5, branch back into dtlb_backend. */ | ||
47 | ba,pt %xcc, sparc64_kpte_continue | ||
48 | andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */ | ||
49 | |||
50 | vpte_noent: | ||
51 | /* Restore previous TAG_ACCESS, %g5 is zero, and we will | ||
52 | * skip over the trap instruction so that the top level | ||
53 | * TLB miss handler will thing this %g5 value is just an | ||
54 | * invalid PTE, thus branching to full fault processing. | ||
55 | */ | ||
56 | mov TLB_SFSR, %g1 | ||
57 | stxa %g4, [%g1 + %g1] ASI_DMMU | ||
58 | done | ||
59 | |||
60 | vpte_insn_obp: | ||
61 | sethi %hi(prom_pmd_phys), %g5 | ||
62 | ldx [%g5 + %lo(prom_pmd_phys)], %g5 | ||
63 | |||
64 | /* Behave as if we are at TL0. */ | ||
65 | wrpr %g0, 1, %tl | ||
66 | rdpr %tpc, %g4 /* Find original faulting iaddr */ | ||
67 | srlx %g4, 13, %g4 /* Throw out context bits */ | ||
68 | sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */ | ||
69 | |||
70 | /* Restore previous TAG_ACCESS. */ | ||
71 | mov TLB_SFSR, %g1 | ||
72 | stxa %g4, [%g1 + %g1] ASI_IMMU | ||
73 | |||
74 | /* Get PMD offset. */ | ||
75 | srlx %g4, 23, %g6 | ||
76 | and %g6, 0x7ff, %g6 | ||
77 | sllx %g6, 2, %g6 | ||
78 | |||
79 | /* Load PMD, is it valid? */ | ||
80 | lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
81 | brz,pn %g5, longpath | ||
82 | sllx %g5, 11, %g5 | ||
83 | |||
84 | /* Get PTE offset. */ | ||
85 | srlx %g4, 13, %g6 | ||
86 | and %g6, 0x3ff, %g6 | ||
87 | sllx %g6, 3, %g6 | ||
88 | |||
89 | /* Load PTE. */ | ||
90 | ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
91 | brgez,pn %g5, longpath | ||
92 | nop | ||
93 | |||
94 | /* TLB load and return from trap. */ | ||
95 | stxa %g5, [%g0] ASI_ITLB_DATA_IN | ||
96 | retry | ||
97 | |||
98 | kvmap_do_obp: | ||
99 | sethi %hi(prom_pmd_phys), %g5 | ||
100 | ldx [%g5 + %lo(prom_pmd_phys)], %g5 | ||
101 | |||
102 | /* Get PMD offset. */ | ||
103 | srlx %g4, 23, %g6 | ||
104 | and %g6, 0x7ff, %g6 | ||
105 | sllx %g6, 2, %g6 | ||
106 | |||
107 | /* Load PMD, is it valid? */ | ||
108 | lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
109 | brz,pn %g5, longpath | ||
110 | sllx %g5, 11, %g5 | ||
111 | |||
112 | /* Get PTE offset. */ | ||
113 | srlx %g4, 13, %g6 | ||
114 | and %g6, 0x3ff, %g6 | ||
115 | sllx %g6, 3, %g6 | ||
116 | |||
117 | /* Load PTE. */ | ||
118 | ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
119 | brgez,pn %g5, longpath | ||
120 | nop | ||
121 | |||
122 | /* TLB load and return from trap. */ | ||
123 | stxa %g5, [%g0] ASI_DTLB_DATA_IN | ||
124 | retry | ||
125 | |||
126 | /* | ||
127 | * On a first level data miss, check whether this is to the OBP range (note | ||
128 | * that such accesses can be made by prom, as well as by kernel using | ||
129 | * prom_getproperty on "address"), and if so, do not use vpte access ... | ||
130 | * rather, use information saved during inherit_prom_mappings() using 8k | ||
131 | * pagesize. | ||
132 | */ | ||
133 | .align 32 | ||
134 | kvmap: | ||
135 | brgez,pn %g4, kvmap_nonlinear | ||
136 | nop | ||
137 | |||
138 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
139 | .globl kvmap_linear_patch | ||
140 | kvmap_linear_patch: | ||
141 | #endif | ||
142 | ba,pt %xcc, kvmap_load | ||
143 | xor %g2, %g4, %g5 | ||
144 | |||
145 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
146 | sethi %hi(swapper_pg_dir), %g5 | ||
147 | or %g5, %lo(swapper_pg_dir), %g5 | ||
148 | sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6 | ||
149 | srlx %g6, 64 - PAGE_SHIFT, %g6 | ||
150 | andn %g6, 0x3, %g6 | ||
151 | lduw [%g5 + %g6], %g5 | ||
152 | brz,pn %g5, longpath | ||
153 | sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6 | ||
154 | srlx %g6, 64 - PAGE_SHIFT, %g6 | ||
155 | sllx %g5, 11, %g5 | ||
156 | andn %g6, 0x3, %g6 | ||
157 | lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
158 | brz,pn %g5, longpath | ||
159 | sllx %g4, 64 - PMD_SHIFT, %g6 | ||
160 | srlx %g6, 64 - PAGE_SHIFT, %g6 | ||
161 | sllx %g5, 11, %g5 | ||
162 | andn %g6, 0x7, %g6 | ||
163 | ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 | ||
164 | brz,pn %g5, longpath | ||
165 | nop | ||
166 | ba,a,pt %xcc, kvmap_load | ||
167 | #endif | ||
168 | |||
169 | kvmap_nonlinear: | ||
170 | sethi %hi(MODULES_VADDR), %g5 | ||
171 | cmp %g4, %g5 | ||
172 | blu,pn %xcc, longpath | ||
173 | mov (VMALLOC_END >> 24), %g5 | ||
174 | sllx %g5, 24, %g5 | ||
175 | cmp %g4, %g5 | ||
176 | bgeu,pn %xcc, longpath | ||
177 | nop | ||
178 | |||
179 | kvmap_check_obp: | ||
180 | sethi %hi(LOW_OBP_ADDRESS), %g5 | ||
181 | cmp %g4, %g5 | ||
182 | blu,pn %xcc, kvmap_vmalloc_addr | ||
183 | mov 0x1, %g5 | ||
184 | sllx %g5, 32, %g5 | ||
185 | cmp %g4, %g5 | ||
186 | blu,pn %xcc, kvmap_do_obp | ||
187 | nop | ||
188 | |||
189 | kvmap_vmalloc_addr: | ||
190 | /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */ | ||
191 | ldxa [%g3 + %g6] ASI_N, %g5 | ||
192 | brgez,pn %g5, longpath | ||
193 | nop | ||
194 | |||
195 | kvmap_load: | ||
196 | /* PTE is valid, load into TLB and return from trap. */ | ||
197 | stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB | ||
198 | retry | ||
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 331382e1a75d..cae5b61fe2f0 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -330,7 +330,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
330 | static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) | 330 | static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) |
331 | { | 331 | { |
332 | unsigned long sync_reg = (unsigned long) _arg2; | 332 | unsigned long sync_reg = (unsigned long) _arg2; |
333 | u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO); | 333 | u64 mask = 1UL << (__irq_ino(__irq(bucket)) & IMAP_INO); |
334 | u64 val; | 334 | u64 val; |
335 | int limit; | 335 | int limit; |
336 | 336 | ||
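The one-line pci_schizo.c change matters because the interrupt number used as a shift count can exceed 31. A plain 1 is an int, so 1 << n is evaluated in 32-bit arithmetic and is undefined once n reaches 32, even when the result is assigned to a u64; 1UL makes the shift 64-bit on sparc64, where unsigned long is 64 bits. A minimal standalone illustration (not kernel code) follows.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int ino_bit = 40;	/* example bit index above 31 */

	/* "1 << ino_bit" would be a 32-bit int shift -- undefined for counts >= 32,
	 * which is exactly the bug the hunk above fixes.  Widening the constant
	 * first keeps the shift in 64-bit arithmetic. */
	uint64_t mask = (uint64_t)1 << ino_bit;

	printf("mask = 0x%016llx\n", (unsigned long long)mask);
	return 0;
}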
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 5efbff90d668..774ecbb8a031 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -31,6 +31,7 @@
31 | #include <asm/visasm.h> | 31 | #include <asm/visasm.h> |
32 | #include <asm/spitfire.h> | 32 | #include <asm/spitfire.h> |
33 | #include <asm/page.h> | 33 | #include <asm/page.h> |
34 | #include <asm/cpudata.h> | ||
34 | 35 | ||
35 | /* Returning from ptrace is a bit tricky because the syscall return | 36 | /* Returning from ptrace is a bit tricky because the syscall return |
36 | * low level code assumes any value returned which is negative and | 37 | * low level code assumes any value returned which is negative and |
@@ -132,12 +133,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
132 | if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) { | 133 | if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) { |
133 | unsigned long start = __pa(kaddr); | 134 | unsigned long start = __pa(kaddr); |
134 | unsigned long end = start + len; | 135 | unsigned long end = start + len; |
136 | unsigned long dcache_line_size; | ||
137 | |||
138 | dcache_line_size = local_cpu_data().dcache_line_size; | ||
135 | 139 | ||
136 | if (tlb_type == spitfire) { | 140 | if (tlb_type == spitfire) { |
137 | for (; start < end; start += 32) | 141 | for (; start < end; start += dcache_line_size) |
138 | spitfire_put_dcache_tag(start & 0x3fe0, 0x0); | 142 | spitfire_put_dcache_tag(start & 0x3fe0, 0x0); |
139 | } else { | 143 | } else { |
140 | for (; start < end; start += 32) | 144 | start &= ~(dcache_line_size - 1); |
145 | for (; start < end; start += dcache_line_size) | ||
141 | __asm__ __volatile__( | 146 | __asm__ __volatile__( |
142 | "stxa %%g0, [%0] %1\n\t" | 147 | "stxa %%g0, [%0] %1\n\t" |
143 | "membar #Sync" | 148 | "membar #Sync" |
@@ -150,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
150 | if (write && tlb_type == spitfire) { | 155 | if (write && tlb_type == spitfire) { |
151 | unsigned long start = (unsigned long) kaddr; | 156 | unsigned long start = (unsigned long) kaddr; |
152 | unsigned long end = start + len; | 157 | unsigned long end = start + len; |
158 | unsigned long icache_line_size; | ||
159 | |||
160 | icache_line_size = local_cpu_data().icache_line_size; | ||
153 | 161 | ||
154 | for (; start < end; start += 32) | 162 | for (; start < end; start += icache_line_size) |
155 | flushi(start); | 163 | flushi(start); |
156 | } | 164 | } |
157 | } | 165 | } |
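In the ptrace.c hunks above, the flush loops now step by the probed D-cache/I-cache line size instead of a hard-coded 32 bytes, and the non-Spitfire path additionally rounds the start address down to a line boundary. Without that rounding, a buffer that does not begin on a line boundary can end up with its final line never being hit by the loop once the stride grows beyond 32 bytes. The standalone snippet below walks through the alignment arithmetic with made-up numbers (assuming, as the probed values imply, that the line size is a power of two).

#include <stdio.h>

int main(void)
{
	unsigned long line  = 64;			/* probed dcache_line_size */
	unsigned long kaddr = 0x10028;			/* buffer start, not line-aligned */
	unsigned long len   = 40;
	unsigned long start = kaddr & ~(line - 1);	/* align down -> 0x10000 */
	unsigned long end   = kaddr + len;		/* 0x10050 */

	for (; start < end; start += line)
		printf("flush line at 0x%lx\n", start);	/* 0x10000, then 0x10040 */
	return 0;
}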
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index ddbed3341a23..4c9c8f241748 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -464,8 +464,6 @@ static void __init boot_flags_init(char *commands)
464 | } | 464 | } |
465 | } | 465 | } |
466 | 466 | ||
467 | extern int prom_probe_memory(void); | ||
468 | extern unsigned long start, end; | ||
469 | extern void panic_setup(char *, int *); | 467 | extern void panic_setup(char *, int *); |
470 | 468 | ||
471 | extern unsigned short root_flags; | 469 | extern unsigned short root_flags; |
@@ -492,13 +490,8 @@ void register_prom_callbacks(void)
492 | "' linux-.soft2 to .soft2"); | 490 | "' linux-.soft2 to .soft2"); |
493 | } | 491 | } |
494 | 492 | ||
495 | extern void paging_init(void); | ||
496 | |||
497 | void __init setup_arch(char **cmdline_p) | 493 | void __init setup_arch(char **cmdline_p) |
498 | { | 494 | { |
499 | unsigned long highest_paddr; | ||
500 | int i; | ||
501 | |||
502 | /* Initialize PROM console and command line. */ | 495 | /* Initialize PROM console and command line. */ |
503 | *cmdline_p = prom_getbootargs(); | 496 | *cmdline_p = prom_getbootargs(); |
504 | strcpy(saved_command_line, *cmdline_p); | 497 | strcpy(saved_command_line, *cmdline_p); |
@@ -517,40 +510,6 @@ void __init setup_arch(char **cmdline_p)
517 | boot_flags_init(*cmdline_p); | 510 | boot_flags_init(*cmdline_p); |
518 | 511 | ||
519 | idprom_init(); | 512 | idprom_init(); |
520 | (void) prom_probe_memory(); | ||
521 | |||
522 | /* In paging_init() we tip off this value to see if we need | ||
523 | * to change init_mm.pgd to point to the real alias mapping. | ||
524 | */ | ||
525 | phys_base = 0xffffffffffffffffUL; | ||
526 | highest_paddr = 0UL; | ||
527 | for (i = 0; sp_banks[i].num_bytes != 0; i++) { | ||
528 | unsigned long top; | ||
529 | |||
530 | if (sp_banks[i].base_addr < phys_base) | ||
531 | phys_base = sp_banks[i].base_addr; | ||
532 | top = sp_banks[i].base_addr + | ||
533 | sp_banks[i].num_bytes; | ||
534 | if (highest_paddr < top) | ||
535 | highest_paddr = top; | ||
536 | } | ||
537 | pfn_base = phys_base >> PAGE_SHIFT; | ||
538 | |||
539 | switch (tlb_type) { | ||
540 | default: | ||
541 | case spitfire: | ||
542 | kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent()); | ||
543 | kern_base &= _PAGE_PADDR_SF; | ||
544 | break; | ||
545 | |||
546 | case cheetah: | ||
547 | case cheetah_plus: | ||
548 | kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent()); | ||
549 | kern_base &= _PAGE_PADDR; | ||
550 | break; | ||
551 | }; | ||
552 | |||
553 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | ||
554 | 513 | ||
555 | if (!root_flags) | 514 | if (!root_flags) |
556 | root_mountflags &= ~MS_RDONLY; | 515 | root_mountflags &= ~MS_RDONLY; |
@@ -625,6 +584,9 @@ extern void smp_bogo(struct seq_file *);
625 | extern void smp_bogo(struct seq_file *); | 584 | extern void smp_bogo(struct seq_file *); |
626 | extern void mmu_info(struct seq_file *); | 585 | extern void mmu_info(struct seq_file *); |
627 | 586 | ||
587 | unsigned int dcache_parity_tl1_occurred; | ||
588 | unsigned int icache_parity_tl1_occurred; | ||
589 | |||
628 | static int show_cpuinfo(struct seq_file *m, void *__unused) | 590 | static int show_cpuinfo(struct seq_file *m, void *__unused) |
629 | { | 591 | { |
630 | seq_printf(m, | 592 | seq_printf(m, |
@@ -635,6 +597,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
635 | "type\t\t: sun4u\n" | 597 | "type\t\t: sun4u\n" |
636 | "ncpus probed\t: %ld\n" | 598 | "ncpus probed\t: %ld\n" |
637 | "ncpus active\t: %ld\n" | 599 | "ncpus active\t: %ld\n" |
600 | "D$ parity tl1\t: %u\n" | ||
601 | "I$ parity tl1\t: %u\n" | ||
638 | #ifndef CONFIG_SMP | 602 | #ifndef CONFIG_SMP |
639 | "Cpu0Bogo\t: %lu.%02lu\n" | 603 | "Cpu0Bogo\t: %lu.%02lu\n" |
640 | "Cpu0ClkTck\t: %016lx\n" | 604 | "Cpu0ClkTck\t: %016lx\n" |
@@ -647,7 +611,9 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
647 | (prom_prev >> 8) & 0xff, | 611 | (prom_prev >> 8) & 0xff, |
648 | prom_prev & 0xff, | 612 | prom_prev & 0xff, |
649 | (long)num_possible_cpus(), | 613 | (long)num_possible_cpus(), |
650 | (long)num_online_cpus() | 614 | (long)num_online_cpus(), |
615 | dcache_parity_tl1_occurred, | ||
616 | icache_parity_tl1_occurred | ||
651 | #ifndef CONFIG_SMP | 617 | #ifndef CONFIG_SMP |
652 | , cpu_data(0).udelay_val/(500000/HZ), | 618 | , cpu_data(0).udelay_val/(500000/HZ), |
653 | (cpu_data(0).udelay_val/(5000/HZ)) % 100, | 619 | (cpu_data(0).udelay_val/(5000/HZ)) % 100, |
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b4fc6a5462b2..590df5a16f5a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -93,6 +93,27 @@ void __init smp_store_cpu_info(int id)
93 | cpu_data(id).pte_cache[1] = NULL; | 93 | cpu_data(id).pte_cache[1] = NULL; |
94 | cpu_data(id).pgd_cache = NULL; | 94 | cpu_data(id).pgd_cache = NULL; |
95 | cpu_data(id).idle_volume = 1; | 95 | cpu_data(id).idle_volume = 1; |
96 | |||
97 | cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", | ||
98 | 16 * 1024); | ||
99 | cpu_data(id).dcache_line_size = | ||
100 | prom_getintdefault(cpu_node, "dcache-line-size", 32); | ||
101 | cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size", | ||
102 | 16 * 1024); | ||
103 | cpu_data(id).icache_line_size = | ||
104 | prom_getintdefault(cpu_node, "icache-line-size", 32); | ||
105 | cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size", | ||
106 | 4 * 1024 * 1024); | ||
107 | cpu_data(id).ecache_line_size = | ||
108 | prom_getintdefault(cpu_node, "ecache-line-size", 64); | ||
109 | printk("CPU[%d]: Caches " | ||
110 | "D[sz(%d):line_sz(%d)] " | ||
111 | "I[sz(%d):line_sz(%d)] " | ||
112 | "E[sz(%d):line_sz(%d)]\n", | ||
113 | id, | ||
114 | cpu_data(id).dcache_size, cpu_data(id).dcache_line_size, | ||
115 | cpu_data(id).icache_size, cpu_data(id).icache_line_size, | ||
116 | cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); | ||
96 | } | 117 | } |
97 | 118 | ||
98 | static void smp_setup_percpu_timer(void); | 119 | static void smp_setup_percpu_timer(void); |
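The cache geometry reads above all follow the same "property or default" pattern: take the PROM-reported value when the node has one, otherwise fall back to a conservative figure. A trivial sketch of that pattern (get_int_default() is a stand-in for prom_getintdefault(), values hypothetical):

#include <stdio.h>

static int get_int_default(const int *prop, int deflt)
{
        return prop ? *prop : deflt;
}

int main(void)
{
        int reported = 32 * 1024;       /* hypothetical "dcache-size" property */

        printf("dcache-size: %d\n", get_int_default(&reported, 16 * 1024));
        printf("dcache-size: %d\n", get_int_default(NULL, 16 * 1024));
        return 0;
}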
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S index 5f9e4fae612e..9cd272ac3ac1 100644 --- a/arch/sparc64/kernel/sys32.S +++ b/arch/sparc64/kernel/sys32.S | |||
@@ -157,173 +157,199 @@ sys32_socketcall: /* %o0=call, %o1=args */ | |||
157 | or %g2, %lo(__socketcall_table_begin), %g2 | 157 | or %g2, %lo(__socketcall_table_begin), %g2 |
158 | jmpl %g2 + %o0, %g0 | 158 | jmpl %g2 + %o0, %g0 |
159 | nop | 159 | nop |
160 | do_einval: | ||
161 | retl | ||
162 | mov -EINVAL, %o0 | ||
160 | 163 | ||
161 | /* Each entry is exactly 32 bytes. */ | ||
162 | .align 32 | 164 | .align 32 |
163 | __socketcall_table_begin: | 165 | __socketcall_table_begin: |
166 | |||
167 | /* Each entry is exactly 32 bytes. */ | ||
164 | do_sys_socket: /* sys_socket(int, int, int) */ | 168 | do_sys_socket: /* sys_socket(int, int, int) */ |
165 | ldswa [%o1 + 0x0] %asi, %o0 | 169 | 1: ldswa [%o1 + 0x0] %asi, %o0 |
166 | sethi %hi(sys_socket), %g1 | 170 | sethi %hi(sys_socket), %g1 |
167 | ldswa [%o1 + 0x8] %asi, %o2 | 171 | 2: ldswa [%o1 + 0x8] %asi, %o2 |
168 | jmpl %g1 + %lo(sys_socket), %g0 | 172 | jmpl %g1 + %lo(sys_socket), %g0 |
169 | ldswa [%o1 + 0x4] %asi, %o1 | 173 | 3: ldswa [%o1 + 0x4] %asi, %o1 |
170 | nop | 174 | nop |
171 | nop | 175 | nop |
172 | nop | 176 | nop |
173 | do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */ | 177 | do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */ |
174 | ldswa [%o1 + 0x0] %asi, %o0 | 178 | 4: ldswa [%o1 + 0x0] %asi, %o0 |
175 | sethi %hi(sys_bind), %g1 | 179 | sethi %hi(sys_bind), %g1 |
176 | ldswa [%o1 + 0x8] %asi, %o2 | 180 | 5: ldswa [%o1 + 0x8] %asi, %o2 |
177 | jmpl %g1 + %lo(sys_bind), %g0 | 181 | jmpl %g1 + %lo(sys_bind), %g0 |
178 | lduwa [%o1 + 0x4] %asi, %o1 | 182 | 6: lduwa [%o1 + 0x4] %asi, %o1 |
179 | nop | 183 | nop |
180 | nop | 184 | nop |
181 | nop | 185 | nop |
182 | do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */ | 186 | do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */ |
183 | ldswa [%o1 + 0x0] %asi, %o0 | 187 | 7: ldswa [%o1 + 0x0] %asi, %o0 |
184 | sethi %hi(sys_connect), %g1 | 188 | sethi %hi(sys_connect), %g1 |
185 | ldswa [%o1 + 0x8] %asi, %o2 | 189 | 8: ldswa [%o1 + 0x8] %asi, %o2 |
186 | jmpl %g1 + %lo(sys_connect), %g0 | 190 | jmpl %g1 + %lo(sys_connect), %g0 |
187 | lduwa [%o1 + 0x4] %asi, %o1 | 191 | 9: lduwa [%o1 + 0x4] %asi, %o1 |
188 | nop | 192 | nop |
189 | nop | 193 | nop |
190 | nop | 194 | nop |
191 | do_sys_listen: /* sys_listen(int, int) */ | 195 | do_sys_listen: /* sys_listen(int, int) */ |
192 | ldswa [%o1 + 0x0] %asi, %o0 | 196 | 10: ldswa [%o1 + 0x0] %asi, %o0 |
193 | sethi %hi(sys_listen), %g1 | 197 | sethi %hi(sys_listen), %g1 |
194 | jmpl %g1 + %lo(sys_listen), %g0 | 198 | jmpl %g1 + %lo(sys_listen), %g0 |
195 | ldswa [%o1 + 0x4] %asi, %o1 | 199 | 11: ldswa [%o1 + 0x4] %asi, %o1 |
196 | nop | 200 | nop |
197 | nop | 201 | nop |
198 | nop | 202 | nop |
199 | nop | 203 | nop |
200 | do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */ | 204 | do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */ |
201 | ldswa [%o1 + 0x0] %asi, %o0 | 205 | 12: ldswa [%o1 + 0x0] %asi, %o0 |
202 | sethi %hi(sys_accept), %g1 | 206 | sethi %hi(sys_accept), %g1 |
203 | lduwa [%o1 + 0x8] %asi, %o2 | 207 | 13: lduwa [%o1 + 0x8] %asi, %o2 |
204 | jmpl %g1 + %lo(sys_accept), %g0 | 208 | jmpl %g1 + %lo(sys_accept), %g0 |
205 | lduwa [%o1 + 0x4] %asi, %o1 | 209 | 14: lduwa [%o1 + 0x4] %asi, %o1 |
206 | nop | 210 | nop |
207 | nop | 211 | nop |
208 | nop | 212 | nop |
209 | do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */ | 213 | do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */ |
210 | ldswa [%o1 + 0x0] %asi, %o0 | 214 | 15: ldswa [%o1 + 0x0] %asi, %o0 |
211 | sethi %hi(sys_getsockname), %g1 | 215 | sethi %hi(sys_getsockname), %g1 |
212 | lduwa [%o1 + 0x8] %asi, %o2 | 216 | 16: lduwa [%o1 + 0x8] %asi, %o2 |
213 | jmpl %g1 + %lo(sys_getsockname), %g0 | 217 | jmpl %g1 + %lo(sys_getsockname), %g0 |
214 | lduwa [%o1 + 0x4] %asi, %o1 | 218 | 17: lduwa [%o1 + 0x4] %asi, %o1 |
215 | nop | 219 | nop |
216 | nop | 220 | nop |
217 | nop | 221 | nop |
218 | do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */ | 222 | do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */ |
219 | ldswa [%o1 + 0x0] %asi, %o0 | 223 | 18: ldswa [%o1 + 0x0] %asi, %o0 |
220 | sethi %hi(sys_getpeername), %g1 | 224 | sethi %hi(sys_getpeername), %g1 |
221 | lduwa [%o1 + 0x8] %asi, %o2 | 225 | 19: lduwa [%o1 + 0x8] %asi, %o2 |
222 | jmpl %g1 + %lo(sys_getpeername), %g0 | 226 | jmpl %g1 + %lo(sys_getpeername), %g0 |
223 | lduwa [%o1 + 0x4] %asi, %o1 | 227 | 20: lduwa [%o1 + 0x4] %asi, %o1 |
224 | nop | 228 | nop |
225 | nop | 229 | nop |
226 | nop | 230 | nop |
227 | do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */ | 231 | do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */ |
228 | ldswa [%o1 + 0x0] %asi, %o0 | 232 | 21: ldswa [%o1 + 0x0] %asi, %o0 |
229 | sethi %hi(sys_socketpair), %g1 | 233 | sethi %hi(sys_socketpair), %g1 |
230 | ldswa [%o1 + 0x8] %asi, %o2 | 234 | 22: ldswa [%o1 + 0x8] %asi, %o2 |
231 | lduwa [%o1 + 0xc] %asi, %o3 | 235 | 23: lduwa [%o1 + 0xc] %asi, %o3 |
232 | jmpl %g1 + %lo(sys_socketpair), %g0 | 236 | jmpl %g1 + %lo(sys_socketpair), %g0 |
233 | ldswa [%o1 + 0x4] %asi, %o1 | 237 | 24: ldswa [%o1 + 0x4] %asi, %o1 |
234 | nop | 238 | nop |
235 | nop | 239 | nop |
236 | do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */ | 240 | do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */ |
237 | ldswa [%o1 + 0x0] %asi, %o0 | 241 | 25: ldswa [%o1 + 0x0] %asi, %o0 |
238 | sethi %hi(sys_send), %g1 | 242 | sethi %hi(sys_send), %g1 |
239 | lduwa [%o1 + 0x8] %asi, %o2 | 243 | 26: lduwa [%o1 + 0x8] %asi, %o2 |
240 | lduwa [%o1 + 0xc] %asi, %o3 | 244 | 27: lduwa [%o1 + 0xc] %asi, %o3 |
241 | jmpl %g1 + %lo(sys_send), %g0 | 245 | jmpl %g1 + %lo(sys_send), %g0 |
242 | lduwa [%o1 + 0x4] %asi, %o1 | 246 | 28: lduwa [%o1 + 0x4] %asi, %o1 |
243 | nop | 247 | nop |
244 | nop | 248 | nop |
245 | do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */ | 249 | do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */ |
246 | ldswa [%o1 + 0x0] %asi, %o0 | 250 | 29: ldswa [%o1 + 0x0] %asi, %o0 |
247 | sethi %hi(sys_recv), %g1 | 251 | sethi %hi(sys_recv), %g1 |
248 | lduwa [%o1 + 0x8] %asi, %o2 | 252 | 30: lduwa [%o1 + 0x8] %asi, %o2 |
249 | lduwa [%o1 + 0xc] %asi, %o3 | 253 | 31: lduwa [%o1 + 0xc] %asi, %o3 |
250 | jmpl %g1 + %lo(sys_recv), %g0 | 254 | jmpl %g1 + %lo(sys_recv), %g0 |
251 | lduwa [%o1 + 0x4] %asi, %o1 | 255 | 32: lduwa [%o1 + 0x4] %asi, %o1 |
252 | nop | 256 | nop |
253 | nop | 257 | nop |
254 | do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */ | 258 | do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */ |
255 | ldswa [%o1 + 0x0] %asi, %o0 | 259 | 33: ldswa [%o1 + 0x0] %asi, %o0 |
256 | sethi %hi(sys_sendto), %g1 | 260 | sethi %hi(sys_sendto), %g1 |
257 | lduwa [%o1 + 0x8] %asi, %o2 | 261 | 34: lduwa [%o1 + 0x8] %asi, %o2 |
258 | lduwa [%o1 + 0xc] %asi, %o3 | 262 | 35: lduwa [%o1 + 0xc] %asi, %o3 |
259 | lduwa [%o1 + 0x10] %asi, %o4 | 263 | 36: lduwa [%o1 + 0x10] %asi, %o4 |
260 | ldswa [%o1 + 0x14] %asi, %o5 | 264 | 37: ldswa [%o1 + 0x14] %asi, %o5 |
261 | jmpl %g1 + %lo(sys_sendto), %g0 | 265 | jmpl %g1 + %lo(sys_sendto), %g0 |
262 | lduwa [%o1 + 0x4] %asi, %o1 | 266 | 38: lduwa [%o1 + 0x4] %asi, %o1 |
263 | do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */ | 267 | do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */ |
264 | ldswa [%o1 + 0x0] %asi, %o0 | 268 | 39: ldswa [%o1 + 0x0] %asi, %o0 |
265 | sethi %hi(sys_recvfrom), %g1 | 269 | sethi %hi(sys_recvfrom), %g1 |
266 | lduwa [%o1 + 0x8] %asi, %o2 | 270 | 40: lduwa [%o1 + 0x8] %asi, %o2 |
267 | lduwa [%o1 + 0xc] %asi, %o3 | 271 | 41: lduwa [%o1 + 0xc] %asi, %o3 |
268 | lduwa [%o1 + 0x10] %asi, %o4 | 272 | 42: lduwa [%o1 + 0x10] %asi, %o4 |
269 | lduwa [%o1 + 0x14] %asi, %o5 | 273 | 43: lduwa [%o1 + 0x14] %asi, %o5 |
270 | jmpl %g1 + %lo(sys_recvfrom), %g0 | 274 | jmpl %g1 + %lo(sys_recvfrom), %g0 |
271 | lduwa [%o1 + 0x4] %asi, %o1 | 275 | 44: lduwa [%o1 + 0x4] %asi, %o1 |
272 | do_sys_shutdown: /* sys_shutdown(int, int) */ | 276 | do_sys_shutdown: /* sys_shutdown(int, int) */ |
273 | ldswa [%o1 + 0x0] %asi, %o0 | 277 | 45: ldswa [%o1 + 0x0] %asi, %o0 |
274 | sethi %hi(sys_shutdown), %g1 | 278 | sethi %hi(sys_shutdown), %g1 |
275 | jmpl %g1 + %lo(sys_shutdown), %g0 | 279 | jmpl %g1 + %lo(sys_shutdown), %g0 |
276 | ldswa [%o1 + 0x4] %asi, %o1 | 280 | 46: ldswa [%o1 + 0x4] %asi, %o1 |
277 | nop | 281 | nop |
278 | nop | 282 | nop |
279 | nop | 283 | nop |
280 | nop | 284 | nop |
281 | do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ | 285 | do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ |
282 | ldswa [%o1 + 0x0] %asi, %o0 | 286 | 47: ldswa [%o1 + 0x0] %asi, %o0 |
283 | sethi %hi(compat_sys_setsockopt), %g1 | 287 | sethi %hi(compat_sys_setsockopt), %g1 |
284 | ldswa [%o1 + 0x8] %asi, %o2 | 288 | 48: ldswa [%o1 + 0x8] %asi, %o2 |
285 | lduwa [%o1 + 0xc] %asi, %o3 | 289 | 49: lduwa [%o1 + 0xc] %asi, %o3 |
286 | ldswa [%o1 + 0x10] %asi, %o4 | 290 | 50: ldswa [%o1 + 0x10] %asi, %o4 |
287 | jmpl %g1 + %lo(compat_sys_setsockopt), %g0 | 291 | jmpl %g1 + %lo(compat_sys_setsockopt), %g0 |
288 | ldswa [%o1 + 0x4] %asi, %o1 | 292 | 51: ldswa [%o1 + 0x4] %asi, %o1 |
289 | nop | 293 | nop |
290 | do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ | 294 | do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ |
291 | ldswa [%o1 + 0x0] %asi, %o0 | 295 | 52: ldswa [%o1 + 0x0] %asi, %o0 |
292 | sethi %hi(compat_sys_getsockopt), %g1 | 296 | sethi %hi(compat_sys_getsockopt), %g1 |
293 | ldswa [%o1 + 0x8] %asi, %o2 | 297 | 53: ldswa [%o1 + 0x8] %asi, %o2 |
294 | lduwa [%o1 + 0xc] %asi, %o3 | 298 | 54: lduwa [%o1 + 0xc] %asi, %o3 |
295 | lduwa [%o1 + 0x10] %asi, %o4 | 299 | 55: lduwa [%o1 + 0x10] %asi, %o4 |
296 | jmpl %g1 + %lo(compat_sys_getsockopt), %g0 | 300 | jmpl %g1 + %lo(compat_sys_getsockopt), %g0 |
297 | ldswa [%o1 + 0x4] %asi, %o1 | 301 | 56: ldswa [%o1 + 0x4] %asi, %o1 |
298 | nop | 302 | nop |
299 | do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ | 303 | do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ |
300 | ldswa [%o1 + 0x0] %asi, %o0 | 304 | 57: ldswa [%o1 + 0x0] %asi, %o0 |
301 | sethi %hi(compat_sys_sendmsg), %g1 | 305 | sethi %hi(compat_sys_sendmsg), %g1 |
302 | lduwa [%o1 + 0x8] %asi, %o2 | 306 | 58: lduwa [%o1 + 0x8] %asi, %o2 |
303 | jmpl %g1 + %lo(compat_sys_sendmsg), %g0 | 307 | jmpl %g1 + %lo(compat_sys_sendmsg), %g0 |
304 | lduwa [%o1 + 0x4] %asi, %o1 | 308 | 59: lduwa [%o1 + 0x4] %asi, %o1 |
305 | nop | 309 | nop |
306 | nop | 310 | nop |
307 | nop | 311 | nop |
308 | do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */ | 312 | do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */ |
309 | ldswa [%o1 + 0x0] %asi, %o0 | 313 | 60: ldswa [%o1 + 0x0] %asi, %o0 |
310 | sethi %hi(compat_sys_recvmsg), %g1 | 314 | sethi %hi(compat_sys_recvmsg), %g1 |
311 | lduwa [%o1 + 0x8] %asi, %o2 | 315 | 61: lduwa [%o1 + 0x8] %asi, %o2 |
312 | jmpl %g1 + %lo(compat_sys_recvmsg), %g0 | 316 | jmpl %g1 + %lo(compat_sys_recvmsg), %g0 |
313 | lduwa [%o1 + 0x4] %asi, %o1 | 317 | 62: lduwa [%o1 + 0x4] %asi, %o1 |
314 | nop | 318 | nop |
315 | nop | 319 | nop |
316 | nop | 320 | nop |
317 | __socketcall_table_end: | ||
318 | |||
319 | do_einval: | ||
320 | retl | ||
321 | mov -EINVAL, %o0 | ||
322 | do_efault: | ||
323 | retl | ||
324 | mov -EFAULT, %o0 | ||
325 | 321 | ||
326 | .section __ex_table | 322 | .section __ex_table |
327 | .align 4 | 323 | .align 4 |
328 | .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault | 324 | .word 1b, __retl_efault, 2b, __retl_efault |
325 | .word 3b, __retl_efault, 4b, __retl_efault | ||
326 | .word 5b, __retl_efault, 6b, __retl_efault | ||
327 | .word 7b, __retl_efault, 8b, __retl_efault | ||
328 | .word 9b, __retl_efault, 10b, __retl_efault | ||
329 | .word 11b, __retl_efault, 12b, __retl_efault | ||
330 | .word 13b, __retl_efault, 14b, __retl_efault | ||
331 | .word 15b, __retl_efault, 16b, __retl_efault | ||
332 | .word 17b, __retl_efault, 18b, __retl_efault | ||
333 | .word 19b, __retl_efault, 20b, __retl_efault | ||
334 | .word 21b, __retl_efault, 22b, __retl_efault | ||
335 | .word 23b, __retl_efault, 24b, __retl_efault | ||
336 | .word 25b, __retl_efault, 26b, __retl_efault | ||
337 | .word 27b, __retl_efault, 28b, __retl_efault | ||
338 | .word 29b, __retl_efault, 30b, __retl_efault | ||
339 | .word 31b, __retl_efault, 32b, __retl_efault | ||
340 | .word 33b, __retl_efault, 34b, __retl_efault | ||
341 | .word 35b, __retl_efault, 36b, __retl_efault | ||
342 | .word 37b, __retl_efault, 38b, __retl_efault | ||
343 | .word 39b, __retl_efault, 40b, __retl_efault | ||
344 | .word 41b, __retl_efault, 42b, __retl_efault | ||
345 | .word 43b, __retl_efault, 44b, __retl_efault | ||
346 | .word 45b, __retl_efault, 46b, __retl_efault | ||
347 | .word 47b, __retl_efault, 48b, __retl_efault | ||
348 | .word 49b, __retl_efault, 50b, __retl_efault | ||
349 | .word 51b, __retl_efault, 52b, __retl_efault | ||
350 | .word 53b, __retl_efault, 54b, __retl_efault | ||
351 | .word 55b, __retl_efault, 56b, __retl_efault | ||
352 | .word 57b, __retl_efault, 58b, __retl_efault | ||
353 | .word 59b, __retl_efault, 60b, __retl_efault | ||
354 | .word 61b, __retl_efault, 62b, __retl_efault | ||
329 | .previous | 355 | .previous |
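Each __ex_table word pair above now binds a single faulting load (labels 1 through 62) to a fixup address, __retl_efault, which returns -EFAULT to the 32-bit socketcall caller. A small illustration of the lookup idea, using made-up addresses and my own struct rather than the kernel's actual exception-table definitions; the C handlers later in this patch do the same job via search_exception_tables() and entry->fixup:

#include <stdio.h>

struct ex_entry { unsigned long insn, fixup; };

static unsigned long lookup_fixup(const struct ex_entry *tbl, int n,
                                  unsigned long faulting_pc)
{
        int i;

        for (i = 0; i < n; i++)
                if (tbl[i].insn == faulting_pc)
                        return tbl[i].fixup;
        return 0;       /* no entry: the fault is fatal for kernel code */
}

int main(void)
{
        /* hypothetical instruction and fixup addresses */
        struct ex_entry table[] = { { 0x404100UL, 0x405000UL },
                                    { 0x404108UL, 0x405000UL } };

        printf("fixup: %#lx\n", lookup_fixup(table, 2, 0x404108UL));
        return 0;
}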
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 3a145fc39cf2..89f2fcfcd662 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S | |||
@@ -119,8 +119,8 @@ startup_continue: | |||
119 | sethi %hi(itlb_load), %g2 | 119 | sethi %hi(itlb_load), %g2 |
120 | or %g2, %lo(itlb_load), %g2 | 120 | or %g2, %lo(itlb_load), %g2 |
121 | stx %g2, [%sp + 2047 + 128 + 0x18] | 121 | stx %g2, [%sp + 2047 + 128 + 0x18] |
122 | sethi %hi(mmu_ihandle_cache), %g2 | 122 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
123 | lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 | 123 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
124 | stx %g2, [%sp + 2047 + 128 + 0x20] | 124 | stx %g2, [%sp + 2047 + 128 + 0x20] |
125 | sethi %hi(KERNBASE), %g2 | 125 | sethi %hi(KERNBASE), %g2 |
126 | stx %g2, [%sp + 2047 + 128 + 0x28] | 126 | stx %g2, [%sp + 2047 + 128 + 0x28] |
@@ -156,8 +156,8 @@ startup_continue: | |||
156 | sethi %hi(itlb_load), %g2 | 156 | sethi %hi(itlb_load), %g2 |
157 | or %g2, %lo(itlb_load), %g2 | 157 | or %g2, %lo(itlb_load), %g2 |
158 | stx %g2, [%sp + 2047 + 128 + 0x18] | 158 | stx %g2, [%sp + 2047 + 128 + 0x18] |
159 | sethi %hi(mmu_ihandle_cache), %g2 | 159 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
160 | lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 | 160 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
161 | stx %g2, [%sp + 2047 + 128 + 0x20] | 161 | stx %g2, [%sp + 2047 + 128 + 0x20] |
162 | sethi %hi(KERNBASE + 0x400000), %g2 | 162 | sethi %hi(KERNBASE + 0x400000), %g2 |
163 | stx %g2, [%sp + 2047 + 128 + 0x28] | 163 | stx %g2, [%sp + 2047 + 128 + 0x28] |
@@ -190,8 +190,8 @@ do_dtlb: | |||
190 | sethi %hi(dtlb_load), %g2 | 190 | sethi %hi(dtlb_load), %g2 |
191 | or %g2, %lo(dtlb_load), %g2 | 191 | or %g2, %lo(dtlb_load), %g2 |
192 | stx %g2, [%sp + 2047 + 128 + 0x18] | 192 | stx %g2, [%sp + 2047 + 128 + 0x18] |
193 | sethi %hi(mmu_ihandle_cache), %g2 | 193 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
194 | lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 | 194 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
195 | stx %g2, [%sp + 2047 + 128 + 0x20] | 195 | stx %g2, [%sp + 2047 + 128 + 0x20] |
196 | sethi %hi(KERNBASE), %g2 | 196 | sethi %hi(KERNBASE), %g2 |
197 | stx %g2, [%sp + 2047 + 128 + 0x28] | 197 | stx %g2, [%sp + 2047 + 128 + 0x28] |
@@ -228,8 +228,8 @@ do_dtlb: | |||
228 | sethi %hi(dtlb_load), %g2 | 228 | sethi %hi(dtlb_load), %g2 |
229 | or %g2, %lo(dtlb_load), %g2 | 229 | or %g2, %lo(dtlb_load), %g2 |
230 | stx %g2, [%sp + 2047 + 128 + 0x18] | 230 | stx %g2, [%sp + 2047 + 128 + 0x18] |
231 | sethi %hi(mmu_ihandle_cache), %g2 | 231 | sethi %hi(prom_mmu_ihandle_cache), %g2 |
232 | lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 | 232 | lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 |
233 | stx %g2, [%sp + 2047 + 128 + 0x20] | 233 | stx %g2, [%sp + 2047 + 128 + 0x20] |
234 | sethi %hi(KERNBASE + 0x400000), %g2 | 234 | sethi %hi(KERNBASE + 0x400000), %g2 |
235 | stx %g2, [%sp + 2047 + 128 + 0x28] | 235 | stx %g2, [%sp + 2047 + 128 + 0x28] |
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index b280b2ef674f..5570e7bb22bb 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c | |||
@@ -189,19 +189,18 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un | |||
189 | 189 | ||
190 | if (regs->tstate & TSTATE_PRIV) { | 190 | if (regs->tstate & TSTATE_PRIV) { |
191 | /* Test if this comes from uaccess places. */ | 191 | /* Test if this comes from uaccess places. */ |
192 | unsigned long fixup; | 192 | const struct exception_table_entry *entry; |
193 | unsigned long g2 = regs->u_regs[UREG_G2]; | ||
194 | 193 | ||
195 | if ((fixup = search_extables_range(regs->tpc, &g2))) { | 194 | entry = search_exception_tables(regs->tpc); |
196 | /* Ouch, somebody is trying ugly VM hole tricks on us... */ | 195 | if (entry) { |
196 | /* Ouch, somebody is trying VM hole tricks on us... */ | ||
197 | #ifdef DEBUG_EXCEPTIONS | 197 | #ifdef DEBUG_EXCEPTIONS |
198 | printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); | 198 | printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); |
199 | printk("EX_TABLE: insn<%016lx> fixup<%016lx> " | 199 | printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n", |
200 | "g2<%016lx>\n", regs->tpc, fixup, g2); | 200 | regs->tpc, entry->fixup); |
201 | #endif | 201 | #endif |
202 | regs->tpc = fixup; | 202 | regs->tpc = entry->fixup; |
203 | regs->tnpc = regs->tpc + 4; | 203 | regs->tnpc = regs->tpc + 4; |
204 | regs->u_regs[UREG_G2] = g2; | ||
205 | return; | 204 | return; |
206 | } | 205 | } |
207 | /* Shit... */ | 206 | /* Shit... */ |
@@ -758,26 +757,12 @@ void __init cheetah_ecache_flush_init(void) | |||
758 | ecache_flush_size = (2 * largest_size); | 757 | ecache_flush_size = (2 * largest_size); |
759 | ecache_flush_linesize = smallest_linesize; | 758 | ecache_flush_linesize = smallest_linesize; |
760 | 759 | ||
761 | /* Discover a physically contiguous chunk of physical | 760 | ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size); |
762 | * memory in 'sp_banks' of size ecache_flush_size calculated | ||
763 | * above. Store the physical base of this area at | ||
764 | * ecache_flush_physbase. | ||
765 | */ | ||
766 | for (node = 0; ; node++) { | ||
767 | if (sp_banks[node].num_bytes == 0) | ||
768 | break; | ||
769 | if (sp_banks[node].num_bytes >= ecache_flush_size) { | ||
770 | ecache_flush_physbase = sp_banks[node].base_addr; | ||
771 | break; | ||
772 | } | ||
773 | } | ||
774 | 761 | ||
775 | /* Note: Zero would be a valid value of ecache_flush_physbase so | 762 | if (ecache_flush_physbase == ~0UL) { |
776 | * don't use that as the success test. :-) | ||
777 | */ | ||
778 | if (sp_banks[node].num_bytes == 0) { | ||
779 | prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " | 763 | prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " |
780 | "contiguous physical memory.\n", ecache_flush_size); | 764 | "contiguous physical memory.\n", |
765 | ecache_flush_size); | ||
781 | prom_halt(); | 766 | prom_halt(); |
782 | } | 767 | } |
783 | 768 | ||
@@ -869,14 +854,19 @@ static void cheetah_flush_ecache_line(unsigned long physaddr) | |||
869 | */ | 854 | */ |
870 | static void __cheetah_flush_icache(void) | 855 | static void __cheetah_flush_icache(void) |
871 | { | 856 | { |
872 | unsigned long i; | 857 | unsigned int icache_size, icache_line_size; |
858 | unsigned long addr; | ||
859 | |||
860 | icache_size = local_cpu_data().icache_size; | ||
861 | icache_line_size = local_cpu_data().icache_line_size; | ||
873 | 862 | ||
874 | /* Clear the valid bits in all the tags. */ | 863 | /* Clear the valid bits in all the tags. */ |
875 | for (i = 0; i < (1 << 15); i += (1 << 5)) { | 864 | for (addr = 0; addr < icache_size; addr += icache_line_size) { |
876 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 865 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" |
877 | "membar #Sync" | 866 | "membar #Sync" |
878 | : /* no outputs */ | 867 | : /* no outputs */ |
879 | : "r" (i | (2 << 3)), "i" (ASI_IC_TAG)); | 868 | : "r" (addr | (2 << 3)), |
869 | "i" (ASI_IC_TAG)); | ||
880 | } | 870 | } |
881 | } | 871 | } |
882 | 872 | ||
@@ -904,13 +894,17 @@ static void cheetah_flush_icache(void) | |||
904 | 894 | ||
905 | static void cheetah_flush_dcache(void) | 895 | static void cheetah_flush_dcache(void) |
906 | { | 896 | { |
907 | unsigned long i; | 897 | unsigned int dcache_size, dcache_line_size; |
898 | unsigned long addr; | ||
908 | 899 | ||
909 | for (i = 0; i < (1 << 16); i += (1 << 5)) { | 900 | dcache_size = local_cpu_data().dcache_size; |
901 | dcache_line_size = local_cpu_data().dcache_line_size; | ||
902 | |||
903 | for (addr = 0; addr < dcache_size; addr += dcache_line_size) { | ||
910 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 904 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" |
911 | "membar #Sync" | 905 | "membar #Sync" |
912 | : /* no outputs */ | 906 | : /* no outputs */ |
913 | : "r" (i), "i" (ASI_DCACHE_TAG)); | 907 | : "r" (addr), "i" (ASI_DCACHE_TAG)); |
914 | } | 908 | } |
915 | } | 909 | } |
916 | 910 | ||
@@ -921,24 +915,29 @@ static void cheetah_flush_dcache(void) | |||
921 | */ | 915 | */ |
922 | static void cheetah_plus_zap_dcache_parity(void) | 916 | static void cheetah_plus_zap_dcache_parity(void) |
923 | { | 917 | { |
924 | unsigned long i; | 918 | unsigned int dcache_size, dcache_line_size; |
919 | unsigned long addr; | ||
920 | |||
921 | dcache_size = local_cpu_data().dcache_size; | ||
922 | dcache_line_size = local_cpu_data().dcache_line_size; | ||
925 | 923 | ||
926 | for (i = 0; i < (1 << 16); i += (1 << 5)) { | 924 | for (addr = 0; addr < dcache_size; addr += dcache_line_size) { |
927 | unsigned long tag = (i >> 14); | 925 | unsigned long tag = (addr >> 14); |
928 | unsigned long j; | 926 | unsigned long line; |
929 | 927 | ||
930 | __asm__ __volatile__("membar #Sync\n\t" | 928 | __asm__ __volatile__("membar #Sync\n\t" |
931 | "stxa %0, [%1] %2\n\t" | 929 | "stxa %0, [%1] %2\n\t" |
932 | "membar #Sync" | 930 | "membar #Sync" |
933 | : /* no outputs */ | 931 | : /* no outputs */ |
934 | : "r" (tag), "r" (i), | 932 | : "r" (tag), "r" (addr), |
935 | "i" (ASI_DCACHE_UTAG)); | 933 | "i" (ASI_DCACHE_UTAG)); |
936 | for (j = i; j < i + (1 << 5); j += (1 << 3)) | 934 | for (line = addr; line < addr + dcache_line_size; line += 8) |
937 | __asm__ __volatile__("membar #Sync\n\t" | 935 | __asm__ __volatile__("membar #Sync\n\t" |
938 | "stxa %%g0, [%0] %1\n\t" | 936 | "stxa %%g0, [%0] %1\n\t" |
939 | "membar #Sync" | 937 | "membar #Sync" |
940 | : /* no outputs */ | 938 | : /* no outputs */ |
941 | : "r" (j), "i" (ASI_DCACHE_DATA)); | 939 | : "r" (line), |
940 | "i" (ASI_DCACHE_DATA)); | ||
942 | } | 941 | } |
943 | } | 942 | } |
944 | 943 | ||
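The three flush loops above drop the hard-wired Cheetah geometry (1 << 15 and 1 << 16 byte caches, 1 << 5 byte lines) in favour of sizes read from cpu_data. A minimal standalone sketch of the parameterised walk, with hypothetical values and a stub in place of the stxa/membar sequence:

#include <stdio.h>

static void clear_tag(unsigned long addr)
{
        /* stands in for: stxa %g0, [addr] ASI_DCACHE_TAG; membar #Sync */
        (void) addr;
}

int main(void)
{
        unsigned int dcache_size = 64 * 1024;   /* hypothetical, from cpu_data */
        unsigned int dcache_line_size = 32;
        unsigned long addr, lines = 0;

        for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
                clear_tag(addr);
                lines++;
        }
        printf("cleared %lu tags\n", lines);
        return 0;
}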
@@ -1332,16 +1331,12 @@ static int cheetah_fix_ce(unsigned long physaddr) | |||
1332 | /* Return non-zero if PADDR is a valid physical memory address. */ | 1331 | /* Return non-zero if PADDR is a valid physical memory address. */ |
1333 | static int cheetah_check_main_memory(unsigned long paddr) | 1332 | static int cheetah_check_main_memory(unsigned long paddr) |
1334 | { | 1333 | { |
1335 | int i; | 1334 | unsigned long vaddr = PAGE_OFFSET + paddr; |
1336 | 1335 | ||
1337 | for (i = 0; ; i++) { | 1336 | if (vaddr > (unsigned long) high_memory) |
1338 | if (sp_banks[i].num_bytes == 0) | 1337 | return 0; |
1339 | break; | 1338 | |
1340 | if (paddr >= sp_banks[i].base_addr && | 1339 | return kern_addr_valid(vaddr); |
1341 | paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes)) | ||
1342 | return 1; | ||
1343 | } | ||
1344 | return 0; | ||
1345 | } | 1340 | } |
1346 | 1341 | ||
1347 | void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) | 1342 | void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) |
@@ -1596,10 +1591,10 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned | |||
1596 | /* OK, usermode access. */ | 1591 | /* OK, usermode access. */ |
1597 | recoverable = 1; | 1592 | recoverable = 1; |
1598 | } else { | 1593 | } else { |
1599 | unsigned long g2 = regs->u_regs[UREG_G2]; | 1594 | const struct exception_table_entry *entry; |
1600 | unsigned long fixup = search_extables_range(regs->tpc, &g2); | ||
1601 | 1595 | ||
1602 | if (fixup != 0UL) { | 1596 | entry = search_exception_tables(regs->tpc); |
1597 | if (entry) { | ||
1603 | /* OK, kernel access to userspace. */ | 1598 | /* OK, kernel access to userspace. */ |
1604 | recoverable = 1; | 1599 | recoverable = 1; |
1605 | 1600 | ||
@@ -1618,9 +1613,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned | |||
1618 | * recoverable condition. | 1613 | * recoverable condition. |
1619 | */ | 1614 | */ |
1620 | if (recoverable) { | 1615 | if (recoverable) { |
1621 | regs->tpc = fixup; | 1616 | regs->tpc = entry->fixup; |
1622 | regs->tnpc = regs->tpc + 4; | 1617 | regs->tnpc = regs->tpc + 4; |
1623 | regs->u_regs[UREG_G2] = g2; | ||
1624 | } | 1618 | } |
1625 | } | 1619 | } |
1626 | } | 1620 | } |
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S index da48400bcc95..1f5b5b708ce7 100644 --- a/arch/sparc64/kernel/una_asm.S +++ b/arch/sparc64/kernel/una_asm.S | |||
@@ -6,13 +6,6 @@ | |||
6 | 6 | ||
7 | .text | 7 | .text |
8 | 8 | ||
9 | kernel_unaligned_trap_fault: | ||
10 | call kernel_mna_trap_fault | ||
11 | nop | ||
12 | retl | ||
13 | nop | ||
14 | .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault | ||
15 | |||
16 | .globl __do_int_store | 9 | .globl __do_int_store |
17 | __do_int_store: | 10 | __do_int_store: |
18 | rd %asi, %o4 | 11 | rd %asi, %o4 |
@@ -51,24 +44,24 @@ __do_int_store: | |||
51 | 0: | 44 | 0: |
52 | wr %o4, 0x0, %asi | 45 | wr %o4, 0x0, %asi |
53 | retl | 46 | retl |
54 | nop | 47 | mov 0, %o0 |
55 | .size __do_int_store, .-__do_int_store | 48 | .size __do_int_store, .-__do_int_store |
56 | 49 | ||
57 | .section __ex_table | 50 | .section __ex_table |
58 | .word 4b, kernel_unaligned_trap_fault | 51 | .word 4b, __retl_efault |
59 | .word 5b, kernel_unaligned_trap_fault | 52 | .word 5b, __retl_efault |
60 | .word 6b, kernel_unaligned_trap_fault | 53 | .word 6b, __retl_efault |
61 | .word 7b, kernel_unaligned_trap_fault | 54 | .word 7b, __retl_efault |
62 | .word 8b, kernel_unaligned_trap_fault | 55 | .word 8b, __retl_efault |
63 | .word 9b, kernel_unaligned_trap_fault | 56 | .word 9b, __retl_efault |
64 | .word 10b, kernel_unaligned_trap_fault | 57 | .word 10b, __retl_efault |
65 | .word 11b, kernel_unaligned_trap_fault | 58 | .word 11b, __retl_efault |
66 | .word 12b, kernel_unaligned_trap_fault | 59 | .word 12b, __retl_efault |
67 | .word 13b, kernel_unaligned_trap_fault | 60 | .word 13b, __retl_efault |
68 | .word 14b, kernel_unaligned_trap_fault | 61 | .word 14b, __retl_efault |
69 | .word 15b, kernel_unaligned_trap_fault | 62 | .word 15b, __retl_efault |
70 | .word 16b, kernel_unaligned_trap_fault | 63 | .word 16b, __retl_efault |
71 | .word 17b, kernel_unaligned_trap_fault | 64 | .word 17b, __retl_efault |
72 | .previous | 65 | .previous |
73 | 66 | ||
74 | .globl do_int_load | 67 | .globl do_int_load |
@@ -133,21 +126,21 @@ do_int_load: | |||
133 | 0: | 126 | 0: |
134 | wr %o5, 0x0, %asi | 127 | wr %o5, 0x0, %asi |
135 | retl | 128 | retl |
136 | nop | 129 | mov 0, %o0 |
137 | .size __do_int_load, .-__do_int_load | 130 | .size __do_int_load, .-__do_int_load |
138 | 131 | ||
139 | .section __ex_table | 132 | .section __ex_table |
140 | .word 4b, kernel_unaligned_trap_fault | 133 | .word 4b, __retl_efault |
141 | .word 5b, kernel_unaligned_trap_fault | 134 | .word 5b, __retl_efault |
142 | .word 6b, kernel_unaligned_trap_fault | 135 | .word 6b, __retl_efault |
143 | .word 7b, kernel_unaligned_trap_fault | 136 | .word 7b, __retl_efault |
144 | .word 8b, kernel_unaligned_trap_fault | 137 | .word 8b, __retl_efault |
145 | .word 9b, kernel_unaligned_trap_fault | 138 | .word 9b, __retl_efault |
146 | .word 10b, kernel_unaligned_trap_fault | 139 | .word 10b, __retl_efault |
147 | .word 11b, kernel_unaligned_trap_fault | 140 | .word 11b, __retl_efault |
148 | .word 12b, kernel_unaligned_trap_fault | 141 | .word 12b, __retl_efault |
149 | .word 13b, kernel_unaligned_trap_fault | 142 | .word 13b, __retl_efault |
150 | .word 14b, kernel_unaligned_trap_fault | 143 | .word 14b, __retl_efault |
151 | .word 15b, kernel_unaligned_trap_fault | 144 | .word 15b, __retl_efault |
152 | .word 16b, kernel_unaligned_trap_fault | 145 | .word 16b, __retl_efault |
153 | .previous | 146 | .previous |
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c index 42718f6a7d36..70faf630603b 100644 --- a/arch/sparc64/kernel/unaligned.c +++ b/arch/sparc64/kernel/unaligned.c | |||
@@ -180,14 +180,14 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs) | |||
180 | die_if_kernel(str, regs); | 180 | die_if_kernel(str, regs); |
181 | } | 181 | } |
182 | 182 | ||
183 | extern void do_int_load(unsigned long *dest_reg, int size, | 183 | extern int do_int_load(unsigned long *dest_reg, int size, |
184 | unsigned long *saddr, int is_signed, int asi); | 184 | unsigned long *saddr, int is_signed, int asi); |
185 | 185 | ||
186 | extern void __do_int_store(unsigned long *dst_addr, int size, | 186 | extern int __do_int_store(unsigned long *dst_addr, int size, |
187 | unsigned long src_val, int asi); | 187 | unsigned long src_val, int asi); |
188 | 188 | ||
189 | static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, | 189 | static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr, |
190 | struct pt_regs *regs, int asi, int orig_asi) | 190 | struct pt_regs *regs, int asi, int orig_asi) |
191 | { | 191 | { |
192 | unsigned long zero = 0; | 192 | unsigned long zero = 0; |
193 | unsigned long *src_val_p = &zero; | 193 | unsigned long *src_val_p = &zero; |
@@ -219,7 +219,7 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, | |||
219 | break; | 219 | break; |
220 | }; | 220 | }; |
221 | } | 221 | } |
222 | __do_int_store(dst_addr, size, src_val, asi); | 222 | return __do_int_store(dst_addr, size, src_val, asi); |
223 | } | 223 | } |
224 | 224 | ||
225 | static inline void advance(struct pt_regs *regs) | 225 | static inline void advance(struct pt_regs *regs) |
@@ -242,14 +242,14 @@ static inline int ok_for_kernel(unsigned int insn) | |||
242 | return !floating_point_load_or_store_p(insn); | 242 | return !floating_point_load_or_store_p(insn); |
243 | } | 243 | } |
244 | 244 | ||
245 | void kernel_mna_trap_fault(void) | 245 | static void kernel_mna_trap_fault(void) |
246 | { | 246 | { |
247 | struct pt_regs *regs = current_thread_info()->kern_una_regs; | 247 | struct pt_regs *regs = current_thread_info()->kern_una_regs; |
248 | unsigned int insn = current_thread_info()->kern_una_insn; | 248 | unsigned int insn = current_thread_info()->kern_una_insn; |
249 | unsigned long g2 = regs->u_regs[UREG_G2]; | 249 | const struct exception_table_entry *entry; |
250 | unsigned long fixup = search_extables_range(regs->tpc, &g2); | ||
251 | 250 | ||
252 | if (!fixup) { | 251 | entry = search_exception_tables(regs->tpc); |
252 | if (!entry) { | ||
253 | unsigned long address; | 253 | unsigned long address; |
254 | 254 | ||
255 | address = compute_effective_address(regs, insn, | 255 | address = compute_effective_address(regs, insn, |
@@ -270,9 +270,8 @@ void kernel_mna_trap_fault(void) | |||
270 | die_if_kernel("Oops", regs); | 270 | die_if_kernel("Oops", regs); |
271 | /* Not reached */ | 271 | /* Not reached */ |
272 | } | 272 | } |
273 | regs->tpc = fixup; | 273 | regs->tpc = entry->fixup; |
274 | regs->tnpc = regs->tpc + 4; | 274 | regs->tnpc = regs->tpc + 4; |
275 | regs->u_regs [UREG_G2] = g2; | ||
276 | 275 | ||
277 | regs->tstate &= ~TSTATE_ASI; | 276 | regs->tstate &= ~TSTATE_ASI; |
278 | regs->tstate |= (ASI_AIUS << 24UL); | 277 | regs->tstate |= (ASI_AIUS << 24UL); |
@@ -294,8 +293,8 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u | |||
294 | 293 | ||
295 | kernel_mna_trap_fault(); | 294 | kernel_mna_trap_fault(); |
296 | } else { | 295 | } else { |
297 | unsigned long addr; | 296 | unsigned long addr, *reg_addr; |
298 | int orig_asi, asi; | 297 | int orig_asi, asi, err; |
299 | 298 | ||
300 | addr = compute_effective_address(regs, insn, | 299 | addr = compute_effective_address(regs, insn, |
301 | ((insn >> 25) & 0x1f)); | 300 | ((insn >> 25) & 0x1f)); |
@@ -319,11 +318,12 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u | |||
319 | }; | 318 | }; |
320 | switch (dir) { | 319 | switch (dir) { |
321 | case load: | 320 | case load: |
322 | do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), | 321 | reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); |
323 | size, (unsigned long *) addr, | 322 | err = do_int_load(reg_addr, size, |
324 | decode_signedness(insn), asi); | 323 | (unsigned long *) addr, |
325 | if (unlikely(asi != orig_asi)) { | 324 | decode_signedness(insn), asi); |
326 | unsigned long val_in = *(unsigned long *) addr; | 325 | if (likely(!err) && unlikely(asi != orig_asi)) { |
326 | unsigned long val_in = *reg_addr; | ||
327 | switch (size) { | 327 | switch (size) { |
328 | case 2: | 328 | case 2: |
329 | val_in = swab16(val_in); | 329 | val_in = swab16(val_in); |
@@ -339,21 +339,24 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u | |||
339 | BUG(); | 339 | BUG(); |
340 | break; | 340 | break; |
341 | }; | 341 | }; |
342 | *(unsigned long *) addr = val_in; | 342 | *reg_addr = val_in; |
343 | } | 343 | } |
344 | break; | 344 | break; |
345 | 345 | ||
346 | case store: | 346 | case store: |
347 | do_int_store(((insn>>25)&0x1f), size, | 347 | err = do_int_store(((insn>>25)&0x1f), size, |
348 | (unsigned long *) addr, regs, | 348 | (unsigned long *) addr, regs, |
349 | asi, orig_asi); | 349 | asi, orig_asi); |
350 | break; | 350 | break; |
351 | 351 | ||
352 | default: | 352 | default: |
353 | panic("Impossible kernel unaligned trap."); | 353 | panic("Impossible kernel unaligned trap."); |
354 | /* Not reached... */ | 354 | /* Not reached... */ |
355 | } | 355 | } |
356 | advance(regs); | 356 | if (unlikely(err)) |
357 | kernel_mna_trap_fault(); | ||
358 | else | ||
359 | advance(regs); | ||
357 | } | 360 | } |
358 | } | 361 | } |
359 | 362 | ||
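The unaligned-trap rework above switches from asm-side fault branches to a return-value convention: the access helpers return 0 on success or -EFAULT via __retl_efault, and the trap handler either advances past the instruction or takes the fixup path. A userspace sketch of that control flow, with stand-in function names rather than the kernel's:

#include <stdio.h>
#include <errno.h>

static int do_access(int should_fault)
{
        /* stands in for do_int_load()/__do_int_store() */
        return should_fault ? -EFAULT : 0;
}

static void handle_trap(int should_fault)
{
        int err = do_access(should_fault);

        if (err)
                printf("fault path (err=%d)\n", err);     /* kernel_mna_trap_fault() */
        else
                printf("advance past the instruction\n"); /* advance(regs) */
}

int main(void)
{
        handle_trap(0);
        handle_trap(1);
        return 0;
}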
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c index 9080e7cd4bb0..0340041f6143 100644 --- a/arch/sparc64/kernel/us3_cpufreq.c +++ b/arch/sparc64/kernel/us3_cpufreq.c | |||
@@ -208,7 +208,10 @@ static int __init us3_freq_init(void) | |||
208 | impl = ((ver >> 32) & 0xffff); | 208 | impl = ((ver >> 32) & 0xffff); |
209 | 209 | ||
210 | if (manuf == CHEETAH_MANUF && | 210 | if (manuf == CHEETAH_MANUF && |
211 | (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) { | 211 | (impl == CHEETAH_IMPL || |
212 | impl == CHEETAH_PLUS_IMPL || | ||
213 | impl == JAGUAR_IMPL || | ||
214 | impl == PANTHER_IMPL)) { | ||
212 | struct cpufreq_driver *driver; | 215 | struct cpufreq_driver *driver; |
213 | 216 | ||
214 | ret = -ENOMEM; | 217 | ret = -ENOMEM; |
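The cpufreq probe above now also accepts Jaguar and Panther. A sketch of the VER-register check being extended: the manufacturer and implementation fields sit in bits 63:48 and 47:32 of %ver; the numeric codes below are the usual UltraSPARC-III family values and are listed here as assumptions, not taken from this hunk.

#include <stdio.h>

#define CHEETAH_MANUF          0x3e
#define CHEETAH_IMPL           0x14    /* UltraSPARC III  */
#define CHEETAH_PLUS_IMPL      0x15    /* UltraSPARC III+ */
#define JAGUAR_IMPL            0x18    /* UltraSPARC IV   */
#define PANTHER_IMPL           0x19    /* UltraSPARC IV+  */

static int us3_freq_supported(unsigned long ver)
{
        unsigned long manuf = (ver >> 48) & 0xffff;
        unsigned long impl  = (ver >> 32) & 0xffff;

        return manuf == CHEETAH_MANUF &&
               (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL ||
                impl == JAGUAR_IMPL || impl == PANTHER_IMPL);
}

int main(void)
{
        unsigned long ver = (0x3eUL << 48) | (0x19UL << 32);    /* hypothetical Panther */

        printf("supported: %d\n", us3_freq_supported(ver));
        return 0;
}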
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index f47d0be39378..2af0cf0a8640 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S | |||
@@ -9,8 +9,7 @@ ENTRY(_start) | |||
9 | jiffies = jiffies_64; | 9 | jiffies = jiffies_64; |
10 | SECTIONS | 10 | SECTIONS |
11 | { | 11 | { |
12 | swapper_pmd_dir = 0x0000000000402000; | 12 | swapper_low_pmd_dir = 0x0000000000402000; |
13 | empty_pg_dir = 0x0000000000403000; | ||
14 | . = 0x4000; | 13 | . = 0x4000; |
15 | .text 0x0000000000404000 : | 14 | .text 0x0000000000404000 : |
16 | { | 15 | { |