aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc64/kernel/entry.S
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sparc64/kernel/entry.S')
-rw-r--r--arch/sparc64/kernel/entry.S262
1 files changed, 47 insertions, 215 deletions
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index b48349527853..11a848402fb1 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -30,163 +30,10 @@
30 .text 30 .text
31 .align 32 31 .align 32
32 32
33 .globl sparc64_vpte_patchme1
34 .globl sparc64_vpte_patchme2
35/*
36 * On a second level vpte miss, check whether the original fault is to the OBP
37 * range (note that this is only possible for instruction miss, data misses to
38 * obp range do not use vpte). If so, go back directly to the faulting address.
39 * This is because we want to read the tpc, otherwise we have no way of knowing
40 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
41 * also ensures no vpte range addresses are dropped into tlb while obp is
42 * executing (see inherit_locked_prom_mappings() rant).
43 */
44sparc64_vpte_nucleus:
45 /* Note that kvmap below has verified that the address is
46 * in the range MODULES_VADDR --> VMALLOC_END already. So
47 * here we need only check if it is an OBP address or not.
48 */
49 sethi %hi(LOW_OBP_ADDRESS), %g5
50 cmp %g4, %g5
51 blu,pn %xcc, sparc64_vpte_patchme1
52 mov 0x1, %g5
53 sllx %g5, 32, %g5
54 cmp %g4, %g5
55 blu,pn %xcc, obp_iaddr_patch
56 nop
57
 58 /* These two instructions are patched by paging_init(). */
59sparc64_vpte_patchme1:
60 sethi %hi(0), %g5
61sparc64_vpte_patchme2:
62 or %g5, %lo(0), %g5
63
64 /* With kernel PGD in %g5, branch back into dtlb_backend. */
65 ba,pt %xcc, sparc64_kpte_continue
66 andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
67
68vpte_noent:
69 /* Restore previous TAG_ACCESS, %g5 is zero, and we will
70 * skip over the trap instruction so that the top level
 71 * TLB miss handler will think this %g5 value is just an
72 * invalid PTE, thus branching to full fault processing.
73 */
74 mov TLB_SFSR, %g1
75 stxa %g4, [%g1 + %g1] ASI_DMMU
76 done
77
78 .globl obp_iaddr_patch
79obp_iaddr_patch:
80 /* These two instructions patched by inherit_prom_mappings(). */
81 sethi %hi(0), %g5
82 or %g5, %lo(0), %g5
83
84 /* Behave as if we are at TL0. */
85 wrpr %g0, 1, %tl
86 rdpr %tpc, %g4 /* Find original faulting iaddr */
87 srlx %g4, 13, %g4 /* Throw out context bits */
88 sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
89
90 /* Restore previous TAG_ACCESS. */
91 mov TLB_SFSR, %g1
92 stxa %g4, [%g1 + %g1] ASI_IMMU
93
94 /* Get PMD offset. */
95 srlx %g4, 23, %g6
96 and %g6, 0x7ff, %g6
97 sllx %g6, 2, %g6
98
99 /* Load PMD, is it valid? */
100 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
101 brz,pn %g5, longpath
102 sllx %g5, 11, %g5
103
104 /* Get PTE offset. */
105 srlx %g4, 13, %g6
106 and %g6, 0x3ff, %g6
107 sllx %g6, 3, %g6
108
109 /* Load PTE. */
110 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
111 brgez,pn %g5, longpath
112 nop
113
114 /* TLB load and return from trap. */
115 stxa %g5, [%g0] ASI_ITLB_DATA_IN
116 retry
117
118 .globl obp_daddr_patch
119obp_daddr_patch:
120 /* These two instructions patched by inherit_prom_mappings(). */
121 sethi %hi(0), %g5
122 or %g5, %lo(0), %g5
123
124 /* Get PMD offset. */
125 srlx %g4, 23, %g6
126 and %g6, 0x7ff, %g6
127 sllx %g6, 2, %g6
128
129 /* Load PMD, is it valid? */
130 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
131 brz,pn %g5, longpath
132 sllx %g5, 11, %g5
133
134 /* Get PTE offset. */
135 srlx %g4, 13, %g6
136 and %g6, 0x3ff, %g6
137 sllx %g6, 3, %g6
138
139 /* Load PTE. */
140 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
141 brgez,pn %g5, longpath
142 nop
143
144 /* TLB load and return from trap. */
145 stxa %g5, [%g0] ASI_DTLB_DATA_IN
146 retry
147
148/*
149 * On a first level data miss, check whether this is to the OBP range (note
150 * that such accesses can be made by prom, as well as by kernel using
151 * prom_getproperty on "address"), and if so, do not use vpte access ...
152 * rather, use information saved during inherit_prom_mappings() using 8k
153 * pagesize.
154 */
155 .align 32
156kvmap:
157 sethi %hi(MODULES_VADDR), %g5
158 cmp %g4, %g5
159 blu,pn %xcc, longpath
160 mov (VMALLOC_END >> 24), %g5
161 sllx %g5, 24, %g5
162 cmp %g4, %g5
163 bgeu,pn %xcc, longpath
164 nop
165
166kvmap_check_obp:
167 sethi %hi(LOW_OBP_ADDRESS), %g5
168 cmp %g4, %g5
169 blu,pn %xcc, kvmap_vmalloc_addr
170 mov 0x1, %g5
171 sllx %g5, 32, %g5
172 cmp %g4, %g5
173 blu,pn %xcc, obp_daddr_patch
174 nop
175
176kvmap_vmalloc_addr:
177 /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
178 ldxa [%g3 + %g6] ASI_N, %g5
179 brgez,pn %g5, longpath
180 nop
181
182 /* PTE is valid, load into TLB and return from trap. */
183 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
184 retry
185
186 /* This is trivial with the new code... */ 33 /* This is trivial with the new code... */
187 .globl do_fpdis 34 .globl do_fpdis
188do_fpdis: 35do_fpdis:
189 sethi %hi(TSTATE_PEF), %g4 ! IEU0 36 sethi %hi(TSTATE_PEF), %g4
190 rdpr %tstate, %g5 37 rdpr %tstate, %g5
191 andcc %g5, %g4, %g0 38 andcc %g5, %g4, %g0
192 be,pt %xcc, 1f 39 be,pt %xcc, 1f
@@ -203,18 +50,18 @@ do_fpdis:
203 add %g0, %g0, %g0 50 add %g0, %g0, %g0
204 ba,a,pt %xcc, rtrap_clr_l6 51 ba,a,pt %xcc, rtrap_clr_l6
205 52
2061: ldub [%g6 + TI_FPSAVED], %g5 ! Load Group 531: ldub [%g6 + TI_FPSAVED], %g5
207 wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles 54 wr %g0, FPRS_FEF, %fprs
208 andcc %g5, FPRS_FEF, %g0 ! IEU1 Group 55 andcc %g5, FPRS_FEF, %g0
209 be,a,pt %icc, 1f ! CTI 56 be,a,pt %icc, 1f
210 clr %g7 ! IEU0 57 clr %g7
211 ldx [%g6 + TI_GSR], %g7 ! Load Group 58 ldx [%g6 + TI_GSR], %g7
2121: andcc %g5, FPRS_DL, %g0 ! IEU1 591: andcc %g5, FPRS_DL, %g0
213 bne,pn %icc, 2f ! CTI 60 bne,pn %icc, 2f
214 fzero %f0 ! FPA 61 fzero %f0
215 andcc %g5, FPRS_DU, %g0 ! IEU1 Group 62 andcc %g5, FPRS_DU, %g0
216 bne,pn %icc, 1f ! CTI 63 bne,pn %icc, 1f
217 fzero %f2 ! FPA 64 fzero %f2
218 faddd %f0, %f2, %f4 65 faddd %f0, %f2, %f4
219 fmuld %f0, %f2, %f6 66 fmuld %f0, %f2, %f6
220 faddd %f0, %f2, %f8 67 faddd %f0, %f2, %f8
@@ -250,15 +97,17 @@ do_fpdis:
250 faddd %f0, %f2, %f4 97 faddd %f0, %f2, %f4
251 fmuld %f0, %f2, %f6 98 fmuld %f0, %f2, %f6
252 ldxa [%g3] ASI_DMMU, %g5 99 ldxa [%g3] ASI_DMMU, %g5
253cplus_fptrap_insn_1: 100 sethi %hi(sparc64_kern_sec_context), %g2
254 sethi %hi(0), %g2 101 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
255 stxa %g2, [%g3] ASI_DMMU 102 stxa %g2, [%g3] ASI_DMMU
256 membar #Sync 103 membar #Sync
257 add %g6, TI_FPREGS + 0xc0, %g2 104 add %g6, TI_FPREGS + 0xc0, %g2
258 faddd %f0, %f2, %f8 105 faddd %f0, %f2, %f8
259 fmuld %f0, %f2, %f10 106 fmuld %f0, %f2, %f10
260 ldda [%g1] ASI_BLK_S, %f32 ! grrr, where is ASI_BLK_NUCLEUS 8-( 107 membar #Sync
108 ldda [%g1] ASI_BLK_S, %f32
261 ldda [%g2] ASI_BLK_S, %f48 109 ldda [%g2] ASI_BLK_S, %f48
110 membar #Sync
262 faddd %f0, %f2, %f12 111 faddd %f0, %f2, %f12
263 fmuld %f0, %f2, %f14 112 fmuld %f0, %f2, %f14
264 faddd %f0, %f2, %f16 113 faddd %f0, %f2, %f16
@@ -269,7 +118,6 @@ cplus_fptrap_insn_1:
269 fmuld %f0, %f2, %f26 118 fmuld %f0, %f2, %f26
270 faddd %f0, %f2, %f28 119 faddd %f0, %f2, %f28
271 fmuld %f0, %f2, %f30 120 fmuld %f0, %f2, %f30
272 membar #Sync
273 b,pt %xcc, fpdis_exit 121 b,pt %xcc, fpdis_exit
274 nop 122 nop
2752: andcc %g5, FPRS_DU, %g0 1232: andcc %g5, FPRS_DU, %g0
@@ -279,15 +127,17 @@ cplus_fptrap_insn_1:
279 fzero %f34 127 fzero %f34
280 ldxa [%g3] ASI_DMMU, %g5 128 ldxa [%g3] ASI_DMMU, %g5
281 add %g6, TI_FPREGS, %g1 129 add %g6, TI_FPREGS, %g1
282cplus_fptrap_insn_2: 130 sethi %hi(sparc64_kern_sec_context), %g2
283 sethi %hi(0), %g2 131 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
284 stxa %g2, [%g3] ASI_DMMU 132 stxa %g2, [%g3] ASI_DMMU
285 membar #Sync 133 membar #Sync
286 add %g6, TI_FPREGS + 0x40, %g2 134 add %g6, TI_FPREGS + 0x40, %g2
287 faddd %f32, %f34, %f36 135 faddd %f32, %f34, %f36
288 fmuld %f32, %f34, %f38 136 fmuld %f32, %f34, %f38
289 ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-( 137 membar #Sync
138 ldda [%g1] ASI_BLK_S, %f0
290 ldda [%g2] ASI_BLK_S, %f16 139 ldda [%g2] ASI_BLK_S, %f16
140 membar #Sync
291 faddd %f32, %f34, %f40 141 faddd %f32, %f34, %f40
292 fmuld %f32, %f34, %f42 142 fmuld %f32, %f34, %f42
293 faddd %f32, %f34, %f44 143 faddd %f32, %f34, %f44
@@ -300,18 +150,18 @@ cplus_fptrap_insn_2:
300 fmuld %f32, %f34, %f58 150 fmuld %f32, %f34, %f58
301 faddd %f32, %f34, %f60 151 faddd %f32, %f34, %f60
302 fmuld %f32, %f34, %f62 152 fmuld %f32, %f34, %f62
303 membar #Sync
304 ba,pt %xcc, fpdis_exit 153 ba,pt %xcc, fpdis_exit
305 nop 154 nop
3063: mov SECONDARY_CONTEXT, %g3 1553: mov SECONDARY_CONTEXT, %g3
307 add %g6, TI_FPREGS, %g1 156 add %g6, TI_FPREGS, %g1
308 ldxa [%g3] ASI_DMMU, %g5 157 ldxa [%g3] ASI_DMMU, %g5
309cplus_fptrap_insn_3: 158 sethi %hi(sparc64_kern_sec_context), %g2
310 sethi %hi(0), %g2 159 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
311 stxa %g2, [%g3] ASI_DMMU 160 stxa %g2, [%g3] ASI_DMMU
312 membar #Sync 161 membar #Sync
313 mov 0x40, %g2 162 mov 0x40, %g2
314 ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-( 163 membar #Sync
164 ldda [%g1] ASI_BLK_S, %f0
315 ldda [%g1 + %g2] ASI_BLK_S, %f16 165 ldda [%g1 + %g2] ASI_BLK_S, %f16
316 add %g1, 0x80, %g1 166 add %g1, 0x80, %g1
317 ldda [%g1] ASI_BLK_S, %f32 167 ldda [%g1] ASI_BLK_S, %f32
@@ -472,8 +322,8 @@ do_fptrap_after_fsr:
472 stx %g3, [%g6 + TI_GSR] 322 stx %g3, [%g6 + TI_GSR]
473 mov SECONDARY_CONTEXT, %g3 323 mov SECONDARY_CONTEXT, %g3
474 ldxa [%g3] ASI_DMMU, %g5 324 ldxa [%g3] ASI_DMMU, %g5
475cplus_fptrap_insn_4: 325 sethi %hi(sparc64_kern_sec_context), %g2
476 sethi %hi(0), %g2 326 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
477 stxa %g2, [%g3] ASI_DMMU 327 stxa %g2, [%g3] ASI_DMMU
478 membar #Sync 328 membar #Sync
479 add %g6, TI_FPREGS, %g2 329 add %g6, TI_FPREGS, %g2
@@ -494,45 +344,17 @@ cplus_fptrap_insn_4:
494 ba,pt %xcc, etrap 344 ba,pt %xcc, etrap
495 wr %g0, 0, %fprs 345 wr %g0, 0, %fprs
496 346
497cplus_fptrap_1:
498 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
499
500 .globl cheetah_plus_patch_fpdis
501cheetah_plus_patch_fpdis:
502 /* We configure the dTLB512_0 for 4MB pages and the
503 * dTLB512_1 for 8K pages when in context zero.
504 */
505 sethi %hi(cplus_fptrap_1), %o0
506 lduw [%o0 + %lo(cplus_fptrap_1)], %o1
507
508 set cplus_fptrap_insn_1, %o2
509 stw %o1, [%o2]
510 flush %o2
511 set cplus_fptrap_insn_2, %o2
512 stw %o1, [%o2]
513 flush %o2
514 set cplus_fptrap_insn_3, %o2
515 stw %o1, [%o2]
516 flush %o2
517 set cplus_fptrap_insn_4, %o2
518 stw %o1, [%o2]
519 flush %o2
520
521 retl
522 nop
523
524 /* The registers for cross calls will be: 347 /* The registers for cross calls will be:
525 * 348 *
526 * DATA 0: [low 32-bits] Address of function to call, jmp to this 349 * DATA 0: [low 32-bits] Address of function to call, jmp to this
527 * [high 32-bits] MMU Context Argument 0, place in %g5 350 * [high 32-bits] MMU Context Argument 0, place in %g5
528 * DATA 1: Address Argument 1, place in %g6 351 * DATA 1: Address Argument 1, place in %g1
529 * DATA 2: Address Argument 2, place in %g7 352 * DATA 2: Address Argument 2, place in %g7
530 * 353 *
531 * With this method we can do most of the cross-call tlb/cache 354 * With this method we can do most of the cross-call tlb/cache
532 * flushing very quickly. 355 * flushing very quickly.
533 * 356 *
534 * Current CPU's IRQ worklist table is locked into %g1, 357 * Current CPU's IRQ worklist table is locked into %g6, don't touch.
535 * don't touch.
536 */ 358 */
537 .text 359 .text
538 .align 32 360 .align 32
@@ -1006,13 +828,14 @@ cheetah_plus_dcpe_trap_vector:
1006 nop 828 nop
1007 829
1008do_cheetah_plus_data_parity: 830do_cheetah_plus_data_parity:
1009 ba,pt %xcc, etrap 831 rdpr %pil, %g2
832 wrpr %g0, 15, %pil
833 ba,pt %xcc, etrap_irq
1010 rd %pc, %g7 834 rd %pc, %g7
1011 mov 0x0, %o0 835 mov 0x0, %o0
1012 call cheetah_plus_parity_error 836 call cheetah_plus_parity_error
1013 add %sp, PTREGS_OFF, %o1 837 add %sp, PTREGS_OFF, %o1
1014 ba,pt %xcc, rtrap 838 ba,a,pt %xcc, rtrap_irq
1015 clr %l6
1016 839
1017cheetah_plus_dcpe_trap_vector_tl1: 840cheetah_plus_dcpe_trap_vector_tl1:
1018 membar #Sync 841 membar #Sync
@@ -1036,13 +859,14 @@ cheetah_plus_icpe_trap_vector:
1036 nop 859 nop
1037 860
1038do_cheetah_plus_insn_parity: 861do_cheetah_plus_insn_parity:
1039 ba,pt %xcc, etrap 862 rdpr %pil, %g2
863 wrpr %g0, 15, %pil
864 ba,pt %xcc, etrap_irq
1040 rd %pc, %g7 865 rd %pc, %g7
1041 mov 0x1, %o0 866 mov 0x1, %o0
1042 call cheetah_plus_parity_error 867 call cheetah_plus_parity_error
1043 add %sp, PTREGS_OFF, %o1 868 add %sp, PTREGS_OFF, %o1
1044 ba,pt %xcc, rtrap 869 ba,a,pt %xcc, rtrap_irq
1045 clr %l6
1046 870
1047cheetah_plus_icpe_trap_vector_tl1: 871cheetah_plus_icpe_trap_vector_tl1:
1048 membar #Sync 872 membar #Sync
@@ -1075,6 +899,10 @@ do_dcpe_tl1:
1075 nop 899 nop
1076 wrpr %g1, %tl ! Restore original trap level 900 wrpr %g1, %tl ! Restore original trap level
1077do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ 901do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
902 sethi %hi(dcache_parity_tl1_occurred), %g2
903 lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
904 add %g1, 1, %g1
905 stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
1078 /* Reset D-cache parity */ 906 /* Reset D-cache parity */
1079 sethi %hi(1 << 16), %g1 ! D-cache size 907 sethi %hi(1 << 16), %g1 ! D-cache size
1080 mov (1 << 5), %g2 ! D-cache line size 908 mov (1 << 5), %g2 ! D-cache line size
@@ -1121,6 +949,10 @@ do_icpe_tl1:
1121 nop 949 nop
1122 wrpr %g1, %tl ! Restore original trap level 950 wrpr %g1, %tl ! Restore original trap level
1123do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ 951do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
952 sethi %hi(icache_parity_tl1_occurred), %g2
953 lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1
954 add %g1, 1, %g1
955 stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
1124 /* Flush I-cache */ 956 /* Flush I-cache */
1125 sethi %hi(1 << 15), %g1 ! I-cache size 957 sethi %hi(1 << 15), %g1 ! I-cache size
1126 mov (1 << 5), %g2 ! I-cache line size 958 mov (1 << 5), %g2 ! I-cache line size