Diffstat (limited to 'arch/blackfin/mach-common/entry.S')
-rw-r--r--	arch/blackfin/mach-common/entry.S | 113
1 file changed, 82 insertions(+), 31 deletions(-)
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index f2fb87e9a46e..038f70e0be65 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -151,26 +151,62 @@ ENTRY(_ex_soft_bp)
 ENDPROC(_ex_soft_bp)
 
 ENTRY(_ex_single_step)
+	/* If we just returned from an interrupt, the single step event is
+	   for the RTI instruction.  */
 	r7 = retx;
 	r6 = reti;
 	cc = r7 == r6;
-	if cc jump _bfin_return_from_exception
-	r7 = syscfg;
-	bitclr (r7, 0);
-	syscfg = R7;
+	if cc jump _bfin_return_from_exception;
 
+	/* If we were in user mode, do the single step normally.  */
 	p5.l = lo(IPEND);
 	p5.h = hi(IPEND);
 	r6 = [p5];
-	cc = bittst(r6, 5);
-	if !cc jump _ex_trap_c;
-	p4.l = lo(EVT5);
-	p4.h = hi(EVT5);
-	r6.h = _exception_to_level5;
-	r6.l = _exception_to_level5;
-	r7 = [p4];
-	cc = r6 == r7;
-	if !cc jump _ex_trap_c;
+	r7 = 0xffe0 (z);
+	r7 = r7 & r6;
+	cc = r7 == 0;
+	if !cc jump 1f;
+
+	/* Single stepping only a single instruction, so clear the trace
+	 * bit here.  */
+	r7 = syscfg;
+	bitclr (r7, 0);
+	syscfg = R7;
+	jump _ex_trap_c;
+
+1:
+	/*
+	 * We were in an interrupt handler.  By convention, all of them save
+	 * SYSCFG with their first instruction, so by checking whether our
+	 * RETX points at the entry point, we can determine whether to allow
+	 * a single step, or whether to clear SYSCFG.
+	 *
+	 * First, find out the interrupt level and the event vector for it.
+	 */
+	p5.l = lo(EVT0);
+	p5.h = hi(EVT0);
+	p5 += -4;
+2:
+	r7 = rot r7 by -1;
+	p5 += 4;
+	if !cc jump 2b;
+
+	/* What we actually do is test for the _second_ instruction in the
+	 * IRQ handler.  That way, if there are insns following the restore
+	 * of SYSCFG after leaving the handler, we will not turn off SYSCFG
+	 * for them.  */
+
+	r7 = [p5];
+	r7 += 2;
+	r6 = RETX;
+	cc = R7 == R6;
+	if !cc jump _bfin_return_from_exception;
+
+	r7 = syscfg;
+	bitclr (r7, 0);
+	syscfg = R7;
+
+	/* Fall through to _bfin_return_from_exception.  */
 ENDPROC(_ex_single_step)
 
 ENTRY(_bfin_return_from_exception)
@@ -234,20 +270,26 @@ ENTRY(_ex_trap_c)
 	p5.l = _saved_icplb_fault_addr;
 	[p5] = r7;
 
-	p4.l = __retx;
-	p4.h = __retx;
+	p4.l = _excpt_saved_stuff;
+	p4.h = _excpt_saved_stuff;
+
 	r6 = retx;
 	[p4] = r6;
-	p4.l = lo(SAFE_USER_INSTRUCTION);
-	p4.h = hi(SAFE_USER_INSTRUCTION);
-	retx = p4;
+
+	r6 = SYSCFG;
+	[p4 + 4] = r6;
+	BITCLR(r6, 0);
+	SYSCFG = r6;
 
 	/* Disable all interrupts, but make sure level 5 is enabled so
 	 * we can switch to that level.  Save the old mask.  */
 	cli r6;
-	p4.l = _excpt_saved_imask;
-	p4.h = _excpt_saved_imask;
-	[p4] = r6;
+	[p4 + 8] = r6;
+
+	p4.l = lo(SAFE_USER_INSTRUCTION);
+	p4.h = hi(SAFE_USER_INSTRUCTION);
+	retx = p4;
+
 	r6 = 0x3f;
 	sti r6;
 
@@ -295,6 +337,11 @@ ENTRY(_double_fault)
 	 */
 	SAVE_ALL_SYS
 
+	/* The dumping functions expect the return address in the RETI
+	 * slot.  */
+	r6 = retx;
+	[sp + PT_PC] = r6;
+
 	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
 	call _double_fault_c;
@@ -307,16 +354,17 @@ ENDPROC(_double_fault)
 ENTRY(_exception_to_level5)
 	SAVE_ALL_SYS
 
-	p4.l = __retx;
-	p4.h = __retx;
+	p4.l = _excpt_saved_stuff;
+	p4.h = _excpt_saved_stuff;
 	r6 = [p4];
 	[sp + PT_PC] = r6;
 
+	r6 = [p4 + 4];
+	[sp + PT_SYSCFG] = r6;
+
 	/* Restore interrupt mask.  We haven't pushed RETI, so this
 	 * doesn't enable interrupts until we return from this handler.  */
-	p4.l = _excpt_saved_imask;
-	p4.h = _excpt_saved_imask;
-	r6 = [p4];
+	r6 = [p4 + 8];
 	sti r6;
 
 	/* Restore the hardware error vector.  */
@@ -1344,7 +1392,14 @@ ENTRY(_sys_call_table)
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
 	.endr
-_excpt_saved_imask:
+
+/*
+ * Used to save the real RETX, IMASK and SYSCFG when temporarily
+ * storing safe values across the transition from exception to IRQ5.
+ */
+_excpt_saved_stuff:
+	.long 0;
+	.long 0;
 	.long 0;
 
 _exception_stack:
@@ -1358,7 +1413,3 @@ _exception_stack_top:
 _last_cplb_fault_retx:
 	.long 0;
 #endif
-/* Used to save the real RETX when temporarily storing a safe
- * return address.  */
-__retx:
-	.long 0;
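
For readers skimming the patch, the following C fragment is a minimal sketch of the decision flow the reworked _ex_single_step path implements. It is illustrative only: the struct, helper and constant names here are invented for the sketch and are not part of the patch; the authoritative logic is the Blackfin assembly above.

/*
 * Illustrative sketch only -- NOT part of the patch.  Models, in C, the
 * decision flow of the new _ex_single_step path.  The step_ctx layout and
 * the SYSCFG_SSSTEP name are assumptions made for this sketch.
 */
#include <stdint.h>

#define SYSCFG_SSSTEP	0x1	/* trace (single step) enable, bit 0 of SYSCFG */

struct step_ctx {
	uint32_t retx, reti, syscfg, ipend;
	uint32_t evt[16];		/* contents of EVT0..EVT15 */
};

enum step_action { RETURN_FROM_EXCEPTION, TRAP_C };

enum step_action handle_single_step(struct step_ctx *c)
{
	/* The step event was raised for the RTI instruction itself. */
	if (c->retx == c->reti)
		return RETURN_FROM_EXCEPTION;

	/* No IVG5..IVG15 level pending in IPEND: the normal (user mode)
	 * case.  Clear the trace bit for this one instruction and report
	 * the trap via _ex_trap_c. */
	uint32_t active = c->ipend & 0xffe0;
	if (!active) {
		c->syscfg &= ~SYSCFG_SSSTEP;
		return TRAP_C;
	}

	/* We were inside an interrupt handler: locate the event vector of
	 * the lowest pending level (the rot/jump loop in the assembly). */
	int level = __builtin_ctz(active);
	uint32_t entry = c->evt[level];

	/* Every handler saves SYSCFG with its first instruction, so the
	 * trace bit is only cleared when RETX points at the second one. */
	if (c->retx == entry + 2)
		c->syscfg &= ~SYSCFG_SSSTEP;

	return RETURN_FROM_EXCEPTION;
}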