author		Bernd Schmidt <bernds_cb1@t-online.de>	2008-05-06 23:41:26 -0400
committer	Bryan Wu <cooloney@kernel.org>	2008-05-06 23:41:26 -0400
commit		0893f1250f87e0a832f47bb60fb69ed0d52be7a3 (patch)
tree		96dbea09cc618f4e6c4db2a671de215ba6b9784b
parent		8513c42edb3f1c91a8418fae11846c87cf7b8581 (diff)
[Blackfin] arch: fix gdb testing regression
When transferring to IRQ5 from an exception, save SYSCFG in memory across
the transfer and clear the trace bit.  When we get a single step exception,
check whether we can safely clear the trace bit in SYSCFG.  We can (and
should) clear it after the first instruction of the interrupt handler; the
first insn saves SYSCFG to the stack in all handlers.

Signed-off-by: Bernd Schmidt <bernds_cb1@t-online.de>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
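The decision described above can be sketched in C. This is an illustrative model only; may_clear_trace_bit(), the constants and the addresses are assumptions made up for the example, not kernel code. The real logic lives in the assembly changes below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SYSCFG_SSSTEP   0x1      /* bit 0 of SYSCFG: single-step (trace) enable */
#define IPEND_IRQ_MASK  0xffe0   /* IPEND bits 5..15, the mask the patch tests */

/* Decide whether the trace bit may be cleared for the context that took
 * the single-step exception. */
static bool may_clear_trace_bit(uint32_t ipend, uint32_t retx,
                                uint32_t handler_entry)
{
	if ((ipend & IPEND_IRQ_MASK) == 0) {
		/* User (non-interrupt) context: only one instruction is being
		 * traced, so the bit can simply be cleared before resuming. */
		return true;
	}

	/* Interrupt context: every handler saves SYSCFG with its first
	 * instruction, so once RETX points at the second instruction
	 * (entry + 2 bytes, as in the patch), the saved copy already holds
	 * the trace bit and the live one may be cleared. */
	return retx == handler_entry + 2;
}

int main(void)
{
	uint32_t syscfg = SYSCFG_SSSTEP;

	/* Example: the single step landed on the second insn of a handler
	 * at a made-up address, with one IRQ level pending in IPEND. */
	if (may_clear_trace_bit(0x0020, 0xffa01002, 0xffa01000))
		syscfg &= ~SYSCFG_SSSTEP;

	printf("trace bit now %u\n", (unsigned)(syscfg & SYSCFG_SSSTEP));
	return 0;
}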
-rw-r--r--	arch/blackfin/mach-common/entry.S		108
-rw-r--r--	include/asm-blackfin/entry.h			  5
-rw-r--r--	include/asm-blackfin/mach-common/context.S	  5
3 files changed, 87 insertions(+), 31 deletions(-)
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 7365a17a6a81..038f70e0be65 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -151,26 +151,62 @@ ENTRY(_ex_soft_bp)
 ENDPROC(_ex_soft_bp)
 
 ENTRY(_ex_single_step)
+	/* If we just returned from an interrupt, the single step event is
+	   for the RTI instruction.  */
 	r7 = retx;
 	r6 = reti;
 	cc = r7 == r6;
-	if cc jump _bfin_return_from_exception
-	r7 = syscfg;
-	bitclr (r7, 0);
-	syscfg = R7;
+	if cc jump _bfin_return_from_exception;
 
+	/* If we were in user mode, do the single step normally.  */
 	p5.l = lo(IPEND);
 	p5.h = hi(IPEND);
 	r6 = [p5];
-	cc = bittst(r6, 5);
-	if !cc jump _ex_trap_c;
-	p4.l = lo(EVT5);
-	p4.h = hi(EVT5);
-	r6.h = _exception_to_level5;
-	r6.l = _exception_to_level5;
-	r7 = [p4];
-	cc = r6 == r7;
-	if !cc jump _ex_trap_c;
+	r7 = 0xffe0 (z);
+	r7 = r7 & r6;
+	cc = r7 == 0;
+	if !cc jump 1f;
+
+	/* Single stepping only a single instruction, so clear the trace
+	 * bit here.  */
+	r7 = syscfg;
+	bitclr (r7, 0);
+	syscfg = R7;
+	jump _ex_trap_c;
+
+1:
+	/*
+	 * We were in an interrupt handler.  By convention, all of them save
+	 * SYSCFG with their first instruction, so by checking whether our
+	 * RETX points at the entry point, we can determine whether to allow
+	 * a single step, or whether to clear SYSCFG.
+	 *
+	 * First, find out the interrupt level and the event vector for it.
+	 */
+	p5.l = lo(EVT0);
+	p5.h = hi(EVT0);
+	p5 += -4;
+2:
+	r7 = rot r7 by -1;
+	p5 += 4;
+	if !cc jump 2b;
+
+	/* What we actually do is test for the _second_ instruction in the
+	 * IRQ handler.  That way, if there are insns following the restore
+	 * of SYSCFG after leaving the handler, we will not turn off SYSCFG
+	 * for them.  */
+
+	r7 = [p5];
+	r7 += 2;
+	r6 = RETX;
+	cc = R7 == R6;
+	if !cc jump _bfin_return_from_exception;
+
+	r7 = syscfg;
+	bitclr (r7, 0);
+	syscfg = R7;
+
+	/* Fall through to _bfin_return_from_exception.  */
 ENDPROC(_ex_single_step)
 
 ENTRY(_bfin_return_from_exception)
@@ -234,20 +270,26 @@ ENTRY(_ex_trap_c)
 	p5.l = _saved_icplb_fault_addr;
 	[p5] = r7;
 
-	p4.l = __retx;
-	p4.h = __retx;
+	p4.l = _excpt_saved_stuff;
+	p4.h = _excpt_saved_stuff;
+
 	r6 = retx;
 	[p4] = r6;
-	p4.l = lo(SAFE_USER_INSTRUCTION);
-	p4.h = hi(SAFE_USER_INSTRUCTION);
-	retx = p4;
+
+	r6 = SYSCFG;
+	[p4 + 4] = r6;
+	BITCLR(r6, 0);
+	SYSCFG = r6;
 
 	/* Disable all interrupts, but make sure level 5 is enabled so
 	 * we can switch to that level.  Save the old mask.  */
 	cli r6;
-	p4.l = _excpt_saved_imask;
-	p4.h = _excpt_saved_imask;
-	[p4] = r6;
+	[p4 + 8] = r6;
+
+	p4.l = lo(SAFE_USER_INSTRUCTION);
+	p4.h = hi(SAFE_USER_INSTRUCTION);
+	retx = p4;
+
 	r6 = 0x3f;
 	sti r6;
 
@@ -312,16 +354,17 @@ ENDPROC(_double_fault)
 ENTRY(_exception_to_level5)
 	SAVE_ALL_SYS
 
-	p4.l = __retx;
-	p4.h = __retx;
+	p4.l = _excpt_saved_stuff;
+	p4.h = _excpt_saved_stuff;
 	r6 = [p4];
 	[sp + PT_PC] = r6;
 
+	r6 = [p4 + 4];
+	[sp + PT_SYSCFG] = r6;
+
 	/* Restore interrupt mask.  We haven't pushed RETI, so this
 	 * doesn't enable interrupts until we return from this handler.  */
-	p4.l = _excpt_saved_imask;
-	p4.h = _excpt_saved_imask;
-	r6 = [p4];
+	r6 = [p4 + 8];
 	sti r6;
 
 	/* Restore the hardware error vector.  */
@@ -1349,7 +1392,14 @@ ENTRY(_sys_call_table)
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
 	.endr
-_excpt_saved_imask:
+
+	/*
+	 * Used to save the real RETX, IMASK and SYSCFG when temporarily
+	 * storing safe values across the transition from exception to IRQ5.
+	 */
+_excpt_saved_stuff:
+	.long 0;
+	.long 0;
 	.long 0;
 
 _exception_stack:
@@ -1363,7 +1413,3 @@ _exception_stack_top:
 _last_cplb_fault_retx:
 	.long 0;
 #endif
-	/* Used to save the real RETX when temporarily storing a safe
-	 * return address.  */
-__retx:
-	.long 0;
diff --git a/include/asm-blackfin/entry.h b/include/asm-blackfin/entry.h
index 562c6d3a3232..c4f721e0d00d 100644
--- a/include/asm-blackfin/entry.h
+++ b/include/asm-blackfin/entry.h
@@ -17,6 +17,11 @@
 #define PF_DTRACE_OFF 1
 #define PF_DTRACE_BIT 5
 
+/*
+ * NOTE! The single-stepping code assumes that all interrupt handlers
+ * start by saving SYSCFG on the stack with their first instruction.
+ */
+
 /* This one is used for exceptions, emulation, and NMI.  It doesn't push
    RETI and doesn't do cli.  */
 #define SAVE_ALL_SYS		save_context_no_interrupts
diff --git a/include/asm-blackfin/mach-common/context.S b/include/asm-blackfin/mach-common/context.S
index fd0ebe1862b8..c0e630edfb9a 100644
--- a/include/asm-blackfin/mach-common/context.S
+++ b/include/asm-blackfin/mach-common/context.S
@@ -28,6 +28,11 @@
  */
 
 /*
+ * NOTE! The single-stepping code assumes that all interrupt handlers
+ * start by saving SYSCFG on the stack with their first instruction.
+ */
+
+/*
  * Code to save processor context.
  * We even save the register which are preserved by a function call
 	- r4, r5, r6, r7, p3, p4, p5