author		Haavard Skinnemoen <hskinnemoen@atmel.com>	2007-11-28 09:04:01 -0500
committer	Haavard Skinnemoen <hskinnemoen@atmel.com>	2007-12-07 08:54:46 -0500
commit		2507bc1338e43eadfef5b604d2c47e4f8180718f (patch)
tree		4bc3c14114ba96efb8e9e4f2c0925fb92b669a19
parent		8dfe8f29cd371affcc3c6b35658dc4bd95ee7b61 (diff)
[AVR32] Follow the rules when dealing with the OCD system
The current debug trap handling code does a number of things that are
illegal according to the AVR32 Architecture manual. Most importantly, it
may try to schedule from Debug Mode, thus clearing the D bit, which can
lead to "undefined behaviour".

It seems like this works in most cases, but several people have observed
somewhat unstable behaviour when debugging programs, including soft
lockups. So there's definitely something which is not right with the
existing code.

The new code will never schedule from Debug mode, it will always exit
Debug mode with a "retd" instruction, and if something not running in
Debug mode needs to do something debug-related (like doing a single
step), it will enter debug mode through a "breakpoint" instruction. The
monitor code will then return directly to user space, bypassing its own
saved registers if necessary (since we don't actually care about the
trapped context, only the one that came before.)

This adds three instructions to the common exception handling code,
including one branch. It does not touch super-hot paths like the TLB
miss handler.

Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
-rw-r--r--	arch/avr32/kernel/asm-offsets.c		|   2
-rw-r--r--	arch/avr32/kernel/entry-avr32b.S	| 275
-rw-r--r--	arch/avr32/kernel/ptrace.c		| 271
-rw-r--r--	arch/avr32/kernel/vmlinux.lds.S		|   2
-rw-r--r--	include/asm-avr32/processor.h		|   3
-rw-r--r--	include/asm-avr32/thread_info.h		|  12
6 files changed, 334 insertions(+), 231 deletions(-)
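The user-visible contract is unchanged: a debugger still just issues
PTRACE_SINGLESTEP, and the kernel now implements it through the
TIF_BREAKPOINT/TIF_SINGLE_STEP protocol described above. For reference, a
minimal userspace sketch of the kind of single-step loop that exercises this
path (illustrative only, not part of the patch; error handling omitted):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Child: let the parent trace us, then run something. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		execl("/bin/true", "true", NULL);
		_exit(1);
	}

	int status;
	waitpid(child, &status, 0);	/* initial SIGSTOP */

	/* Step one instruction at a time until the child exits. */
	while (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) == 0) {
		if (waitpid(child, &status, 0) < 0 || WIFEXITED(status))
			break;
	}
	return 0;
}

Each PTRACE_SINGLESTEP stop corresponds to one trip through the reworked
do_debug() path below.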
diff --git a/arch/avr32/kernel/asm-offsets.c b/arch/avr32/kernel/asm-offsets.c
index 97d865865667..078cd33f467b 100644
--- a/arch/avr32/kernel/asm-offsets.c
+++ b/arch/avr32/kernel/asm-offsets.c
@@ -21,5 +21,7 @@ void foo(void)
 	OFFSET(TI_flags, thread_info, flags);
 	OFFSET(TI_cpu, thread_info, cpu);
 	OFFSET(TI_preempt_count, thread_info, preempt_count);
+	OFFSET(TI_rar_saved, thread_info, rar_saved);
+	OFFSET(TI_rsr_saved, thread_info, rsr_saved);
 	OFFSET(TI_restart_block, thread_info, restart_block);
 }
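The two OFFSET() lines added here are what let the assembly below refer to the
new thread_info members symbolically (for example ld.w r11, r8[TI_rar_saved]).
OFFSET() is the usual asm-offsets helper built around offsetof(); a
stand-alone sketch of the idea, using a made-up, cut-down structure rather
than the real struct thread_info:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant part of struct thread_info. */
struct ti_example {
	unsigned long flags;
	unsigned int  cpu;
	int           preempt_count;
	unsigned int  rar_saved;
	unsigned int  rsr_saved;
};

int main(void)
{
	/* asm-offsets.c causes lines like these to be generated at build
	 * time, so .S files can use the names as immediate offsets. */
	printf("#define TI_rar_saved %zu\n",
	       offsetof(struct ti_example, rar_saved));
	printf("#define TI_rsr_saved %zu\n",
	       offsetof(struct ti_example, rsr_saved));
	return 0;
}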
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index d7b93f12d8f7..df6c747658c1 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -264,16 +264,7 @@ syscall_exit_work:
 
 3:	bld	r1, TIF_BREAKPOINT
 	brcc	syscall_exit_cont
-	mfsr	r3, SYSREG_TLBEHI
-	lddsp	r2, sp[REG_PC]
-	andl	r3, 0xff, COH
-	lsl	r3, 1
-	sbr	r3, 30
-	sbr	r3, 0
-	mtdr	OCD_BWA2A, r2
-	mtdr	OCD_BWC2A, r3
-	rjmp	syscall_exit_cont
-
+	rjmp	enter_monitor_mode
 
 	/* The slow path of the TLB miss handler */
 page_table_not_present:
@@ -288,11 +279,16 @@ page_not_present:
 	rjmp	ret_from_exception
 
 	/* This function expects to find offending PC in SYSREG_RAR_EX */
+	.type	save_full_context_ex, @function
+	.align	2
 save_full_context_ex:
+	mfsr	r11, SYSREG_RAR_EX
+	sub	r9, pc, . - debug_trampoline
 	mfsr	r8, SYSREG_RSR_EX
+	cp.w	r9, r11
+	breq	3f
 	mov	r12, r8
 	andh	r8, (MODE_MASK >> 16), COH
-	mfsr	r11, SYSREG_RAR_EX
 	brne	2f
 
 1:	pushm	r11, r12	/* PC and SR */
@@ -303,6 +299,21 @@ save_full_context_ex:
 	stdsp	sp[4], r10	/* replace saved SP */
 	rjmp	1b
 
+	/*
+	 * The debug handler set up a trampoline to make us
+	 * automatically enter monitor mode upon return, but since
+	 * we're saving the full context, we must assume that the
+	 * exception handler might want to alter the return address
+	 * and/or status register. So we need to restore the original
+	 * context and enter monitor mode manually after the exception
+	 * has been handled.
+	 */
+3:	get_thread_info r8
+	ld.w	r11, r8[TI_rar_saved]
+	ld.w	r12, r8[TI_rsr_saved]
+	rjmp	1b
+	.size	save_full_context_ex, . - save_full_context_ex
+
 	/* Low-level exception handlers */
 handle_critical:
 	pushm	r12
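In C terms, the check added to save_full_context_ex above amounts to the
following; this is only an illustrative sketch (the *_example and *_sketch
names are invented), the authoritative version is the assembly in this hunk:

/* If the debug handler pointed RAR_EX at debug_trampoline, the exception
 * handler is about to do a full context save anyway, so drop the
 * trampoline and record the original return address/status instead. */
struct ti_example {
	unsigned long rar_saved;	/* mirrors thread_info.rar_saved */
	unsigned long rsr_saved;	/* mirrors thread_info.rsr_saved */
};

static void save_context_sketch(const struct ti_example *ti,
				unsigned long rar_ex, unsigned long rsr_ex,
				unsigned long trampoline_addr,
				unsigned long *saved_pc, unsigned long *saved_sr)
{
	if (rar_ex == trampoline_addr) {
		*saved_pc = ti->rar_saved;	/* label 3: in the assembly */
		*saved_sr = ti->rsr_saved;
	} else {
		*saved_pc = rar_ex;		/* the common path, label 1: */
		*saved_sr = rsr_ex;
	}
}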
@@ -439,6 +450,7 @@ do_fpe_ll:
 ret_from_exception:
 	mask_interrupts
 	lddsp	r4, sp[REG_SR]
+
 	andh	r4, (MODE_MASK >> 16), COH
 	brne	fault_resume_kernel
 
@@ -515,34 +527,76 @@ fault_exit_work:
 
 2:	bld	r1, TIF_BREAKPOINT
 	brcc	fault_resume_user
-	mfsr	r3, SYSREG_TLBEHI
-	lddsp	r2, sp[REG_PC]
-	andl	r3, 0xff, COH
-	lsl	r3, 1
-	sbr	r3, 30
-	sbr	r3, 0
-	mtdr	OCD_BWA2A, r2
-	mtdr	OCD_BWC2A, r3
-	rjmp	fault_resume_user
-
-	/* If we get a debug trap from privileged context we end up here */
-handle_debug_priv:
-	/* Fix up LR and SP in regs. r1 contains the mode we came from */
-	mfsr	r2, SYSREG_SR
-	mov	r3, r2
-	bfins	r2, r1, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
-	mtsr	SYSREG_SR, r2
+	rjmp	enter_monitor_mode
+
+	.section .kprobes.text, "ax", @progbits
+	.type	handle_debug, @function
+handle_debug:
+	sub	sp, 4		/* r12_orig */
+	stmts	--sp, r0-lr
+	mfsr	r8, SYSREG_RAR_DBG
+	mfsr	r9, SYSREG_RSR_DBG
+	unmask_exceptions
+	pushm	r8-r9
+	bfextu	r9, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
+	brne	debug_fixup_regs
+
+.Ldebug_fixup_cont:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	rcall	trace_hardirqs_off
+#endif
+	mov	r12, sp
+	rcall	do_debug
+	mov	sp, r12
+
+	lddsp	r2, sp[REG_SR]
+	bfextu	r3, r2, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
+	brne	debug_resume_kernel
+
+	get_thread_info r0
+	ld.w	r1, r0[TI_flags]
+	mov	r2, _TIF_DBGWORK_MASK
+	tst	r1, r2
+	brne	debug_exit_work
+
+	bld	r1, TIF_SINGLE_STEP
+	brcc	1f
+	mfdr	r4, OCD_DC
+	sbr	r4, OCD_DC_SS_BIT
+	mtdr	OCD_DC, r4
+
+1:	popm	r10,r11
+	mask_exceptions
+	mtsr	SYSREG_RSR_DBG, r11
+	mtsr	SYSREG_RAR_DBG, r10
+#ifdef CONFIG_TRACE_IRQFLAGS
+	rcall	trace_hardirqs_on
+1:
+#endif
+	ldmts	sp++, r0-lr
+	sub	sp, -4
+	retd
+	.size	handle_debug, . - handle_debug
+
+	/* Mode of the trapped context is in r9 */
+	.type	debug_fixup_regs, @function
+debug_fixup_regs:
+	mfsr	r8, SYSREG_SR
+	mov	r10, r8
+	bfins	r8, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
+	mtsr	SYSREG_SR, r8
 	sub	pc, -2
 	stdsp	sp[REG_LR], lr
-	mtsr	SYSREG_SR, r3
+	mtsr	SYSREG_SR, r10
 	sub	pc, -2
-	sub	r10, sp, -FRAME_SIZE_FULL
-	stdsp	sp[REG_SP], r10
-	mov	r12, sp
-	rcall	do_debug_priv
+	sub	r8, sp, -FRAME_SIZE_FULL
+	stdsp	sp[REG_SP], r8
+	rjmp	.Ldebug_fixup_cont
+	.size	debug_fixup_regs, . - debug_fixup_regs
 
-	/* Now, put everything back */
-	ssrf	SR_EM_BIT
+	.type	debug_resume_kernel, @function
+debug_resume_kernel:
+	mask_exceptions
 	popm	r10, r11
 	mtsr	SYSREG_RAR_DBG, r10
 	mtsr	SYSREG_RSR_DBG, r11
@@ -553,93 +607,44 @@ handle_debug_priv:
 1:
 #endif
 	mfsr	r2, SYSREG_SR
-	mov	r3, r2
-	bfins	r2, r1, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
+	mov	r1, r2
+	bfins	r2, r3, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
 	mtsr	SYSREG_SR, r2
 	sub	pc, -2
 	popm	lr
-	mtsr	SYSREG_SR, r3
+	mtsr	SYSREG_SR, r1
 	sub	pc, -2
 	sub	sp, -4		/* skip SP */
 	popm	r0-r12
 	sub	sp, -4
 	retd
+	.size	debug_resume_kernel, . - debug_resume_kernel
 
+	.type	debug_exit_work, @function
+debug_exit_work:
 	/*
-	 * At this point, everything is masked, that is, interrupts,
-	 * exceptions and debugging traps. We might get called from
-	 * interrupt or exception context in some rare cases, but this
-	 * will be taken care of by do_debug(), so we're not going to
-	 * do a 100% correct context save here.
+	 * We must return from Monitor Mode using a retd, and we must
+	 * not schedule since that involves the D bit in SR getting
+	 * cleared by something other than the debug hardware. This
+	 * may cause undefined behaviour according to the Architecture
+	 * manual.
+	 *
+	 * So we fix up the return address and status and return to a
+	 * stub below in Exception mode. From there, we can follow the
+	 * normal exception return path.
+	 *
+	 * The real return address and status registers are stored on
+	 * the stack in the way the exception return path understands,
+	 * so no need to fix anything up there.
 	 */
-handle_debug:
-	sub	sp, 4		/* r12_orig */
-	stmts	--sp, r0-lr
-	mfsr	r0, SYSREG_RAR_DBG
-	mfsr	r1, SYSREG_RSR_DBG
-#ifdef CONFIG_TRACE_IRQFLAGS
-	rcall	trace_hardirqs_off
-#endif
-	unmask_exceptions
-	stm	--sp, r0, r1
-	bfextu	r1, r1, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
-	brne	handle_debug_priv
-
-	mov	r12, sp
-	rcall	do_debug
-
-	lddsp	r10, sp[REG_SR]
-	andh	r10, (MODE_MASK >> 16), COH
-	breq	debug_resume_user
-
-debug_restore_all:
-	popm	r10,r11
-	mask_exceptions
-	mtsr	SYSREG_RSR_DBG, r11
-	mtsr	SYSREG_RAR_DBG, r10
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bld	r11, SYSREG_GM_OFFSET
-	brcc	1f
-	rcall	trace_hardirqs_on
-1:
-#endif
-	ldmts	sp++, r0-lr
-	sub	sp, -4
+	sub	r8, pc, . - fault_exit_work
+	mtsr	SYSREG_RAR_DBG, r8
+	mov	r9, 0
+	orh	r9, hi(SR_EM | SR_GM | MODE_EXCEPTION)
+	mtsr	SYSREG_RSR_DBG, r9
+	sub	pc, -2
 	retd
-
-debug_resume_user:
-	get_thread_info r0
-	mask_interrupts
-
-	ld.w	r1, r0[TI_flags]
-	andl	r1, _TIF_DBGWORK_MASK, COH
-	breq	debug_restore_all
-
-1:	bld	r1, TIF_NEED_RESCHED
-	brcc	2f
-	unmask_interrupts
-	rcall	schedule
-	mask_interrupts
-	ld.w	r1, r0[TI_flags]
-	rjmp	1b
-
-2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
-	tst	r1, r2
-	breq	3f
-	unmask_interrupts
-	mov	r12, sp
-	mov	r11, r0
-	rcall	do_notify_resume
-	mask_interrupts
-	ld.w	r1, r0[TI_flags]
-	rjmp	1b
-
-3:	bld	r1, TIF_SINGLE_STEP
-	brcc	debug_restore_all
-	mfdr	r2, OCD_DC
-	sbr	r2, OCD_DC_SS_BIT
-	mtdr	OCD_DC, r2
-	rjmp	debug_restore_all
+	.size	debug_exit_work, . - debug_exit_work
 
 	.set	rsr_int0,	SYSREG_RSR_INT0
 	.set	rsr_int1,	SYSREG_RSR_INT1
@@ -764,3 +769,53 @@ cpu_idle_enable_int_and_exit:
 	IRQ_LEVEL 1
 	IRQ_LEVEL 2
 	IRQ_LEVEL 3
+
+	.section .kprobes.text, "ax", @progbits
+	.type	enter_monitor_mode, @function
+enter_monitor_mode:
+	/*
+	 * We need to enter monitor mode to do a single step. The
+	 * monitor code will alter the return address so that we
+	 * return directly to the user instead of returning here.
+	 */
+	breakpoint
+	rjmp	breakpoint_failed
+
+	.size	enter_monitor_mode, . - enter_monitor_mode
+
+	.type	debug_trampoline, @function
+	.global	debug_trampoline
+debug_trampoline:
+	/*
+	 * Save the registers on the stack so that the monitor code
+	 * can find them easily.
+	 */
+	sub	sp, 4		/* r12_orig */
+	stmts	--sp, r0-lr
+	get_thread_info	r0
+	ld.w	r8, r0[TI_rar_saved]
+	ld.w	r9, r0[TI_rsr_saved]
+	pushm	r8-r9
+
+	/*
+	 * The monitor code will alter the return address so we don't
+	 * return here.
+	 */
+	breakpoint
+	rjmp	breakpoint_failed
+	.size	debug_trampoline, . - debug_trampoline
+
+	.type	breakpoint_failed, @function
+breakpoint_failed:
+	/*
+	 * Something went wrong. Perhaps the debug hardware isn't
+	 * enabled?
+	 */
+	lda.w	r12, msg_breakpoint_failed
+	mov	r11, sp
+	mov	r10, 9		/* SIGKILL */
+	call	die
+1:	rjmp	1b
+
+msg_breakpoint_failed:
+	.asciz	"Failed to enter Debug Mode"
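Both enter_monitor_mode and debug_trampoline rely on the OCD debug system
already being switched on; otherwise the breakpoint instruction falls through
to breakpoint_failed above. arch_ptrace() arms it by setting the MM (monitor
mode) and DBE (debug enable) bits in the DC register, as seen in the ptrace.c
hunk below. A stand-alone sketch of that read-modify-write (the bit positions
and the ocd_* helpers here are stand-ins, not the real definitions):

#include <stdio.h>

/* Stand-ins for the kernel's OCD accessors and OCD_DC_* bit numbers. */
#define OCD_DC_MM_BIT_EXAMPLE	1
#define OCD_DC_DBE_BIT_EXAMPLE	2

static unsigned long fake_dc;			/* pretend DC register */
static unsigned long ocd_read_dc(void)		{ return fake_dc; }
static void ocd_write_dc(unsigned long val)	{ fake_dc = val; }

int main(void)
{
	/* Mirrors arch_ptrace(): enable monitor mode and the debug system
	 * before any breakpoint/single-step machinery is relied upon. */
	ocd_write_dc(ocd_read_dc()
		     | (1 << OCD_DC_MM_BIT_EXAMPLE)
		     | (1 << OCD_DC_DBE_BIT_EXAMPLE));
	printf("DC = %#lx\n", fake_dc);
	return 0;
}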
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 0de9a6eeb5bb..002369e44093 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -30,20 +30,22 @@ static struct pt_regs *get_user_regs(struct task_struct *tsk)
 
 static void ptrace_single_step(struct task_struct *tsk)
 {
-	pr_debug("ptrace_single_step: pid=%u, SR=0x%08lx\n",
-		 tsk->pid, tsk->thread.cpu_context.sr);
-	if (!(tsk->thread.cpu_context.sr & SR_D)) {
-		/*
-		 * Set a breakpoint at the current pc to force the
-		 * process into debug mode. The syscall/exception
-		 * exit code will set a breakpoint at the return
-		 * address when this flag is set.
-		 */
-		pr_debug("ptrace_single_step: Setting TIF_BREAKPOINT\n");
-		set_tsk_thread_flag(tsk, TIF_BREAKPOINT);
-	}
+	pr_debug("ptrace_single_step: pid=%u, PC=0x%08lx, SR=0x%08lx\n",
+		 tsk->pid, task_pt_regs(tsk)->pc, task_pt_regs(tsk)->sr);
 
-	/* The monitor code will do the actual step for us */
+	/*
+	 * We can't schedule in Debug mode, so when TIF_BREAKPOINT is
+	 * set, the system call or exception handler will do a
+	 * breakpoint to enter monitor mode before returning to
+	 * userspace.
+	 *
+	 * The monitor code will then notice that TIF_SINGLE_STEP is
+	 * set and return to userspace with single stepping enabled.
+	 * The CPU will then enter monitor mode again after exactly
+	 * one instruction has been executed, and the monitor code
+	 * will then send a SIGTRAP to the process.
+	 */
+	set_tsk_thread_flag(tsk, TIF_BREAKPOINT);
 	set_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
 }
 
@@ -55,23 +57,7 @@ static void ptrace_single_step(struct task_struct *tsk)
 void ptrace_disable(struct task_struct *child)
 {
 	clear_tsk_thread_flag(child, TIF_SINGLE_STEP);
-}
-
-/*
- * Handle hitting a breakpoint
- */
-static void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
-{
-	siginfo_t info;
-
-	info.si_signo = SIGTRAP;
-	info.si_errno = 0;
-	info.si_code = TRAP_BRKPT;
-	info.si_addr = (void __user *)instruction_pointer(regs);
-
-	pr_debug("ptrace_break: Sending SIGTRAP to PID %u (pc = 0x%p)\n",
-		 tsk->pid, info.si_addr);
-	force_sig_info(SIGTRAP, &info, tsk);
+	clear_tsk_thread_flag(child, TIF_BREAKPOINT);
 }
 
 /*
@@ -84,9 +70,6 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long offset,
 	unsigned long *regs;
 	unsigned long value;
 
-	pr_debug("ptrace_read_user(%p, %#lx, %p)\n",
-		 tsk, offset, data);
-
 	if (offset & 3 || offset >= sizeof(struct user)) {
 		printk("ptrace_read_user: invalid offset 0x%08lx\n", offset);
 		return -EIO;
@@ -98,6 +81,9 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long offset,
 	if (offset < sizeof(struct pt_regs))
 		value = regs[offset / sizeof(regs[0])];
 
+	pr_debug("ptrace_read_user(%s[%u], %#lx, %p) -> %#lx\n",
+		 tsk->comm, tsk->pid, offset, data, value);
+
 	return put_user(value, data);
 }
 
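ptrace_read_user() and ptrace_write_user() back the PTRACE_PEEKUSR and
PTRACE_POKEUSR requests, which address struct user by byte offset and reject
anything unaligned or out of range, as checked above. A hedged userspace
sketch of reading one word this way (offset 0 is only an example; real
offsets follow the user structure layout of the architecture):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	int status;
	waitpid(child, &status, 0);

	/* Must be word-aligned and inside struct user, exactly what
	 * ptrace_read_user() verifies before indexing pt_regs. */
	errno = 0;
	long word = ptrace(PTRACE_PEEKUSER, child, (void *)0L, NULL);
	if (errno == 0)
		printf("struct user word at offset 0: %#lx\n", word);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, &status, 0);
	return 0;
}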
@@ -111,8 +97,11 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long offset,
 {
 	unsigned long *regs;
 
+	pr_debug("ptrace_write_user(%s[%u], %#lx, %#lx)\n",
+		 tsk->comm, tsk->pid, offset, value);
+
 	if (offset & 3 || offset >= sizeof(struct user)) {
-		printk("ptrace_write_user: invalid offset 0x%08lx\n", offset);
+		pr_debug(" invalid offset 0x%08lx\n", offset);
 		return -EIO;
 	}
 
@@ -155,9 +144,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	int ret;
 
-	pr_debug("arch_ptrace(%ld, %d, %#lx, %#lx)\n",
-		 request, child->pid, addr, data);
-
 	pr_debug("ptrace: Enabling monitor mode...\n");
 	ocd_write(DC, ocd_read(DC) | (1 << OCD_DC_MM_BIT)
 		  | (1 << OCD_DC_DBE_BIT));
@@ -241,20 +227,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 	}
 
-	pr_debug("sys_ptrace returning %d (DC = 0x%08lx)\n",
-		 ret, ocd_read(DC));
 	return ret;
 }
 
 asmlinkage void syscall_trace(void)
 {
-	pr_debug("syscall_trace called\n");
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return;
 	if (!(current->ptrace & PT_PTRACED))
 		return;
 
-	pr_debug("syscall_trace: notifying parent\n");
 	/* The 0x80 provides a way for the tracing parent to
 	 * distinguish between a syscall stop and SIGTRAP delivery */
 	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
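The 0x80 trick mentioned in the comment is only applied when the tracer asked
for it with PTRACE_O_TRACESYSGOOD; the tracer then sees syscall stops as
WSTOPSIG(status) == (SIGTRAP | 0x80). A small userspace sketch of checking for
that marker (illustrative; error handling omitted):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		execl("/bin/true", "true", NULL);
		_exit(1);
	}

	int status;
	waitpid(child, &status, 0);
	ptrace(PTRACE_SETOPTIONS, child, NULL, (void *)PTRACE_O_TRACESYSGOOD);

	while (ptrace(PTRACE_SYSCALL, child, NULL, NULL) == 0) {
		if (waitpid(child, &status, 0) < 0 || WIFEXITED(status))
			break;
		if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
			printf("syscall stop (marked by the 0x80 bit)\n");
	}
	return 0;
}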
@@ -273,86 +255,143 @@ asmlinkage void syscall_trace(void)
 	}
 }
 
-asmlinkage void do_debug_priv(struct pt_regs *regs)
-{
-	unsigned long dc, ds;
-	unsigned long die_val;
-
-	ds = ocd_read(DS);
-
-	pr_debug("do_debug_priv: pc = %08lx, ds = %08lx\n", regs->pc, ds);
-
-	if (ds & (1 << OCD_DS_SSS_BIT))
-		die_val = DIE_SSTEP;
-	else
-		die_val = DIE_BREAKPOINT;
-
-	if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
-		return;
-
-	if (likely(ds & (1 << OCD_DS_SSS_BIT))) {
-		extern void itlb_miss(void);
-		extern void tlb_miss_common(void);
-		struct thread_info *ti;
-
-		dc = ocd_read(DC);
-		dc &= ~(1 << OCD_DC_SS_BIT);
-		ocd_write(DC, dc);
-
-		ti = current_thread_info();
-		set_ti_thread_flag(ti, TIF_BREAKPOINT);
-
-		/* The TLB miss handlers don't check thread flags */
-		if ((regs->pc >= (unsigned long)&itlb_miss)
-		    && (regs->pc <= (unsigned long)&tlb_miss_common)) {
-			ocd_write(BWA2A, sysreg_read(RAR_EX));
-			ocd_write(BWC2A, 0x40000001 | (get_asid() << 1));
-		}
-
-		/*
-		 * If we're running in supervisor mode, the breakpoint
-		 * will take us where we want directly, no need to
-		 * single step.
-		 */
-		if ((regs->sr & MODE_MASK) != MODE_SUPERVISOR)
-			set_ti_thread_flag(ti, TIF_SINGLE_STEP);
-	} else {
-		panic("Unable to handle debug trap at pc = %08lx\n",
-		      regs->pc);
-	}
-}
-
 /*
- * Handle breakpoints, single steps and other debuggy things. To keep
- * things simple initially, we run with interrupts and exceptions
- * disabled all the time.
+ * debug_trampoline() is an assembly stub which will store all user
+ * registers on the stack and execute a breakpoint instruction.
+ *
+ * If we single-step into an exception handler which runs with
+ * interrupts disabled the whole time so it doesn't have to check for
+ * pending work, its return address will be modified so that it ends
+ * up returning to debug_trampoline.
+ *
+ * If the exception handler decides to store the user context and
+ * enable interrupts after all, it will restore the original return
+ * address and status register value. Before it returns, it will
+ * notice that TIF_BREAKPOINT is set and execute a breakpoint
+ * instruction.
  */
-asmlinkage void do_debug(struct pt_regs *regs)
-{
-	unsigned long dc, ds;
+extern void debug_trampoline(void);
 
-	ds = ocd_read(DS);
-	pr_debug("do_debug: pc = %08lx, ds = %08lx\n", regs->pc, ds);
+asmlinkage struct pt_regs *do_debug(struct pt_regs *regs)
+{
+	struct thread_info *ti;
+	unsigned long trampoline_addr;
+	u32 status;
+	u32 ctrl;
+	int code;
+
+	status = ocd_read(DS);
+	ti = current_thread_info();
+	code = TRAP_BRKPT;
+
+	pr_debug("do_debug: status=0x%08x PC=0x%08lx SR=0x%08lx tif=0x%08lx\n",
+		 status, regs->pc, regs->sr, ti->flags);
+
+	if (!user_mode(regs)) {
+		unsigned long die_val = DIE_BREAKPOINT;
+
+		if (status & (1 << OCD_DS_SSS_BIT))
+			die_val = DIE_SSTEP;
+
+		if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP)
+		    == NOTIFY_STOP)
+			return regs;
+
+		if ((status & (1 << OCD_DS_SWB_BIT))
+		    && test_and_clear_ti_thread_flag(
+				ti, TIF_BREAKPOINT)) {
+			/*
+			 * Explicit breakpoint from trampoline or
+			 * exception/syscall/interrupt handler.
+			 *
+			 * The real saved regs are on the stack right
+			 * after the ones we saved on entry.
+			 */
+			regs++;
+			pr_debug(" -> TIF_BREAKPOINT done, adjusted regs:"
+				 "PC=0x%08lx SR=0x%08lx\n",
+				 regs->pc, regs->sr);
+			BUG_ON(!user_mode(regs));
+
+			if (test_thread_flag(TIF_SINGLE_STEP)) {
+				pr_debug("Going to do single step...\n");
+				return regs;
+			}
+
+			/*
+			 * No TIF_SINGLE_STEP means we're done
+			 * stepping over a syscall. Do the trap now.
+			 */
+			code = TRAP_TRACE;
+		} else if ((status & (1 << OCD_DS_SSS_BIT))
+			   && test_ti_thread_flag(ti, TIF_SINGLE_STEP)) {
+
+			pr_debug("Stepped into something, "
+				 "setting TIF_BREAKPOINT...\n");
+			set_ti_thread_flag(ti, TIF_BREAKPOINT);
+
+			/*
+			 * We stepped into an exception, interrupt or
+			 * syscall handler. Some exception handlers
+			 * don't check for pending work, so we need to
+			 * set up a trampoline just in case.
+			 *
+			 * The exception entry code will undo the
+			 * trampoline stuff if it does a full context
+			 * save (which also means that it'll check for
+			 * pending work later.)
+			 */
+			if ((regs->sr & MODE_MASK) == MODE_EXCEPTION) {
+				trampoline_addr
+					= (unsigned long)&debug_trampoline;
+
+				pr_debug("Setting up trampoline...\n");
+				ti->rar_saved = sysreg_read(RAR_EX);
+				ti->rsr_saved = sysreg_read(RSR_EX);
+				sysreg_write(RAR_EX, trampoline_addr);
+				sysreg_write(RSR_EX, (MODE_EXCEPTION
+							| SR_EM | SR_GM));
+				BUG_ON(ti->rsr_saved & MODE_MASK);
+			}
+
+			/*
+			 * If we stepped into a system call, we
+			 * shouldn't do a single step after we return
+			 * since the return address is right after the
+			 * "scall" instruction we were told to step
+			 * over.
+			 */
+			if ((regs->sr & MODE_MASK) == MODE_SUPERVISOR) {
+				pr_debug("Supervisor; no single step\n");
+				clear_ti_thread_flag(ti, TIF_SINGLE_STEP);
+			}
+
+			ctrl = ocd_read(DC);
+			ctrl &= ~(1 << OCD_DC_SS_BIT);
+			ocd_write(DC, ctrl);
+
+			return regs;
+		} else {
+			printk(KERN_ERR "Unexpected OCD_DS value: 0x%08x\n",
+			       status);
+			printk(KERN_ERR "Thread flags: 0x%08lx\n", ti->flags);
+			die("Unhandled debug trap in kernel mode",
+			    regs, SIGTRAP);
+		}
+	} else if (status & (1 << OCD_DS_SSS_BIT)) {
+		/* Single step in user mode */
+		code = TRAP_TRACE;
 
-	if (test_thread_flag(TIF_BREAKPOINT)) {
-		pr_debug("TIF_BREAKPOINT set\n");
-		/* We're taking care of it */
-		clear_thread_flag(TIF_BREAKPOINT);
-		ocd_write(BWC2A, 0);
+		ctrl = ocd_read(DC);
+		ctrl &= ~(1 << OCD_DC_SS_BIT);
+		ocd_write(DC, ctrl);
 	}
 
-	if (test_thread_flag(TIF_SINGLE_STEP)) {
-		pr_debug("TIF_SINGLE_STEP set, ds = 0x%08lx\n", ds);
-		if (ds & (1 << OCD_DS_SSS_BIT)) {
-			dc = ocd_read(DC);
-			dc &= ~(1 << OCD_DC_SS_BIT);
-			ocd_write(DC, dc);
+	pr_debug("Sending SIGTRAP: code=%d PC=0x%08lx SR=0x%08lx\n",
+		 code, regs->pc, regs->sr);
 
-			clear_thread_flag(TIF_SINGLE_STEP);
-			ptrace_break(current, regs);
-		}
-	} else {
-		/* regular breakpoint */
-		ptrace_break(current, regs);
-	}
+	clear_thread_flag(TIF_SINGLE_STEP);
+	_exception(SIGTRAP, regs, code, instruction_pointer(regs));
+
+	return regs;
 }
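The regs++ in the TIF_BREAKPOINT branch works because handle_debug pushes its
own pt_regs frame directly below the frame that was already saved by the
interrupted handler or by debug_trampoline, so the frame we actually want to
resume is the next struct pt_regs up the stack. A stand-alone sketch of that
layout (pt_regs_example and the values are invented):

#include <stdio.h>

/* Hypothetical, cut-down stand-in for struct pt_regs. */
struct pt_regs_example {
	unsigned long pc;
	unsigned long sr;
};

int main(void)
{
	/* stack[0] is the frame saved on entry to monitor mode,
	 * stack[1] is the earlier frame we really care about. */
	struct pt_regs_example stack[2] = {
		{ .pc = 0xa0001234UL, .sr = 0x0UL },	/* monitor-entry frame */
		{ .pc = 0x00001000UL, .sr = 0x0UL },	/* trapped user context */
	};
	struct pt_regs_example *regs = &stack[0];

	regs++;					/* same move as in do_debug() */
	printf("resuming at pc=%#lx\n", regs->pc);
	return 0;
}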
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index ce9ac9659883..11f08e35a2eb 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -77,10 +77,10 @@ SECTIONS
 		. = 0x100;
 		*(.scall.text)
 		*(.irq.text)
+		KPROBES_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
 		LOCK_TEXT
-		KPROBES_TEXT
 		*(.fixup)
 		*(.gnu.warning)
 		_etext = .;
diff --git a/include/asm-avr32/processor.h b/include/asm-avr32/processor.h
index 6a64833756a6..a52576b25afe 100644
--- a/include/asm-avr32/processor.h
+++ b/include/asm-avr32/processor.h
@@ -139,6 +139,9 @@ extern void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl);
 extern void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
 			       struct pt_regs *regs, const char *log_lvl);
 
+#define task_pt_regs(p) \
+	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
+
 #define KSTK_EIP(tsk)	((tsk)->thread.cpu_context.pc)
 #define KSTK_ESP(tsk)	((tsk)->thread.cpu_context.ksp)
 
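The new task_pt_regs() relies on the convention that the saved user-mode
pt_regs of a task sit at the very top of its kernel stack page. A stand-alone
sketch of the pointer arithmetic (THREAD_SIZE_EXAMPLE and pt_regs_example are
stand-ins for the real THREAD_SIZE and struct pt_regs):

#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE_EXAMPLE	8192	/* stand-in for THREAD_SIZE */

struct pt_regs_example { unsigned long pc, sr; };

int main(void)
{
	/* Pretend kernel stack: thread_info at the bottom, the user-mode
	 * pt_regs frame at the very top -- the layout task_pt_regs() assumes. */
	char *stack_page = malloc(THREAD_SIZE_EXAMPLE);
	struct pt_regs_example *regs =
		(struct pt_regs_example *)(stack_page + THREAD_SIZE_EXAMPLE) - 1;

	printf("pt_regs occupy the top %zu bytes of the stack\n", sizeof(*regs));
	printf("offset from stack base: %td\n", (char *)regs - stack_page);
	free(stack_page);
	return 0;
}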
diff --git a/include/asm-avr32/thread_info.h b/include/asm-avr32/thread_info.h
index 67e7aae89e1f..184b574289b4 100644
--- a/include/asm-avr32/thread_info.h
+++ b/include/asm-avr32/thread_info.h
@@ -25,6 +25,11 @@ struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	__u32			cpu;
 	__s32			preempt_count;	/* 0 => preemptable, <0 => BUG */
+	__u32			rar_saved;	/* return address... */
+	__u32			rsr_saved;	/* ...and status register
+						   saved by debug handler
+						   when setting up
+						   trampoline */
 	struct restart_block	restart_block;
 	__u8			supervisor_stack[0];
 };
@@ -78,8 +83,8 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
 #define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling
 					   TIF_NEED_RESCHED */
-#define TIF_BREAKPOINT		4	/* true if we should break after return */
-#define TIF_SINGLE_STEP		5	/* single step after next break */
+#define TIF_BREAKPOINT		4	/* enter monitor mode on return */
+#define TIF_SINGLE_STEP		5	/* single step in progress */
 #define TIF_MEMDIE		6
 #define TIF_RESTORE_SIGMASK	7	/* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP	8	/* CPU is entering sleep 0 mode */
@@ -89,7 +94,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_BREAKPOINT		(1 << TIF_BREAKPOINT)
 #define _TIF_SINGLE_STEP	(1 << TIF_SINGLE_STEP)
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
@@ -108,6 +112,6 @@ static inline struct thread_info *current_thread_info(void)
 /* work to do on any return to userspace */
 #define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE))
 /* work to do on return from debug mode */
-#define _TIF_DBGWORK_MASK	(_TIF_WORK_MASK | (1 << TIF_SINGLE_STEP))
+#define _TIF_DBGWORK_MASK	(_TIF_WORK_MASK & ~(1 << TIF_BREAKPOINT))
 
 #endif /* __ASM_AVR32_THREAD_INFO_H */
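The redefined _TIF_DBGWORK_MASK is just the normal work mask with the
TIF_BREAKPOINT bit cleared, so the monitor-mode exit path in handle_debug
(the tst/brne debug_exit_work sequence above) only leaves Debug mode for work
it is not allowed to do there, and a pending TIF_BREAKPOINT alone no longer
forces the slow path. A stand-alone sketch of that test (the bit numbers are
the ones from this header; the work mask itself is only approximated here):

#include <stdio.h>

/* Flag bit numbers as defined in this header. */
#define TIF_NEED_RESCHED	2
#define TIF_BREAKPOINT		4
#define TIF_SINGLE_STEP		5

/* Rough stand-in for _TIF_WORK_MASK; the real mask covers more flags. */
#define WORK_MASK_EXAMPLE	((1 << TIF_NEED_RESCHED) | (1 << TIF_BREAKPOINT))
#define DBGWORK_MASK_EXAMPLE	(WORK_MASK_EXAMPLE & ~(1 << TIF_BREAKPOINT))

int main(void)
{
	unsigned long flags = (1 << TIF_BREAKPOINT) | (1 << TIF_SINGLE_STEP);

	/* Mirrors "tst r1, r2; brne debug_exit_work" in handle_debug:
	 * a lone TIF_BREAKPOINT must not trigger the slow exit path. */
	printf("debug exit work pending: %s\n",
	       (flags & DBGWORK_MASK_EXAMPLE) ? "yes" : "no");
	return 0;
}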