author     Chris Metcalf <cmetcalf@tilera.com>    2010-10-14 16:32:41 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>    2010-10-15 15:38:26 -0400
commit     233325b94999d4bb8df227bb39904a57509e4995 (patch)
tree       1f195bded03ce5aa483b41531e739a8cc61ce392
parent     a78c942df64ef4cf495fd4d8715e48501bd7f8a4 (diff)
arch/tile: enable single-step support for TILE-Gx
This is not quite the complete support, since we're not yet shipping intvec_64.S, but it is the support relevant to the set of files we are currently shipping, and makes it easier to track changes between our internal sources and our public GIT repository.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
-rw-r--r--  arch/tile/include/asm/traps.h    4
-rw-r--r--  arch/tile/kernel/intvec_32.S     7
-rw-r--r--  arch/tile/kernel/single_step.c  73
-rw-r--r--  arch/tile/kernel/traps.c         2
4 files changed, 83 insertions(+), 3 deletions(-)
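
For context (commentary, not part of this patch): the TIF_SINGLESTEP and single_step_once() machinery touched below is what ultimately backs ptrace(PTRACE_SINGLESTEP, ...) on this architecture. A minimal, architecture-independent sketch of the userspace side looks like the following; the traced program ("true") and the step count are arbitrary illustration choices, and every ptrace/wait call used is the standard Linux API.

#include <stdio.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
        pid_t pid = fork();

        if (pid == 0) {
                /* Child: request tracing, then exec a short-lived program. */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execlp("true", "true", (char *)NULL);
                _exit(1);
        }

        /* Parent: wait for the initial stop, then step a few instructions. */
        int status;
        waitpid(pid, &status, 0);

        for (int i = 0; i < 5 && WIFSTOPPED(status); i++) {
                /* Resume the child for exactly one instruction; the kernel
                 * reports back with a SIGTRAP stop once it has retired. */
                if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0) {
                        perror("ptrace");
                        break;
                }
                waitpid(pid, &status, 0);
        }

        if (WIFSTOPPED(status)) {
                ptrace(PTRACE_CONT, pid, NULL, NULL);
                waitpid(pid, &status, 0);
        }
        return 0;
}

On TILE-Gx the per-instruction SIGTRAP comes from the new gx_singlestep_handle() below; on 32-bit TILE the existing emulation path in single_step.c provides the equivalent behavior.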
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 432a9c15c8a2..d06e35f57201 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -59,4 +59,8 @@ void do_hardwall_trap(struct pt_regs *, int fault_num);
 void do_breakpoint(struct pt_regs *, int fault_num);
 
 
+#ifdef __tilegx__
+void gx_singlestep_handle(struct pt_regs *, int fault_num);
+#endif
+
 #endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 206dc7e1fe36..f5821626247f 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1472,7 +1472,12 @@ handle_ill:
         lw      r26, r24
         sw      r28, r26
 
-        /* Clear TIF_SINGLESTEP */
+        /*
+         * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
+         * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
+         * need to clear it here and can't really impose on all other arches.
+         * So what's another write between friends?
+         */
         GET_THREAD_INFO(r0)
 
         addi    r1, r0, THREAD_INFO_FLAGS_OFFSET
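
A note on the comment added above (illustration, not code from this patch): the flag update that handle_ill open-codes against THREAD_INFO_FLAGS_OFFSET is the assembly equivalent of the generic thread-flag helper, roughly as sketched below; the wrapper name is made up for this sketch.

#include <linux/thread_info.h>
#include <linux/sched.h>

/* Hypothetical C-level rendering of what handle_ill does in assembly. */
static inline void handle_ill_clear_singlestep(void)
{
        /* Drop TIF_SINGLESTEP on the current task so that re-entering the
         * ill handler does not try to single-step itself. */
        clear_ti_thread_flag(current_thread_info(), TIF_SINGLESTEP);
}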
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 5ec4b9c651f2..1eb3b39e36c7 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -15,7 +15,7 @@
  * Derived from iLib's single-stepping code.
  */
 
-#ifndef __tilegx__  /* No support for single-step yet. */
+#ifndef __tilegx__  /* Hardware support for single step unavailable. */
 
 /* These functions are only used on the TILE platform */
 #include <linux/slab.h>
@@ -660,4 +660,75 @@ void single_step_once(struct pt_regs *regs)
         regs->pc += 8;
 }
 
+#else
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <arch/spr_def.h>
+
+static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
+
+
+/*
+ * Called directly on the occasion of an interrupt.
+ *
+ * If the process doesn't have single step set, then we use this as an
+ * opportunity to turn single step off.
+ *
+ * It has been mentioned that we could conditionally turn off single stepping
+ * on each entry into the kernel and rely on single_step_once to turn it
+ * on for the processes that matter (as we already do), but this
+ * implementation is somewhat more efficient in that we muck with registers
+ * once on a bum interrupt rather than on every entry into the kernel.
+ *
+ * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
+ * so we have to run through this process again before we can say that an
+ * instruction has executed.
+ *
+ * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
+ * it changes the PC.  If it hasn't changed, then we know that the interrupt
+ * wasn't generated by swint and we'll need to run this process again before
+ * we can say an instruction has executed.
+ *
+ * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
+ * on with our lives.
+ */
+
+void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
+{
+        unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+        struct thread_info *info = (void *)current_thread_info();
+        int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
+        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+        if (is_single_step == 0) {
+                __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
+
+        } else if ((*ss_pc != regs->pc) ||
+                   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
+
+                ptrace_notify(SIGTRAP);
+                control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+                control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+                __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+        }
+}
+
+
+/*
+ * Called from need_singlestep.  Set up the control registers and the enable
+ * register, then return back.
+ */
+
+void single_step_once(struct pt_regs *regs)
+{
+        unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+        *ss_pc = regs->pc;
+        control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+        control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+        __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+        __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
+}
+
 #endif /* !__tilegx__ */
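
A design note on the code just added (commentary, not part of the patch): ss_saved_pc is a per-cpu slot rather than per-task state, which works because both gx_singlestep_handle() and single_step_once() run on the trap path of the CPU doing the stepping. A minimal sketch of that per-cpu pattern, with made-up names and the __get_cpu_var() API of this kernel era:

#include <linux/percpu.h>

/* One slot per CPU; both names below are illustrative, not from the patch. */
static DEFINE_PER_CPU(unsigned long, example_saved_pc);

static void example_remember_pc(unsigned long pc)
{
        /* Record the PC we are about to single-step over on this CPU. */
        __get_cpu_var(example_saved_pc) = pc;
}

static int example_pc_unchanged(unsigned long pc)
{
        /* Compare a later trap PC against the same CPU's slot. */
        return __get_cpu_var(example_saved_pc) == pc;
}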
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 7826a8b17997..5474fc2e77e8 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -260,7 +260,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
                 address = regs->pc;
                 break;
         case INT_UNALIGN_DATA:
-#ifndef __tilegx__  /* FIXME: GX: no single-step yet */
+#ifndef __tilegx__  /* Emulated support for single step debugging */
                 if (unaligned_fixup >= 0) {
                         struct single_step_state *state =
                                 current_thread_info()->step_state;