Diffstat (limited to 'arch/arm/kernel/hw_breakpoint.c')
-rw-r--r--	arch/arm/kernel/hw_breakpoint.c	275
1 file changed, 170 insertions(+), 105 deletions(-)
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index a927ca1f5566..814a52a9dc39 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -45,7 +45,6 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
 
 /* Number of BRP/WRP registers on this CPU. */
 static int core_num_brps;
-static int core_num_reserved_brps;
 static int core_num_wrps;
 
 /* Debug architecture version. */
@@ -137,10 +136,11 @@ static u8 get_debug_arch(void)
 	u32 didr;
 
 	/* Do we implement the extended CPUID interface? */
-	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
-		      "CPUID feature registers not supported. "
-		      "Assuming v6 debug is present.\n"))
+	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+		pr_warning("CPUID feature registers not supported. "
+			   "Assuming v6 debug is present.\n");
 		return ARM_DEBUG_ARCH_V6;
+	}
 
 	ARM_DBG_READ(c0, 0, didr);
 	return (didr >> 16) & 0xf;
@@ -154,10 +154,21 @@ u8 arch_get_debug_arch(void)
 static int debug_arch_supported(void)
 {
 	u8 arch = get_debug_arch();
-	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+
+	/* We don't support the memory-mapped interface. */
+	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
+		arch >= ARM_DEBUG_ARCH_V7_1;
+}
+
+/* Determine number of WRP registers available. */
+static int get_num_wrp_resources(void)
+{
+	u32 didr;
+	ARM_DBG_READ(c0, 0, didr);
+	return ((didr >> 28) & 0xf) + 1;
 }
 
-/* Determine number of BRP register available. */
+/* Determine number of BRP registers available. */
 static int get_num_brp_resources(void)
 {
 	u32 didr;
@@ -176,9 +187,10 @@ static int core_has_mismatch_brps(void)
 static int get_num_wrps(void)
 {
 	/*
-	 * FIXME: When a watchpoint fires, the only way to work out which
-	 * watchpoint it was is by disassembling the faulting instruction
-	 * and working out the address of the memory access.
+	 * On debug architectures prior to 7.1, when a watchpoint fires, the
+	 * only way to work out which watchpoint it was is by disassembling
+	 * the faulting instruction and working out the address of the memory
+	 * access.
 	 *
 	 * Furthermore, we can only do this if the watchpoint was precise
 	 * since imprecise watchpoints prevent us from calculating register
@@ -192,36 +204,17 @@ static int get_num_wrps(void)
 	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
 	 * that it is set on some implementations].
 	 */
+	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
+		return 1;
 
-#if 0
-	int wrps;
-	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
-	wrps = ((didr >> 28) & 0xf) + 1;
-#endif
-	int wrps = 1;
-
-	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
-		wrps = get_num_brp_resources() - 1;
-
-	return wrps;
-}
-
-/* We reserve one breakpoint for each watchpoint. */
-static int get_num_reserved_brps(void)
-{
-	if (core_has_mismatch_brps())
-		return get_num_wrps();
-	return 0;
+	return get_num_wrp_resources();
 }
 
 /* Determine number of usable BRPs available. */
 static int get_num_brps(void)
 {
 	int brps = get_num_brp_resources();
-	if (core_has_mismatch_brps())
-		brps -= get_num_reserved_brps();
-	return brps;
+	return core_has_mismatch_brps() ? brps - 1 : brps;
 }
 
 /*
@@ -239,7 +232,7 @@ static int enable_monitor_mode(void)
 
 	/* Ensure that halting mode is disabled. */
 	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
-		      "halting debug mode enabled. Unable to access hardware resources.\n")) {
+		"halting debug mode enabled. Unable to access hardware resources.\n")) {
 		ret = -EPERM;
 		goto out;
 	}
@@ -255,6 +248,7 @@ static int enable_monitor_mode(void)
 		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	case ARM_DEBUG_ARCH_V7_ECP14:
+	case ARM_DEBUG_ARCH_V7_1:
 		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	default:
@@ -346,24 +340,10 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		val_base = ARM_BASE_BVR;
 		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
 		max_slots = core_num_brps;
-		if (info->step_ctrl.enabled) {
-			/* Override the breakpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		}
 	} else {
 		/* Watchpoint */
-		if (info->step_ctrl.enabled) {
-			/* Install into the reserved breakpoint region. */
-			ctrl_base = ARM_BASE_BCR + core_num_brps;
-			val_base = ARM_BASE_BVR + core_num_brps;
-			/* Override the watchpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		} else {
-			ctrl_base = ARM_BASE_WCR;
-			val_base = ARM_BASE_WVR;
-		}
+		ctrl_base = ARM_BASE_WCR;
+		val_base = ARM_BASE_WVR;
 		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
@@ -382,6 +362,17 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		goto out;
 	}
 
+	/* Override the breakpoint data with the step data. */
+	if (info->step_ctrl.enabled) {
+		addr = info->trigger & ~0x3;
+		ctrl = encode_ctrl_reg(info->step_ctrl);
+		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
+			i = 0;
+			ctrl_base = ARM_BASE_BCR + core_num_brps;
+			val_base = ARM_BASE_BVR + core_num_brps;
+		}
+	}
+
 	/* Setup the address register. */
 	write_wb_reg(val_base + i, addr);
 
@@ -405,10 +396,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
-		if (info->step_ctrl.enabled)
-			base = ARM_BASE_BCR + core_num_brps;
-		else
-			base = ARM_BASE_WCR;
+		base = ARM_BASE_WCR;
 		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
@@ -426,6 +414,13 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
 		return;
 
+	/* Ensure that we disable the mismatch breakpoint. */
+	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
+	    info->step_ctrl.enabled) {
+		i = 0;
+		base = ARM_BASE_BCR + core_num_brps;
+	}
+
 	/* Reset the control register. */
 	write_wb_reg(base + i, 0);
 }
@@ -632,10 +627,9 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * we can use the mismatch feature as a poor-man's hardware
 	 * single-step, but this only works for per-task breakpoints.
 	 */
-	if (WARN_ONCE(!bp->overflow_handler &&
-		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
-			|| !bp->hw.bp_target),
-			"overflow handler required but none found\n")) {
+	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
+	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
+		pr_warning("overflow handler required but none found\n");
 		ret = -EINVAL;
 	}
 out:
@@ -666,34 +660,62 @@ static void disable_single_step(struct perf_event *bp)
 	arch_install_hw_breakpoint(bp);
 }
 
-static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
+static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+			       struct pt_regs *regs)
 {
-	int i;
+	int i, access;
+	u32 val, ctrl_reg, alignment_mask;
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;
+	struct arch_hw_breakpoint_ctrl ctrl;
 
 	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
-	/* Without a disassembler, we can only handle 1 watchpoint. */
-	BUG_ON(core_num_wrps > 1);
-
 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
 
 		wp = slots[i];
 
-		if (wp == NULL) {
-			rcu_read_unlock();
-			continue;
-		}
+		if (wp == NULL)
+			goto unlock;
 
+		info = counter_arch_bp(wp);
 		/*
-		 * The DFAR is an unknown value. Since we only allow a
-		 * single watchpoint, we can set the trigger to the lowest
-		 * possible faulting address.
+		 * The DFAR is an unknown value on debug architectures prior
+		 * to 7.1. Since we only allow a single watchpoint on these
+		 * older CPUs, we can set the trigger to the lowest possible
+		 * faulting address.
 		 */
-		info = counter_arch_bp(wp);
-		info->trigger = wp->attr.bp_addr;
+		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
+			BUG_ON(i > 0);
+			info->trigger = wp->attr.bp_addr;
+		} else {
+			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+				alignment_mask = 0x7;
+			else
+				alignment_mask = 0x3;
+
+			/* Check if the watchpoint value matches. */
+			val = read_wb_reg(ARM_BASE_WVR + i);
+			if (val != (addr & ~alignment_mask))
+				goto unlock;
+
+			/* Possible match, check the byte address select. */
+			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+			decode_ctrl_reg(ctrl_reg, &ctrl);
+			if (!((1 << (addr & alignment_mask)) & ctrl.len))
+				goto unlock;
+
+			/* Check that the access type matches. */
+			access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+				 HW_BREAKPOINT_R;
+			if (!(access & hw_breakpoint_type(wp)))
+				goto unlock;
+
+			/* We have a winner. */
+			info->trigger = addr;
+		}
+
 		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
 		perf_bp_event(wp, regs);
 
@@ -705,6 +727,7 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
 		if (!wp->overflow_handler)
 			enable_single_step(wp, instruction_pointer(regs));
 
+unlock:
 		rcu_read_unlock();
 	}
 }
@@ -717,7 +740,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
 
 	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
-	for (i = 0; i < core_num_reserved_brps; ++i) {
+	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
 
 		wp = slots[i];
@@ -820,7 +843,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 	case ARM_ENTRY_ASYNC_WATCHPOINT:
 		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
 	case ARM_ENTRY_SYNC_WATCHPOINT:
-		watchpoint_handler(addr, regs);
+		watchpoint_handler(addr, fsr, regs);
 		break;
 	default:
 		ret = 1; /* Unhandled fault. */
@@ -834,11 +857,31 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *info)
+static cpumask_t debug_err_mask;
+
+static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
 {
-	int i, cpu = smp_processor_id();
+	int cpu = smp_processor_id();
+
+	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
+		   instr, cpu);
+
+	/* Set the error flag for this CPU and skip the faulting instruction. */
+	cpumask_set_cpu(cpu, &debug_err_mask);
+	instruction_pointer(regs) += 4;
+	return 0;
+}
+
+static struct undef_hook debug_reg_hook = {
+	.instr_mask	= 0x0fe80f10,
+	.instr_val	= 0x0e000e10,
+	.fn		= debug_reg_trap,
+};
+
+static void reset_ctrl_regs(void *unused)
+{
+	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
 	u32 dbg_power;
-	cpumask_t *cpumask = info;
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
@@ -848,38 +891,57 @@ static void reset_ctrl_regs(void *info)
 	 * Access Register to avoid taking undefined instruction exceptions
 	 * later on.
 	 */
-	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+	switch (debug_arch) {
+	case ARM_DEBUG_ARCH_V6:
+	case ARM_DEBUG_ARCH_V6_1:
+		/* ARMv6 cores just need to reset the registers. */
+		goto reset_regs;
+	case ARM_DEBUG_ARCH_V7_ECP14:
 		/*
 		 * Ensure sticky power-down is clear (i.e. debug logic is
 		 * powered up).
 		 */
 		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
-		if ((dbg_power & 0x1) == 0) {
-			pr_warning("CPU %d debug is powered down!\n", cpu);
-			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
-			return;
-		}
-
+		if ((dbg_power & 0x1) == 0)
+			err = -EPERM;
+		break;
+	case ARM_DEBUG_ARCH_V7_1:
 		/*
-		 * Unconditionally clear the lock by writing a value
-		 * other than 0xC5ACCE55 to the access register.
+		 * Ensure the OS double lock is clear.
 		 */
-		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
-		isb();
+		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 1)
+			err = -EPERM;
+		break;
+	}
 
-		/*
-		 * Clear any configured vector-catch events before
-		 * enabling monitor mode.
-		 */
-		asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
-		isb();
+	if (err) {
+		pr_warning("CPU %d debug is powered down!\n", cpu);
+		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
+		return;
 	}
 
+	/*
+	 * Unconditionally clear the lock by writing a value
+	 * other than 0xC5ACCE55 to the access register.
+	 */
+	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+	isb();
+
+	/*
+	 * Clear any configured vector-catch events before
+	 * enabling monitor mode.
+	 */
+	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+	isb();
+
+reset_regs:
 	if (enable_monitor_mode())
 		return;
 
 	/* We must also reset any reserved registers. */
-	for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
+	raw_num_brps = get_num_brp_resources();
+	for (i = 0; i < raw_num_brps; ++i) {
 		write_wb_reg(ARM_BASE_BCR + i, 0UL);
 		write_wb_reg(ARM_BASE_BVR + i, 0UL);
 	}
@@ -895,6 +957,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
 {
 	if (action == CPU_ONLINE)
 		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
+
 	return NOTIFY_OK;
 }
 
@@ -905,7 +968,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
-	cpumask_t cpumask = { CPU_BITS_NONE };
 
 	debug_arch = get_debug_arch();
 
@@ -916,28 +978,31 @@ static int __init arch_hw_breakpoint_init(void)
 
 	/* Determine how many BRPs/WRPs are available. */
 	core_num_brps = get_num_brps();
-	core_num_reserved_brps = get_num_reserved_brps();
 	core_num_wrps = get_num_wrps();
 
-	pr_info("found %d breakpoint and %d watchpoint registers.\n",
-		core_num_brps + core_num_reserved_brps, core_num_wrps);
-
-	if (core_num_reserved_brps)
-		pr_info("%d breakpoint(s) reserved for watchpoint "
-			"single-step.\n", core_num_reserved_brps);
+	/*
+	 * We need to tread carefully here because DBGSWENABLE may be
+	 * driven low on this core and there isn't an architected way to
+	 * determine that.
+	 */
+	register_undef_hook(&debug_reg_hook);
 
 	/*
 	 * Reset the breakpoint resources. We assume that a halting
 	 * debugger will leave the world in a nice state for us.
 	 */
-	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
-	if (!cpumask_empty(&cpumask)) {
+	on_each_cpu(reset_ctrl_regs, NULL, 1);
+	unregister_undef_hook(&debug_reg_hook);
+	if (!cpumask_empty(&debug_err_mask)) {
 		core_num_brps = 0;
-		core_num_reserved_brps = 0;
 		core_num_wrps = 0;
 		return 0;
 	}
 
+	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
+		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
+		"", core_num_wrps);
+
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
 		max_watchpoint_len = 4;