author		Russell King <rmk+kernel@arm.linux.org.uk>	2010-12-18 09:27:55 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-12-18 09:27:55 -0500
commit		2f841ed13b9f10037e25ddf417d01700ecd886d0 (patch)
tree		123448d98b3be03ac90fbb6e32f224235063c8bf /arch/arm/kernel
parent		961ec6daa7b14f376c30d447a830fa4783a2112c (diff)
parent		8fbf397c3389c1dedfa9ee412715046ab28fd82d (diff)
Merge branch 'hw-breakpoint' of git://repo.or.cz/linux-2.6/linux-wd into devel-stable
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/entry-armv.S	|   4
-rw-r--r--	arch/arm/kernel/entry-header.S	|  19
-rw-r--r--	arch/arm/kernel/hw_breakpoint.c	| 543
-rw-r--r--	arch/arm/kernel/ptrace.c	|   4
4 files changed, 344 insertions(+), 226 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index c09e3573c5d..34bbef0d2e7 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -198,6 +198,7 @@ __dabt_svc:
 	@
 	@ set desired IRQ state, then call main handler
 	@
+	debug_entry r1
 	msr	cpsr_c, r9
 	mov	r2, sp
 	bl	do_DataAbort
@@ -324,6 +325,7 @@ __pabt_svc:
 #else
 	bl	CPU_PABORT_HANDLER
 #endif
+	debug_entry r1
 	msr	cpsr_c, r9			@ Maybe enable interrupts
 	mov	r2, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
@@ -439,6 +441,7 @@ __dabt_usr:
 	@
 	@ IRQs on, then call the main handler
 	@
+	debug_entry r1
 	enable_irq
 	mov	r2, sp
 	adr	lr, BSYM(ret_from_exception)
@@ -703,6 +706,7 @@ __pabt_usr:
 #else
 	bl	CPU_PABORT_HANDLER
 #endif
+	debug_entry r1
 	enable_irq				@ Enable interrupts
 	mov	r2, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index d93f976fb38..ae946490016 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -165,6 +165,25 @@
 	.endm
 #endif	/* !CONFIG_THUMB2_KERNEL */
 
+	@
+	@ Debug exceptions are taken as prefetch or data aborts.
+	@ We must disable preemption during the handler so that
+	@ we can access the debug registers safely.
+	@
+	.macro	debug_entry, fsr
+#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
+	ldr	r4, =0x40f		@ mask out fsr.fs
+	and	r5, r4, \fsr
+	cmp	r5, #2			@ debug exception
+	bne	1f
+	get_thread_info r10
+	ldr	r6, [r10, #TI_PREEMPT]	@ get preempt count
+	add	r11, r6, #1		@ increment it
+	str	r11, [r10, #TI_PREEMPT]
+1:
+#endif
+	.endm
+
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
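A rough C rendering of what the debug_entry macro above does (an illustrative sketch only; the real code is the assembly macro, and the matching decrement happens via preempt_enable() in hw_breakpoint_pending() further down in this series):

	/* Sketch: bump the preempt count when the abort is a debug exception. */
	if ((fsr & 0x40f) == 0x2) {		/* FSR status == debug event */
		struct thread_info *ti = current_thread_info();
		ti->preempt_count++;		/* dropped later by preempt_enable() */
	}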
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 21e3a4ab3b8..c9f3f046757 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -24,6 +24,7 @@
 #define pr_fmt(fmt) "hw-breakpoint: " fmt
 
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/smp.h>
@@ -44,6 +45,7 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
 
 /* Number of BRP/WRP registers on this CPU. */
 static int core_num_brps;
+static int core_num_reserved_brps;
 static int core_num_wrps;
 
@@ -52,87 +54,6 @@ static u8 debug_arch;
 /* Maximum supported watchpoint length. */
 static u8 max_watchpoint_len;
 
-/* Determine number of BRP registers available. */
-static int get_num_brps(void)
-{
-	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
-	return ((didr >> 24) & 0xf) + 1;
-}
-
-/* Determine number of WRP registers available. */
-static int get_num_wrps(void)
-{
-	/*
-	 * FIXME: When a watchpoint fires, the only way to work out which
-	 * watchpoint it was is by disassembling the faulting instruction
-	 * and working out the address of the memory access.
-	 *
-	 * Furthermore, we can only do this if the watchpoint was precise
-	 * since imprecise watchpoints prevent us from calculating register
-	 * based addresses.
-	 *
-	 * For the time being, we only report 1 watchpoint register so we
-	 * always know which watchpoint fired. In the future we can either
-	 * add a disassembler and address generation emulator, or we can
-	 * insert a check to see if the DFAR is set on watchpoint exception
-	 * entry [the ARM ARM states that the DFAR is UNKNOWN, but
-	 * experience shows that it is set on some implementations].
-	 */
-
-#if 0
-	u32 didr, wrps;
-	ARM_DBG_READ(c0, 0, didr);
-	return ((didr >> 28) & 0xf) + 1;
-#endif
-
-	return 1;
-}
-
-int hw_breakpoint_slots(int type)
-{
-	/*
-	 * We can be called early, so don't rely on
-	 * our static variables being initialised.
-	 */
-	switch (type) {
-	case TYPE_INST:
-		return get_num_brps();
-	case TYPE_DATA:
-		return get_num_wrps();
-	default:
-		pr_warning("unknown slot type: %d\n", type);
-		return 0;
-	}
-}
-
-/* Determine debug architecture. */
-static u8 get_debug_arch(void)
-{
-	u32 didr;
-
-	/* Do we implement the extended CPUID interface? */
-	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-		pr_warning("CPUID feature registers not supported. "
-			   "Assuming v6 debug is present.\n");
-		return ARM_DEBUG_ARCH_V6;
-	}
-
-	ARM_DBG_READ(c0, 0, didr);
-	return (didr >> 16) & 0xf;
-}
-
-/* Does this core support mismatch breakpoints? */
-static int core_has_mismatch_bps(void)
-{
-	return debug_arch >= ARM_DEBUG_ARCH_V7_ECP14 && core_num_brps > 1;
-}
-
-u8 arch_get_debug_arch(void)
-{
-	return debug_arch;
-}
-
 #define READ_WB_REG_CASE(OP2, M, VAL) \
 	case ((OP2 << 4) + M): \
 		ARM_DBG_READ(c ## M, OP2, VAL); \
@@ -210,6 +131,94 @@ static void write_wb_reg(int n, u32 val)
 	isb();
 }
 
+/* Determine debug architecture. */
+static u8 get_debug_arch(void)
+{
+	u32 didr;
+
+	/* Do we implement the extended CPUID interface? */
+	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+		pr_warning("CPUID feature registers not supported. "
+			   "Assuming v6 debug is present.\n");
+		return ARM_DEBUG_ARCH_V6;
+	}
+
+	ARM_DBG_READ(c0, 0, didr);
+	return (didr >> 16) & 0xf;
+}
+
+u8 arch_get_debug_arch(void)
+{
+	return debug_arch;
+}
+
+/* Determine number of BRP register available. */
+static int get_num_brp_resources(void)
+{
+	u32 didr;
+	ARM_DBG_READ(c0, 0, didr);
+	return ((didr >> 24) & 0xf) + 1;
+}
+
+/* Does this core support mismatch breakpoints? */
+static int core_has_mismatch_brps(void)
+{
+	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
+		get_num_brp_resources() > 1);
+}
+
+/* Determine number of usable WRPs available. */
+static int get_num_wrps(void)
+{
+	/*
+	 * FIXME: When a watchpoint fires, the only way to work out which
+	 * watchpoint it was is by disassembling the faulting instruction
+	 * and working out the address of the memory access.
+	 *
+	 * Furthermore, we can only do this if the watchpoint was precise
+	 * since imprecise watchpoints prevent us from calculating register
+	 * based addresses.
+	 *
+	 * Providing we have more than 1 breakpoint register, we only report
+	 * a single watchpoint register for the time being. This way, we always
+	 * know which watchpoint fired. In the future we can either add a
+	 * disassembler and address generation emulator, or we can insert a
+	 * check to see if the DFAR is set on watchpoint exception entry
+	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
+	 * that it is set on some implementations].
+	 */
+
+#if 0
+	int wrps;
+	u32 didr;
+	ARM_DBG_READ(c0, 0, didr);
+	wrps = ((didr >> 28) & 0xf) + 1;
+#endif
+	int wrps = 1;
+
+	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
+		wrps = get_num_brp_resources() - 1;
+
+	return wrps;
+}
+
+/* We reserve one breakpoint for each watchpoint. */
+static int get_num_reserved_brps(void)
+{
+	if (core_has_mismatch_brps())
+		return get_num_wrps();
+	return 0;
+}
+
+/* Determine number of usable BRPs available. */
+static int get_num_brps(void)
+{
+	int brps = get_num_brp_resources();
+	if (core_has_mismatch_brps())
+		brps -= get_num_reserved_brps();
+	return brps;
+}
+
 /*
  * In order to access the breakpoint/watchpoint control registers,
  * we must be running in debug monitor mode. Unfortunately, we can
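For orientation, the DBGDIDR fields decoded by the helpers added above, and the resulting resource split, can be sketched as follows (illustrative only; this restates the arithmetic already present in the hunk):

	u32 didr;
	ARM_DBG_READ(c0, 0, didr);		/* DBGDIDR */
	int version = (didr >> 16) & 0xf;	/* debug architecture version */
	int brps = ((didr >> 24) & 0xf) + 1;	/* breakpoint register pairs */
	int wrps = ((didr >> 28) & 0xf) + 1;	/* watchpoint register pairs */
	/*
	 * With v7 debug and more than one BRP, one breakpoint register is
	 * reserved per reported watchpoint for single-stepping, so the
	 * usable breakpoint count is brps - get_num_reserved_brps().
	 */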
@@ -230,8 +239,12 @@ static int enable_monitor_mode(void)
 		goto out;
 	}
 
+	/* If monitor mode is already enabled, just return. */
+	if (dscr & ARM_DSCR_MDBGEN)
+		goto out;
+
 	/* Write to the corresponding DSCR. */
-	switch (debug_arch) {
+	switch (get_debug_arch()) {
 	case ARM_DEBUG_ARCH_V6:
 	case ARM_DEBUG_ARCH_V6_1:
 		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
@@ -246,15 +259,30 @@ static int enable_monitor_mode(void)
 
 	/* Check that the write made it through. */
 	ARM_DBG_READ(c1, 0, dscr);
-	if (WARN_ONCE(!(dscr & ARM_DSCR_MDBGEN),
-				"failed to enable monitor mode.")) {
+	if (!(dscr & ARM_DSCR_MDBGEN))
 		ret = -EPERM;
-	}
 
 out:
 	return ret;
 }
 
+int hw_breakpoint_slots(int type)
+{
+	/*
+	 * We can be called early, so don't rely on
+	 * our static variables being initialised.
+	 */
+	switch (type) {
+	case TYPE_INST:
+		return get_num_brps();
+	case TYPE_DATA:
+		return get_num_wrps();
+	default:
+		pr_warning("unknown slot type: %d\n", type);
+		return 0;
+	}
+}
+
 /*
  * Check if 8-bit byte-address select is available.
  * This clobbers WRP 0.
@@ -268,9 +296,6 @@ static u8 get_max_wp_len(void)
 	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
 		goto out;
 
-	if (enable_monitor_mode())
-		goto out;
-
 	memset(&ctrl, 0, sizeof(ctrl));
 	ctrl.len = ARM_BREAKPOINT_LEN_8;
 	ctrl_reg = encode_ctrl_reg(ctrl);
@@ -290,23 +315,6 @@ u8 arch_get_max_wp_len(void)
 }
 
 /*
- * Handler for reactivating a suspended watchpoint when the single
- * step `mismatch' breakpoint is triggered.
- */
-static void wp_single_step_handler(struct perf_event *bp, int unused,
-				   struct perf_sample_data *data,
-				   struct pt_regs *regs)
-{
-	perf_event_enable(counter_arch_bp(bp)->suspended_wp);
-	unregister_hw_breakpoint(bp);
-}
-
-static int bp_is_single_step(struct perf_event *bp)
-{
-	return bp->overflow_handler == wp_single_step_handler;
-}
-
-/*
  * Install a perf counter breakpoint.
  */
 int arch_install_hw_breakpoint(struct perf_event *bp)
@@ -314,30 +322,41 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 	struct perf_event **slot, **slots;
 	int i, max_slots, ctrl_base, val_base, ret = 0;
+	u32 addr, ctrl;
 
 	/* Ensure that we are in monitor mode and halting mode is disabled. */
 	ret = enable_monitor_mode();
 	if (ret)
 		goto out;
 
+	addr = info->address;
+	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;
+
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		ctrl_base = ARM_BASE_BCR;
 		val_base = ARM_BASE_BVR;
-		slots = __get_cpu_var(bp_on_reg);
-		max_slots = core_num_brps - 1;
-
-		if (bp_is_single_step(bp)) {
-			info->ctrl.mismatch = 1;
-			i = max_slots;
-			slots[i] = bp;
-			goto setup;
+		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		max_slots = core_num_brps;
+		if (info->step_ctrl.enabled) {
+			/* Override the breakpoint data with the step data. */
+			addr = info->trigger & ~0x3;
+			ctrl = encode_ctrl_reg(info->step_ctrl);
 		}
 	} else {
 		/* Watchpoint */
-		ctrl_base = ARM_BASE_WCR;
-		val_base = ARM_BASE_WVR;
-		slots = __get_cpu_var(wp_on_reg);
+		if (info->step_ctrl.enabled) {
+			/* Install into the reserved breakpoint region. */
+			ctrl_base = ARM_BASE_BCR + core_num_brps;
+			val_base = ARM_BASE_BVR + core_num_brps;
+			/* Override the watchpoint data with the step data. */
+			addr = info->trigger & ~0x3;
+			ctrl = encode_ctrl_reg(info->step_ctrl);
+		} else {
+			ctrl_base = ARM_BASE_WCR;
+			val_base = ARM_BASE_WVR;
+		}
+		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
 
@@ -355,12 +374,11 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		goto out;
 	}
 
-setup:
 	/* Setup the address register. */
-	write_wb_reg(val_base + i, info->address);
+	write_wb_reg(val_base + i, addr);
 
 	/* Setup the control register. */
-	write_wb_reg(ctrl_base + i, encode_ctrl_reg(info->ctrl) | 0x1);
+	write_wb_reg(ctrl_base + i, ctrl);
 
 out:
 	return ret;
@@ -375,18 +393,15 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		base = ARM_BASE_BCR;
-		slots = __get_cpu_var(bp_on_reg);
-		max_slots = core_num_brps - 1;
-
-		if (bp_is_single_step(bp)) {
-			i = max_slots;
-			slots[i] = NULL;
-			goto reset;
-		}
+		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
-		base = ARM_BASE_WCR;
-		slots = __get_cpu_var(wp_on_reg);
+		if (info->step_ctrl.enabled)
+			base = ARM_BASE_BCR + core_num_brps;
+		else
+			base = ARM_BASE_WCR;
+		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
 
@@ -403,7 +418,6 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
 		return;
 
-reset:
 	/* Reset the control register. */
 	write_wb_reg(base + i, 0);
 }
@@ -537,12 +551,23 @@ static int arch_build_bp_info(struct perf_event *bp)
 		return -EINVAL;
 	}
 
+	/*
+	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
+	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
+	 * by the hardware and must be aligned to the appropriate number of
+	 * bytes.
+	 */
+	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+		return -EINVAL;
+
 	/* Address */
 	info->address = bp->attr.bp_addr;
 
 	/* Privilege */
 	info->ctrl.privilege = ARM_BREAKPOINT_USER;
-	if (arch_check_bp_in_kernelspace(bp) && !bp_is_single_step(bp))
+	if (arch_check_bp_in_kernelspace(bp))
 		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
 
 	/* Enabled? */
@@ -561,7 +586,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 {
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 	int ret = 0;
-	u32 bytelen, max_len, offset, alignment_mask = 0x3;
+	u32 offset, alignment_mask = 0x3;
 
 	/* Build the arch_hw_breakpoint. */
 	ret = arch_build_bp_info(bp);
@@ -571,84 +596,85 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	/* Check address alignment. */
 	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
 		alignment_mask = 0x7;
-	if (info->address & alignment_mask) {
-		/*
-		 * Try to fix the alignment. This may result in a length
-		 * that is too large, so we must check for that.
-		 */
-		bytelen = get_hbp_len(info->ctrl.len);
-		max_len = info->ctrl.type == ARM_BREAKPOINT_EXECUTE ? 4 :
-				max_watchpoint_len;
-
-		if (max_len >= 8)
-			offset = info->address & 0x7;
-		else
-			offset = info->address & 0x3;
-
-		if (bytelen > (1 << ((max_len - (offset + 1)) >> 1))) {
-			ret = -EFBIG;
-			goto out;
-		}
-
-		info->ctrl.len <<= offset;
-		info->address &= ~offset;
-
-		pr_debug("breakpoint alignment fixup: length = 0x%x, "
-			"address = 0x%x\n", info->ctrl.len, info->address);
+	offset = info->address & alignment_mask;
+	switch (offset) {
+	case 0:
+		/* Aligned */
+		break;
+	case 1:
+		/* Allow single byte watchpoint. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+			break;
+	case 2:
+		/* Allow halfword watchpoints and breakpoints. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+			break;
+	default:
+		ret = -EINVAL;
+		goto out;
 	}
 
+	info->address &= ~alignment_mask;
+	info->ctrl.len <<= offset;
+
 	/*
 	 * Currently we rely on an overflow handler to take
 	 * care of single-stepping the breakpoint when it fires.
 	 * In the case of userspace breakpoints on a core with V7 debug,
-	 * we can use the mismatch feature as a poor-man's hardware single-step.
+	 * we can use the mismatch feature as a poor-man's hardware
+	 * single-step, but this only works for per-task breakpoints.
 	 */
 	if (WARN_ONCE(!bp->overflow_handler &&
-	    (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_bps()),
+	    (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
+	    || !bp->hw.bp_target),
 	    "overflow handler required but none found")) {
 		ret = -EINVAL;
-		goto out;
 	}
 out:
 	return ret;
 }
 
-static void update_mismatch_flag(int idx, int flag)
+/*
+ * Enable/disable single-stepping over the breakpoint bp at address addr.
+ */
+static void enable_single_step(struct perf_event *bp, u32 addr)
 {
-	struct perf_event *bp = __get_cpu_var(bp_on_reg[idx]);
-	struct arch_hw_breakpoint *info;
-
-	if (bp == NULL)
-		return;
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-	info = counter_arch_bp(bp);
+	arch_uninstall_hw_breakpoint(bp);
+	info->step_ctrl.mismatch = 1;
+	info->step_ctrl.len = ARM_BREAKPOINT_LEN_4;
+	info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE;
+	info->step_ctrl.privilege = info->ctrl.privilege;
+	info->step_ctrl.enabled = 1;
+	info->trigger = addr;
+	arch_install_hw_breakpoint(bp);
+}
 
-	/* Update the mismatch field to enter/exit `single-step' mode */
-	if (!bp->overflow_handler && info->ctrl.mismatch != flag) {
-		info->ctrl.mismatch = flag;
-		write_wb_reg(ARM_BASE_BCR + idx, encode_ctrl_reg(info->ctrl) | 0x1);
-	}
+static void disable_single_step(struct perf_event *bp)
+{
+	arch_uninstall_hw_breakpoint(bp);
+	counter_arch_bp(bp)->step_ctrl.enabled = 0;
+	arch_install_hw_breakpoint(bp);
 }
 
 static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
 {
 	int i;
-	struct perf_event *bp, **slots = __get_cpu_var(wp_on_reg);
+	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;
-	struct perf_event_attr attr;
+
+	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
 	/* Without a disassembler, we can only handle 1 watchpoint. */
 	BUG_ON(core_num_wrps > 1);
 
-	hw_breakpoint_init(&attr);
-	attr.bp_addr = regs->ARM_pc & ~0x3;
-	attr.bp_len = HW_BREAKPOINT_LEN_4;
-	attr.bp_type = HW_BREAKPOINT_X;
-
 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
 
-		if (slots[i] == NULL) {
+		wp = slots[i];
+
+		if (wp == NULL) {
 			rcu_read_unlock();
 			continue;
 		}
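As a worked example of the alignment handling introduced above (a sketch of the intended behaviour, not additional kernel code), a one-byte watchpoint requested at 0x8001 is accepted and re-encoded like this:

	/*
	 * Example: bp_addr = 0x8001, len = ARM_BREAKPOINT_LEN_1.
	 *   offset         = 0x8001 & 0x3  = 1   -> allowed for LEN_1
	 *   info->address  = 0x8001 & ~0x3 = 0x8000
	 *   info->ctrl.len = ARM_BREAKPOINT_LEN_1 << 1
	 * i.e. the byte-address-select field now picks byte 1 of the
	 * naturally aligned word at 0x8000.
	 */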
@@ -658,24 +684,51 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
 		 * single watchpoint, we can set the trigger to the lowest
 		 * possible faulting address.
 		 */
-		info = counter_arch_bp(slots[i]);
-		info->trigger = slots[i]->attr.bp_addr;
+		info = counter_arch_bp(wp);
+		info->trigger = wp->attr.bp_addr;
 		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
-		perf_bp_event(slots[i], regs);
+		perf_bp_event(wp, regs);
 
 		/*
 		 * If no overflow handler is present, insert a temporary
 		 * mismatch breakpoint so we can single-step over the
 		 * watchpoint trigger.
 		 */
-		if (!slots[i]->overflow_handler) {
-			bp = register_user_hw_breakpoint(&attr,
-							 wp_single_step_handler,
-							 current);
-			counter_arch_bp(bp)->suspended_wp = slots[i];
-			perf_event_disable(slots[i]);
-		}
+		if (!wp->overflow_handler)
+			enable_single_step(wp, instruction_pointer(regs));
+
+		rcu_read_unlock();
+	}
+}
 
+static void watchpoint_single_step_handler(unsigned long pc)
+{
+	int i;
+	struct perf_event *wp, **slots;
+	struct arch_hw_breakpoint *info;
+
+	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+
+	for (i = 0; i < core_num_reserved_brps; ++i) {
+		rcu_read_lock();
+
+		wp = slots[i];
+
+		if (wp == NULL)
+			goto unlock;
+
+		info = counter_arch_bp(wp);
+		if (!info->step_ctrl.enabled)
+			goto unlock;
+
+		/*
+		 * Restore the original watchpoint if we've completed the
+		 * single-step.
+		 */
+		if (info->trigger != pc)
+			disable_single_step(wp);
+
+unlock:
 		rcu_read_unlock();
 	}
 }
@@ -683,62 +736,69 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
 static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 {
 	int i;
-	int mismatch;
 	u32 ctrl_reg, val, addr;
-	struct perf_event *bp, **slots = __get_cpu_var(bp_on_reg);
+	struct perf_event *bp, **slots;
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;
 
+	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+
 	/* The exception entry code places the amended lr in the PC. */
 	addr = regs->ARM_pc;
 
+	/* Check the currently installed breakpoints first. */
 	for (i = 0; i < core_num_brps; ++i) {
 		rcu_read_lock();
 
 		bp = slots[i];
 
-		if (bp == NULL) {
-			rcu_read_unlock();
-			continue;
-		}
+		if (bp == NULL)
+			goto unlock;
 
-		mismatch = 0;
+		info = counter_arch_bp(bp);
 
 		/* Check if the breakpoint value matches. */
 		val = read_wb_reg(ARM_BASE_BVR + i);
 		if (val != (addr & ~0x3))
-			goto unlock;
+			goto mismatch;
 
 		/* Possible match, check the byte address select to confirm. */
 		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
 		decode_ctrl_reg(ctrl_reg, &ctrl);
 		if ((1 << (addr & 0x3)) & ctrl.len) {
-			mismatch = 1;
-			info = counter_arch_bp(bp);
 			info->trigger = addr;
-		}
-
-unlock:
-		if ((mismatch && !info->ctrl.mismatch) || bp_is_single_step(bp)) {
 			pr_debug("breakpoint fired: address = 0x%x\n", addr);
 			perf_bp_event(bp, regs);
+			if (!bp->overflow_handler)
+				enable_single_step(bp, addr);
+			goto unlock;
 		}
 
-		update_mismatch_flag(i, mismatch);
+mismatch:
+		/* If we're stepping a breakpoint, it can now be restored. */
+		if (info->step_ctrl.enabled)
+			disable_single_step(bp);
+unlock:
 		rcu_read_unlock();
 	}
+
+	/* Handle any pending watchpoint single-step breakpoints. */
+	watchpoint_single_step_handler(addr);
 }
 
 /*
  * Called from either the Data Abort Handler [watchpoint] or the
- * Prefetch Abort Handler [breakpoint].
+ * Prefetch Abort Handler [breakpoint] with preemption disabled.
  */
 static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 				 struct pt_regs *regs)
 {
-	int ret = 1; /* Unhandled fault. */
+	int ret = 0;
 	u32 dscr;
 
+	/* We must be called with preemption disabled. */
+	WARN_ON(preemptible());
+
 	/* We only handle watchpoints and hardware breakpoints. */
 	ARM_DBG_READ(c1, 0, dscr);
 
@@ -753,25 +813,47 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 		watchpoint_handler(addr, regs);
 		break;
 	default:
-		goto out;
+		ret = 1; /* Unhandled fault. */
 	}
 
-	ret = 0;
-out:
+	/*
+	 * Re-enable preemption after it was disabled in the
+	 * low-level exception handling code.
+	 */
+	preempt_enable();
+
 	return ret;
 }
 
 /*
  * One-time initialisation.
  */
-static void __init reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *unused)
 {
 	int i;
 
+	/*
+	 * v7 debug contains save and restore registers so that debug state
+	 * can be maintained across low-power modes without leaving
+	 * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
+	 * we can write to the debug registers out of reset, so we must
+	 * unlock the OS Lock Access Register to avoid taking undefined
+	 * instruction exceptions later on.
+	 */
+	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+		/*
+		 * Unconditionally clear the lock by writing a value
+		 * other than 0xC5ACCE55 to the access register.
+		 */
+		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+		isb();
+	}
+
 	if (enable_monitor_mode())
 		return;
 
-	for (i = 0; i < core_num_brps; ++i) {
+	/* We must also reset any reserved registers. */
+	for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
 		write_wb_reg(ARM_BASE_BCR + i, 0UL);
 		write_wb_reg(ARM_BASE_BVR + i, 0UL);
 	}
@@ -782,45 +864,57 @@ static void __init reset_ctrl_regs(void *unused)
 	}
 }
 
+static int __cpuinit dbg_reset_notify(struct notifier_block *self,
+				      unsigned long action, void *cpu)
+{
+	if (action == CPU_ONLINE)
+		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata dbg_reset_nb = {
+	.notifier_call = dbg_reset_notify,
+};
+
 static int __init arch_hw_breakpoint_init(void)
 {
-	int ret = 0;
 	u32 dscr;
 
 	debug_arch = get_debug_arch();
 
 	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
 		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
-		ret = -ENODEV;
-		goto out;
+		return 0;
 	}
 
 	/* Determine how many BRPs/WRPs are available. */
 	core_num_brps = get_num_brps();
+	core_num_reserved_brps = get_num_reserved_brps();
 	core_num_wrps = get_num_wrps();
 
 	pr_info("found %d breakpoint and %d watchpoint registers.\n",
-		core_num_brps, core_num_wrps);
+		core_num_brps + core_num_reserved_brps, core_num_wrps);
 
-	if (core_has_mismatch_bps())
-		pr_info("1 breakpoint reserved for watchpoint single-step.\n");
+	if (core_num_reserved_brps)
+		pr_info("%d breakpoint(s) reserved for watchpoint "
+			"single-step.\n", core_num_reserved_brps);
 
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
 		pr_warning("halting debug mode enabled. Assuming maximum "
 			   "watchpoint size of 4 bytes.");
 	} else {
-		/* Work out the maximum supported watchpoint length. */
-		max_watchpoint_len = get_max_wp_len();
-		pr_info("maximum watchpoint size is %u bytes.\n",
-			max_watchpoint_len);
-
 		/*
 		 * Reset the breakpoint resources. We assume that a halting
 		 * debugger will leave the world in a nice state for us.
 		 */
 		smp_call_function(reset_ctrl_regs, NULL, 1);
 		reset_ctrl_regs(NULL);
+
+		/* Work out the maximum supported watchpoint length. */
+		max_watchpoint_len = get_max_wp_len();
+		pr_info("maximum watchpoint size is %u bytes.\n",
+			max_watchpoint_len);
 	}
 
 	/* Register debug fault handler. */
@@ -829,8 +923,9 @@ static int __init arch_hw_breakpoint_init(void)
 	hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
 			"breakpoint debug exception");
 
-out:
-	return ret;
+	/* Register hotplug notifier. */
+	register_cpu_notifier(&dbg_reset_nb);
+	return 0;
 }
 arch_initcall(arch_hw_breakpoint_init);
 
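For context, the perf-based interface that hw_breakpoint.c backs can be driven from kernel code roughly as sketched below. This is an illustration only: it reuses calls visible elsewhere in this diff (hw_breakpoint_init, register_user_hw_breakpoint, the bp_addr/bp_len/bp_type attributes), while install_example_bp and the my_bp_handler callback are hypothetical names, not part of this patch.

	/* Sketch: set an instruction breakpoint on the current task. */
	static struct perf_event *install_example_bp(unsigned long addr,
					perf_overflow_handler_t my_bp_handler)
	{
		struct perf_event_attr attr;

		hw_breakpoint_init(&attr);
		attr.bp_addr = addr;
		attr.bp_len  = HW_BREAKPOINT_LEN_4;
		attr.bp_type = HW_BREAKPOINT_X;

		/* Same call the removed wp_single_step_handler path used. */
		return register_user_hw_breakpoint(&attr, my_bp_handler, current);
	}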
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 3e97483abcf..19c6816db61 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -1060,8 +1060,8 @@ static int ptrace_sethbpregs(struct task_struct *tsk, long num,
 		goto out;
 
 	if ((gen_type & implied_type) != gen_type) {
-			ret = -EINVAL;
-			goto out;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	attr.bp_len	= gen_len;