author     Linus Torvalds <torvalds@linux-foundation.org>  2010-02-01 13:45:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-02-01 13:45:00 -0500
commit     834db333eda46246f6290f0e1a7525af04fc8cb4 (patch)
tree       4809b5da82a76a9924cde7a2db8986636f8be727
parent     8ea85c2817301adb986b3b86dc20414595b776be (diff)
parent     5352ae638e2d7d5c9b2e4d528676bbf2af6fd6f3 (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf, hw_breakpoint, kgdb: Do not take mutex for kernel debugger
x86, hw_breakpoints, kgdb: Fix kgdb to use hw_breakpoint API
hw_breakpoints: Release the bp slot if arch_validate_hwbkpt_settings() fails.
perf: Ignore perf.data.old
perf report: Fix segmentation fault when running with '-g none'
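The kgdb/hw_breakpoint commits above share one constraint: the kernel debugger runs with the other CPUs stopped and must never sleep, so it cannot take nr_bp_mutex. The new dbg_reserve_bp_slot()/dbg_release_bp_slot() helpers therefore refuse to do anything if that mutex is already held, and the x86 kgdb code reserves a breakpoint's per-CPU slots one CPU at a time, rolling back whatever it already reserved if any reservation fails. The stand-alone C sketch below illustrates that try-only locking plus reserve-with-rollback idiom in user space; it is an analogy, not kernel code, and every name in it (dbg_try_reserve, dbg_release, reserve_on_all_cpus, reserved, NSLOTS) is made up for the example.

```c
#include <pthread.h>
#include <stdio.h>

#define NSLOTS 4

static pthread_mutex_t slot_mutex = PTHREAD_MUTEX_INITIALIZER;
static int reserved[NSLOTS];    /* one flag per "CPU", standing in for per-CPU perf events */

/*
 * Debugger context must not block.  The kernel's dbg_reserve_bp_slot()
 * simply bails out if nr_bp_mutex is held; pthread_mutex_trylock() is
 * the closest user-space analogue of that "refuse rather than sleep" rule.
 */
static int dbg_try_reserve(int idx)
{
	if (pthread_mutex_trylock(&slot_mutex) != 0)
		return -1;                      /* lock busy: give up, do not sleep */
	if (reserved[idx]) {
		pthread_mutex_unlock(&slot_mutex);
		return -1;
	}
	reserved[idx] = 1;
	pthread_mutex_unlock(&slot_mutex);
	return 0;
}

static void dbg_release(int idx)
{
	reserved[idx] = 0;
}

/*
 * Mirrors the rollback shape of hw_break_reserve_slot(): reserve on every
 * "CPU" and, if any reservation fails, release the ones already taken.
 */
static int reserve_on_all_cpus(int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (dbg_try_reserve(cpu)) {
			while (--cpu >= 0)
				dbg_release(cpu);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	printf("reserve_on_all_cpus: %d\n", reserve_on_all_cpus(NSLOTS));
	return 0;
}
```

Compile with `cc -pthread`. In the kernel the correctness argument is different: kgdb only runs this path with all other CPUs stopped, which is why a plain mutex_is_locked() check is enough there.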
-rw-r--r--  arch/x86/kernel/kgdb.c         | 222
-rw-r--r--  include/linux/hw_breakpoint.h  |   2
-rw-r--r--  kernel/hw_breakpoint.c         |  56
-rw-r--r--  kernel/kgdb.c                  |   3
-rw-r--r--  tools/perf/.gitignore          |   1
-rw-r--r--  tools/perf/builtin-report.c    |   2
6 files changed, 218 insertions, 68 deletions
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index dd74fe7273b1..bfba6019d762 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -42,6 +42,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/nmi.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/debugreg.h>
 #include <asm/apicdef.h>
@@ -204,40 +205,81 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 
 static struct hw_breakpoint {
         unsigned enabled;
-        unsigned type;
-        unsigned len;
         unsigned long addr;
+        int len;
+        int type;
+        struct perf_event **pev;
 } breakinfo[4];
 
 static void kgdb_correct_hw_break(void)
 {
-        unsigned long dr7;
-        int correctit = 0;
-        int breakbit;
         int breakno;
 
-        get_debugreg(dr7, 7);
         for (breakno = 0; breakno < 4; breakno++) {
-                breakbit = 2 << (breakno << 1);
-                if (!(dr7 & breakbit) && breakinfo[breakno].enabled) {
-                        correctit = 1;
-                        dr7 |= breakbit;
-                        dr7 &= ~(0xf0000 << (breakno << 2));
-                        dr7 |= ((breakinfo[breakno].len << 2) |
-                                 breakinfo[breakno].type) <<
-                               ((breakno << 2) + 16);
-                        set_debugreg(breakinfo[breakno].addr, breakno);
-
-                } else {
-                        if ((dr7 & breakbit) && !breakinfo[breakno].enabled) {
-                                correctit = 1;
-                                dr7 &= ~breakbit;
-                                dr7 &= ~(0xf0000 << (breakno << 2));
-                        }
-                }
+                struct perf_event *bp;
+                struct arch_hw_breakpoint *info;
+                int val;
+                int cpu = raw_smp_processor_id();
+                if (!breakinfo[breakno].enabled)
+                        continue;
+                bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
+                info = counter_arch_bp(bp);
+                if (bp->attr.disabled != 1)
+                        continue;
+                bp->attr.bp_addr = breakinfo[breakno].addr;
+                bp->attr.bp_len = breakinfo[breakno].len;
+                bp->attr.bp_type = breakinfo[breakno].type;
+                info->address = breakinfo[breakno].addr;
+                info->len = breakinfo[breakno].len;
+                info->type = breakinfo[breakno].type;
+                val = arch_install_hw_breakpoint(bp);
+                if (!val)
+                        bp->attr.disabled = 0;
+        }
+        hw_breakpoint_restore();
+}
+
+static int hw_break_reserve_slot(int breakno)
+{
+        int cpu;
+        int cnt = 0;
+        struct perf_event **pevent;
+
+        for_each_online_cpu(cpu) {
+                cnt++;
+                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+                if (dbg_reserve_bp_slot(*pevent))
+                        goto fail;
+        }
+
+        return 0;
+
+fail:
+        for_each_online_cpu(cpu) {
+                cnt--;
+                if (!cnt)
+                        break;
+                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+                dbg_release_bp_slot(*pevent);
         }
-        if (correctit)
-                set_debugreg(dr7, 7);
+        return -1;
+}
+
+static int hw_break_release_slot(int breakno)
+{
+        struct perf_event **pevent;
+        int cpu;
+
+        for_each_online_cpu(cpu) {
+                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+                if (dbg_release_bp_slot(*pevent))
+                        /*
+                         * The debugger is responsible for handing the retry on
+                         * remove failure.
+                         */
+                        return -1;
+        }
+        return 0;
 }
 
 static int
@@ -251,6 +293,10 @@ kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
         if (i == 4)
                 return -1;
 
+        if (hw_break_release_slot(i)) {
+                printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
+                return -1;
+        }
         breakinfo[i].enabled = 0;
 
         return 0;
@@ -259,15 +305,23 @@ kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
 static void kgdb_remove_all_hw_break(void)
 {
         int i;
+        int cpu = raw_smp_processor_id();
+        struct perf_event *bp;
 
-        for (i = 0; i < 4; i++)
-                memset(&breakinfo[i], 0, sizeof(struct hw_breakpoint));
+        for (i = 0; i < 4; i++) {
+                if (!breakinfo[i].enabled)
+                        continue;
+                bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+                if (bp->attr.disabled == 1)
+                        continue;
+                arch_uninstall_hw_breakpoint(bp);
+                bp->attr.disabled = 1;
+        }
 }
 
 static int
 kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
 {
-        unsigned type;
         int i;
 
         for (i = 0; i < 4; i++)
@@ -278,27 +332,42 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
 
         switch (bptype) {
         case BP_HARDWARE_BREAKPOINT:
-                type = 0;
-                len = 1;
+                len = 1;
+                breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
                 break;
         case BP_WRITE_WATCHPOINT:
-                type = 1;
+                breakinfo[i].type = X86_BREAKPOINT_WRITE;
                 break;
         case BP_ACCESS_WATCHPOINT:
-                type = 3;
+                breakinfo[i].type = X86_BREAKPOINT_RW;
                 break;
         default:
                 return -1;
         }
-
-        if (len == 1 || len == 2 || len == 4)
-                breakinfo[i].len = len - 1;
-        else
+        switch (len) {
+        case 1:
+                breakinfo[i].len = X86_BREAKPOINT_LEN_1;
+                break;
+        case 2:
+                breakinfo[i].len = X86_BREAKPOINT_LEN_2;
+                break;
+        case 4:
+                breakinfo[i].len = X86_BREAKPOINT_LEN_4;
+                break;
+#ifdef CONFIG_X86_64
+        case 8:
+                breakinfo[i].len = X86_BREAKPOINT_LEN_8;
+                break;
+#endif
+        default:
                 return -1;
-
-        breakinfo[i].enabled = 1;
+        }
         breakinfo[i].addr = addr;
-        breakinfo[i].type = type;
+        if (hw_break_reserve_slot(i)) {
+                breakinfo[i].addr = 0;
+                return -1;
+        }
+        breakinfo[i].enabled = 1;
 
         return 0;
 }
@@ -313,8 +382,21 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
  */
 void kgdb_disable_hw_debug(struct pt_regs *regs)
 {
+        int i;
+        int cpu = raw_smp_processor_id();
+        struct perf_event *bp;
+
         /* Disable hardware debugging while we are in kgdb: */
         set_debugreg(0UL, 7);
+        for (i = 0; i < 4; i++) {
+                if (!breakinfo[i].enabled)
+                        continue;
+                bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+                if (bp->attr.disabled == 1)
+                        continue;
+                arch_uninstall_hw_breakpoint(bp);
+                bp->attr.disabled = 1;
+        }
 }
 
 /**
@@ -378,7 +460,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                                struct pt_regs *linux_regs)
 {
         unsigned long addr;
-        unsigned long dr6;
         char *ptr;
         int newPC;
 
@@ -404,20 +485,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                            raw_smp_processor_id());
         }
 
-        get_debugreg(dr6, 6);
-        if (!(dr6 & 0x4000)) {
-                int breakno;
-
-                for (breakno = 0; breakno < 4; breakno++) {
-                        if (dr6 & (1 << breakno) &&
-                            breakinfo[breakno].type == 0) {
-                                /* Set restore flag: */
-                                linux_regs->flags |= X86_EFLAGS_RF;
-                                break;
-                        }
-                }
-        }
-        set_debugreg(0UL, 6);
         kgdb_correct_hw_break();
 
         return 0;
@@ -485,8 +552,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
                 break;
 
         case DIE_DEBUG:
-                if (atomic_read(&kgdb_cpu_doing_single_step) ==
-                    raw_smp_processor_id()) {
+                if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
                         if (user_mode(regs))
                                 return single_step_cont(regs, args);
                         break;
@@ -539,7 +605,42 @@ static struct notifier_block kgdb_notifier = {
  */
 int kgdb_arch_init(void)
 {
-        return register_die_notifier(&kgdb_notifier);
+        int i, cpu;
+        int ret;
+        struct perf_event_attr attr;
+        struct perf_event **pevent;
+
+        ret = register_die_notifier(&kgdb_notifier);
+        if (ret != 0)
+                return ret;
+        /*
+         * Pre-allocate the hw breakpoint structures in the non-atomic
+         * portion of kgdb because this operation requires mutexes to
+         * complete.
+         */
+        attr.bp_addr = (unsigned long)kgdb_arch_init;
+        attr.type = PERF_TYPE_BREAKPOINT;
+        attr.bp_len = HW_BREAKPOINT_LEN_1;
+        attr.bp_type = HW_BREAKPOINT_W;
+        attr.disabled = 1;
+        for (i = 0; i < 4; i++) {
+                breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
+                if (IS_ERR(breakinfo[i].pev)) {
+                        printk(KERN_ERR "kgdb: Could not allocate hw breakpoints\n");
+                        breakinfo[i].pev = NULL;
+                        kgdb_arch_exit();
+                        return -1;
+                }
+                for_each_online_cpu(cpu) {
+                        pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
+                        pevent[0]->hw.sample_period = 1;
+                        if (pevent[0]->destroy != NULL) {
+                                pevent[0]->destroy = NULL;
+                                release_bp_slot(*pevent);
+                        }
+                }
+        }
+        return ret;
 }
 
 /**
@@ -550,6 +651,13 @@ int kgdb_arch_init(void)
  */
 void kgdb_arch_exit(void)
 {
+        int i;
+        for (i = 0; i < 4; i++) {
+                if (breakinfo[i].pev) {
+                        unregister_wide_hw_breakpoint(breakinfo[i].pev);
+                        breakinfo[i].pev = NULL;
+                }
+        }
         unregister_die_notifier(&kgdb_notifier);
 }
 
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 41235c93e4e9..070ba0621738 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -75,6 +75,8 @@ extern int __register_perf_hw_breakpoint(struct perf_event *bp);
 extern void unregister_hw_breakpoint(struct perf_event *bp);
 extern void unregister_wide_hw_breakpoint(struct perf_event **cpu_events);
 
+extern int dbg_reserve_bp_slot(struct perf_event *bp);
+extern int dbg_release_bp_slot(struct perf_event *bp);
 extern int reserve_bp_slot(struct perf_event *bp);
 extern void release_bp_slot(struct perf_event *bp);
 
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 50dbd5999588..8a5c7d55ac9f 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -243,38 +243,70 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
  * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
-int reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp)
 {
         struct bp_busy_slots slots = {0};
-        int ret = 0;
-
-        mutex_lock(&nr_bp_mutex);
 
         fetch_bp_busy_slots(&slots, bp);
 
         /* Flexible counters need to keep at least one slot */
-        if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
-                ret = -ENOSPC;
-                goto end;
-        }
+        if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+                return -ENOSPC;
 
         toggle_bp_slot(bp, true);
 
-end:
+        return 0;
+}
+
+int reserve_bp_slot(struct perf_event *bp)
+{
+        int ret;
+
+        mutex_lock(&nr_bp_mutex);
+
+        ret = __reserve_bp_slot(bp);
+
         mutex_unlock(&nr_bp_mutex);
 
         return ret;
 }
 
+static void __release_bp_slot(struct perf_event *bp)
+{
+        toggle_bp_slot(bp, false);
+}
+
 void release_bp_slot(struct perf_event *bp)
 {
         mutex_lock(&nr_bp_mutex);
 
-        toggle_bp_slot(bp, false);
+        __release_bp_slot(bp);
 
         mutex_unlock(&nr_bp_mutex);
 }
 
+/*
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock, using the dbg_* variants of the reserve and
+ * release breakpoint slot functions.
+ */
+int dbg_reserve_bp_slot(struct perf_event *bp)
+{
+        if (mutex_is_locked(&nr_bp_mutex))
+                return -1;
+
+        return __reserve_bp_slot(bp);
+}
+
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+        if (mutex_is_locked(&nr_bp_mutex))
+                return -1;
+
+        __release_bp_slot(bp);
+
+        return 0;
+}
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
@@ -296,6 +328,10 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
         if (!bp->attr.disabled || !bp->overflow_handler)
                 ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
 
+        /* if arch_validate_hwbkpt_settings() fails then release bp slot */
+        if (ret)
+                release_bp_slot(bp);
+
         return ret;
 }
 
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 2eb517e23514..c7ade62e4ef0 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -583,6 +583,9 @@ static void kgdb_wait(struct pt_regs *regs)
         smp_wmb();
         atomic_set(&cpu_in_kgdb[cpu], 1);
 
+        /* Disable any cpu specific hw breakpoints */
+        kgdb_disable_hw_debug(regs);
+
         /* Wait till primary CPU is done with debugging */
         while (atomic_read(&passive_cpu_wait[cpu]))
                 cpu_relax();
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index fe08660ce0bd..124760bb37b5 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -13,6 +13,7 @@ perf*.xml
 perf*.html
 common-cmds.h
 perf.data
+perf.data.old
 tags
 TAGS
 cscope*
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index db10c0e8ecae..860f1eeeea7d 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -269,7 +269,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
 
         else if (!strncmp(tok, "none", strlen(arg))) {
                 callchain_param.mode = CHAIN_NONE;
-                symbol_conf.use_callchain = true;
+                symbol_conf.use_callchain = false;
 
                 return 0;
         }