author    Pekka Enberg <penberg@kernel.org>	2010-10-24 12:57:05 -0400
committer Pekka Enberg <penberg@kernel.org>	2010-10-24 12:57:05 -0400
commit    6d4121f6c20a0e86231d52f535f1c82423b3326f
tree      5c235cac699ca86b504850aa663ddadde0455a61 /kernel/debug/debug_core.c
parent    92a5bbc11ff2442a54b2f1d313088c245828ef4e
parent    35da7a307c535f9c2929cae277f3df425c9f9b1e
Merge branch 'master' into for-linus
Conflicts:
	include/linux/percpu.h
	mm/percpu.c
Diffstat (limited to 'kernel/debug/debug_core.c')
-rw-r--r--	kernel/debug/debug_core.c	139
1 file changed, 70 insertions(+), 69 deletions(-)
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index de407c78178d..fec596da9bd0 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -47,6 +47,7 @@
 #include <linux/pid.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/rcupdate.h>

 #include <asm/cacheflush.h>
 #include <asm/byteorder.h>
@@ -109,13 +110,15 @@ static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
  */
 atomic_t kgdb_active = ATOMIC_INIT(-1);
 EXPORT_SYMBOL_GPL(kgdb_active);
+static DEFINE_RAW_SPINLOCK(dbg_master_lock);
+static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

 /*
  * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
  * bootup code (which might not have percpu set up yet):
  */
-static atomic_t passive_cpu_wait[NR_CPUS];
-static atomic_t cpu_in_kgdb[NR_CPUS];
+static atomic_t masters_in_kgdb;
+static atomic_t slaves_in_kgdb;
 static atomic_t kgdb_break_tasklet_var;
 atomic_t kgdb_setting_breakpoint;

@@ -457,26 +460,32 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
 	return 1;
 }

-static void dbg_cpu_switch(int cpu, int next_cpu)
+static void dbg_touch_watchdogs(void)
 {
-	/* Mark the cpu we are switching away from as a slave when it
-	 * holds the kgdb_active token.  This must be done so that the
-	 * that all the cpus wait in for the debug core will not enter
-	 * again as the master. */
-	if (cpu == atomic_read(&kgdb_active)) {
-		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
-		kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER;
-	}
-	kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER;
+	touch_softlockup_watchdog_sync();
+	clocksource_touch_watchdog();
+	rcu_cpu_stall_reset();
 }

-static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
+			  int exception_state)
 {
 	unsigned long flags;
 	int sstep_tries = 100;
 	int error;
-	int i, cpu;
+	int cpu;
 	int trace_on = 0;
+	int online_cpus = num_online_cpus();
+
+	kgdb_info[ks->cpu].enter_kgdb++;
+	kgdb_info[ks->cpu].exception_state |= exception_state;
+
+	if (exception_state == DCPU_WANT_MASTER)
+		atomic_inc(&masters_in_kgdb);
+	else
+		atomic_inc(&slaves_in_kgdb);
+	kgdb_disable_hw_debug(ks->linux_regs);
+
 acquirelock:
 	/*
 	 * Interrupts will be restored by the 'trap return' code, except when
@@ -489,14 +498,15 @@ acquirelock:
 	kgdb_info[cpu].task = current;
 	kgdb_info[cpu].ret_state = 0;
 	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
-	/*
-	 * Make sure the above info reaches the primary CPU before
-	 * our cpu_in_kgdb[] flag setting does:
-	 */
-	atomic_inc(&cpu_in_kgdb[cpu]);

-	if (exception_level == 1)
+	/* Make sure the above info reaches the primary CPU */
+	smp_mb();
+
+	if (exception_level == 1) {
+		if (raw_spin_trylock(&dbg_master_lock))
+			atomic_xchg(&kgdb_active, cpu);
 		goto cpu_master_loop;
+	}

 	/*
 	 * CPU will loop if it is a slave or request to become a kgdb
@@ -508,10 +518,12 @@ cpu_loop:
 			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
 			goto cpu_master_loop;
 		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
-			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+			if (raw_spin_trylock(&dbg_master_lock)) {
+				atomic_xchg(&kgdb_active, cpu);
 				break;
+			}
 		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
-			if (!atomic_read(&passive_cpu_wait[cpu]))
+			if (!raw_spin_is_locked(&dbg_slave_lock))
 				goto return_normal;
 		} else {
 return_normal:
@@ -522,9 +534,12 @@ return_normal:
 				arch_kgdb_ops.correct_hw_break();
 			if (trace_on)
 				tracing_on();
-			atomic_dec(&cpu_in_kgdb[cpu]);
-			touch_softlockup_watchdog_sync();
-			clocksource_touch_watchdog();
+			kgdb_info[cpu].exception_state &=
+				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
+			kgdb_info[cpu].enter_kgdb--;
+			smp_mb__before_atomic_dec();
+			atomic_dec(&slaves_in_kgdb);
+			dbg_touch_watchdogs();
 			local_irq_restore(flags);
 			return 0;
 		}
@@ -541,8 +556,8 @@ return_normal:
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
-		touch_softlockup_watchdog_sync();
-		clocksource_touch_watchdog();
+		raw_spin_unlock(&dbg_master_lock);
+		dbg_touch_watchdogs();
 		local_irq_restore(flags);

 		goto acquirelock;
@@ -563,16 +578,12 @@ return_normal:
 	if (dbg_io_ops->pre_exception)
 		dbg_io_ops->pre_exception();

-	kgdb_disable_hw_debug(ks->linux_regs);
-
 	/*
 	 * Get the passive CPU lock which will hold all the non-primary
 	 * CPU in a spin state while the debugger is active
 	 */
-	if (!kgdb_single_step) {
-		for (i = 0; i < NR_CPUS; i++)
-			atomic_inc(&passive_cpu_wait[i]);
-	}
+	if (!kgdb_single_step)
+		raw_spin_lock(&dbg_slave_lock);

 #ifdef CONFIG_SMP
 	/* Signal the other CPUs to enter kgdb_wait() */
@@ -583,10 +594,9 @@ return_normal:
 	/*
 	 * Wait for the other CPUs to be notified and be waiting for us:
 	 */
-	for_each_online_cpu(i) {
-		while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i]))
-			cpu_relax();
-	}
+	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
+		   atomic_read(&slaves_in_kgdb)) != online_cpus)
+		cpu_relax();

 	/*
 	 * At this point the primary processor is completely
@@ -615,7 +625,8 @@ cpu_master_loop:
 		if (error == DBG_PASS_EVENT) {
 			dbg_kdb_mode = !dbg_kdb_mode;
 		} else if (error == DBG_SWITCH_CPU_EVENT) {
-			dbg_cpu_switch(cpu, dbg_switch_cpu);
+			kgdb_info[dbg_switch_cpu].exception_state |=
+				DCPU_NEXT_MASTER;
 			goto cpu_loop;
 		} else {
 			kgdb_info[cpu].ret_state = error;
@@ -627,24 +638,11 @@ cpu_master_loop:
 	if (dbg_io_ops->post_exception)
 		dbg_io_ops->post_exception();

-	atomic_dec(&cpu_in_kgdb[ks->cpu]);
-
 	if (!kgdb_single_step) {
-		for (i = NR_CPUS-1; i >= 0; i--)
-			atomic_dec(&passive_cpu_wait[i]);
-		/*
-		 * Wait till all the CPUs have quit from the debugger,
-		 * but allow a CPU that hit an exception and is
-		 * waiting to become the master to remain in the debug
-		 * core.
-		 */
-		for_each_online_cpu(i) {
-			while (kgdb_do_roundup &&
-			       atomic_read(&cpu_in_kgdb[i]) &&
-			       !(kgdb_info[i].exception_state &
-				 DCPU_WANT_MASTER))
-				cpu_relax();
-		}
+		raw_spin_unlock(&dbg_slave_lock);
+		/* Wait till all the CPUs have quit from the debugger. */
+		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
+			cpu_relax();
 	}

 kgdb_restore:
@@ -655,12 +653,20 @@ kgdb_restore:
 		else
 			kgdb_sstep_pid = 0;
 	}
+	if (arch_kgdb_ops.correct_hw_break)
+		arch_kgdb_ops.correct_hw_break();
 	if (trace_on)
 		tracing_on();
+
+	kgdb_info[cpu].exception_state &=
+		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
+	kgdb_info[cpu].enter_kgdb--;
+	smp_mb__before_atomic_dec();
+	atomic_dec(&masters_in_kgdb);
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
-	touch_softlockup_watchdog_sync();
-	clocksource_touch_watchdog();
+	raw_spin_unlock(&dbg_master_lock);
+	dbg_touch_watchdogs();
 	local_irq_restore(flags);

 	return kgdb_info[cpu].ret_state;
@@ -678,7 +684,6 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
 {
 	struct kgdb_state kgdb_var;
 	struct kgdb_state *ks = &kgdb_var;
-	int ret;

 	ks->cpu = raw_smp_processor_id();
 	ks->ex_vector = evector;
@@ -689,11 +694,10 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)

 	if (kgdb_reenter_check(ks))
 		return 0; /* Ouch, double exception ! */
-	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
-	ret = kgdb_cpu_enter(ks, regs);
-	kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER |
-						DCPU_IS_SLAVE);
-	return ret;
+	if (kgdb_info[ks->cpu].enter_kgdb != 0)
+		return 0;
+
+	return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
 }

 int kgdb_nmicallback(int cpu, void *regs)
@@ -706,12 +710,9 @@ int kgdb_nmicallback(int cpu, void *regs)
 	ks->cpu = cpu;
 	ks->linux_regs = regs;

-	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-	    atomic_read(&kgdb_active) != -1 &&
-	    atomic_read(&kgdb_active) != cpu) {
-		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
-		kgdb_cpu_enter(ks, regs);
-		kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
+	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
+	    raw_spin_is_locked(&dbg_master_lock)) {
+		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
 		return 0;
 	}
 #endif
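
Note on the locking change, for readers skimming the diff: the per-cpu atomic_t arrays (passive_cpu_wait[] and cpu_in_kgdb[]) are replaced by two raw spinlocks plus two counters. The CPU that wins raw_spin_trylock(&dbg_master_lock) owns the debugger; every other CPU counts itself into slaves_in_kgdb and spins for as long as dbg_slave_lock is held. The sketch below is a minimal userspace analogue of that hand-off, not kernel code: the variable names mirror the patch, the debug_trap() driver is invented for the demo, pthread/stdatomic primitives stand in for raw_spinlock_t/atomic_t, an atomic slave_hold flag stands in for the raw_spin_is_locked(&dbg_slave_lock) check (pthread locks have no "is locked" query), and the roundup wait for masters_in_kgdb + slaves_in_kgdb == num_online_cpus() is omitted.

/*
 * Userspace analogue of the master/slave election introduced by this
 * patch.  One thread wins the trylock and becomes the debug master;
 * the rest park as slaves until the master releases the hold.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_mutex_t dbg_master_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool slave_hold;          /* stands in for "dbg_slave_lock is held" */
static atomic_int slaves_in_kgdb;
static atomic_int kgdb_active = -1;

static void *debug_trap(void *arg)
{
	int cpu = (int)(long)arg;

	if (pthread_mutex_trylock(&dbg_master_lock) == 0) {
		/* Master path: election won, hold the other "CPUs". */
		atomic_exchange(&kgdb_active, cpu);
		atomic_store(&slave_hold, true);

		/* ... debugger work would happen here ... */
		printf("cpu %d owns the debugger\n", cpu);

		/* Release the slaves, then wait until they have all left. */
		atomic_store(&slave_hold, false);
		while (atomic_load(&slaves_in_kgdb) != 0)
			sched_yield();

		atomic_store(&kgdb_active, -1);
		pthread_mutex_unlock(&dbg_master_lock);
	} else {
		/* Slave path: count in, spin while the master holds us. */
		atomic_fetch_add(&slaves_in_kgdb, 1);
		while (atomic_load(&slave_hold))
			sched_yield();
		atomic_fetch_sub(&slaves_in_kgdb, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, debug_trap, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Build with cc -std=c11 -pthread. The property carried over from the patch is that an election loser never blocks on dbg_master_lock: a CPU that traps while another CPU already owns the debugger falls straight into the slave path and waits there, rather than contending for the master role.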