author		Jason Wessel <jason.wessel@windriver.com>	2010-05-21 09:46:00 -0400
committer	Jason Wessel <jason.wessel@windriver.com>	2010-10-22 16:34:13 -0400
commit		dfee3a7b92208b30f77876068aece9ea571270c2 (patch)
tree		de243b8c2e1a3b5dea007b1fb79f1e7b4a8263ba /kernel/debug
parent		39a0715f5ace92268190c89e246fd1cf741dbaea (diff)
debug_core: refactor locking for master/slave cpus
For quite some time there have been problems with memory barriers and various races with NMI on multiprocessor systems using the kernel debugger. The algorithm for entering the kernel debug core and resuming kernel execution was racy, and it had several known edge case problems when attempting to debug a heavily loaded system with breakpoints that are hit repeatedly and quickly.

The prior "locking" design for entry worked as follows:

* The atomic counter kgdb_active was used with atomic exchange in order to elect a master cpu out of all the cpus that may have taken a debug exception.
* The master cpu increments all elements of passive_cpu_wait[].
* The master cpu issues the round up cpus message.
* Each "slave cpu" that enters the debug core increments its own element in cpu_in_kgdb[].
* Each "slave cpu" spins on passive_cpu_wait[] until it becomes 0.
* The master cpu debugs the system.

The new scheme removes the two arrays of atomic counters and replaces them with two single counters. One counter tracks the number of cpus waiting to become a master cpu (because one or more of them hit an exception). The second counter indicates how many cpus have entered as slave cpus. The new entry logic works as follows:

* One or more cpus enter via kgdb_handle_exception() and increment masters_in_kgdb. Each cpu attempts to take the spin lock called dbg_master_lock.
* The master cpu sets kgdb_active to the current cpu.
* The master cpu takes the spinlock dbg_slave_lock.
* The master cpu asks to round up all the other cpus.
* Each slave cpu that is not already in kgdb_handle_exception() enters the debug core and increments slaves_in_kgdb. Each slave then spins, try-locking dbg_slave_lock.
* The master cpu waits for the sum of masters_in_kgdb and slaves_in_kgdb to equal the number of online cpus.
* The master cpu debugs the system.

In the new design kgdb_active can only be changed while holding dbg_master_lock.

Stress testing has not turned up any of the entry/exit races that plagued the prior locking design, which suffered from atomic variables not being truly atomic (in the way kgdb used them) as well as memory barrier races.

Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
Acked-by: Dongdong Deng <dongdong.deng@windriver.com>
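To make the new ordering concrete, the following is a minimal userspace sketch of a single debug session under the new scheme. It is not the patched kernel code: C11 atomics and an atomic_flag trylock stand in for the kernel's atomic_t and raw_spinlock_t, sched_yield() stands in for cpu_relax(), and the helper names (dbg_trylock, dbg_unlock, master_entry, slave_entry) are illustrative only. The DCPU_NEXT_MASTER hand-off, single-step paths, and re-round-up for successive masters are deliberately omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

static atomic_int masters_in_kgdb;
static atomic_int slaves_in_kgdb;
static atomic_int kgdb_active = -1;           /* cpu that owns the debugger */
static atomic_flag dbg_master_lock = ATOMIC_FLAG_INIT;
static atomic_flag dbg_slave_lock = ATOMIC_FLAG_INIT;

static bool dbg_trylock(atomic_flag *lock)
{
	return !atomic_flag_test_and_set_explicit(lock, memory_order_acquire);
}

static void dbg_unlock(atomic_flag *lock)
{
	atomic_flag_clear_explicit(lock, memory_order_release);
}

/* A cpu that hit an exception: count in, then race for dbg_master_lock. */
static void master_entry(int cpu, int online_cpus)
{
	atomic_fetch_add(&masters_in_kgdb, 1);
	while (!dbg_trylock(&dbg_master_lock))
		sched_yield();                /* losers are counted while they wait */
	atomic_store(&kgdb_active, cpu);      /* only changed under dbg_master_lock */

	while (!dbg_trylock(&dbg_slave_lock)) /* pin the rounded-up cpus */
		sched_yield();
	/* ... round up the other cpus, then wait for all to check in ... */
	while (atomic_load(&masters_in_kgdb) +
	       atomic_load(&slaves_in_kgdb) != online_cpus)
		sched_yield();

	/* ... the master debugs the system here ... */

	dbg_unlock(&dbg_slave_lock);          /* let the slaves resume */
	while (atomic_load(&slaves_in_kgdb) != 0)
		sched_yield();                /* wait until they have all left */

	atomic_fetch_sub(&masters_in_kgdb, 1);
	atomic_store(&kgdb_active, -1);
	dbg_unlock(&dbg_master_lock);         /* a waiting exception cpu may now win */
}

/* A cpu rounded up by the master: count in, then spin on dbg_slave_lock. */
static void slave_entry(void)
{
	atomic_fetch_add(&slaves_in_kgdb, 1);
	while (!dbg_trylock(&dbg_slave_lock)) /* held by the master until it is done */
		sched_yield();
	dbg_unlock(&dbg_slave_lock);
	atomic_fetch_sub(&slaves_in_kgdb, 1); /* resume normal execution */
}

In the patch itself these roles are played by raw_spin_trylock(&dbg_master_lock) in cpu_loop and by the slaves polling raw_spin_is_locked(&dbg_slave_lock), with the per-cpu enter_kgdb count preventing recursive entry; see the diff below.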
Diffstat (limited to 'kernel/debug')
-rw-r--r--	kernel/debug/debug_core.c	105
-rw-r--r--	kernel/debug/debug_core.h	1
2 files changed, 56 insertions(+), 50 deletions(-)
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index bb9497724808..26dbdc37d219 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -110,13 +110,15 @@ static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
  */
 atomic_t			kgdb_active = ATOMIC_INIT(-1);
 EXPORT_SYMBOL_GPL(kgdb_active);
+static DEFINE_RAW_SPINLOCK(dbg_master_lock);
+static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
 
 /*
  * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
  * bootup code (which might not have percpu set up yet):
  */
-static atomic_t			passive_cpu_wait[NR_CPUS];
-static atomic_t			cpu_in_kgdb[NR_CPUS];
+static atomic_t			masters_in_kgdb;
+static atomic_t			slaves_in_kgdb;
 static atomic_t			kgdb_break_tasklet_var;
 atomic_t			kgdb_setting_breakpoint;
 
@@ -478,14 +480,23 @@ static void dbg_touch_watchdogs(void)
 	rcu_cpu_stall_reset();
 }
 
-static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
+		int exception_state)
 {
 	unsigned long flags;
 	int sstep_tries = 100;
 	int error;
-	int i, cpu;
+	int cpu;
 	int trace_on = 0;
+	int online_cpus = num_online_cpus();
 
+	kgdb_info[ks->cpu].enter_kgdb++;
+	kgdb_info[ks->cpu].exception_state |= exception_state;
+
+	if (exception_state == DCPU_WANT_MASTER)
+		atomic_inc(&masters_in_kgdb);
+	else
+		atomic_inc(&slaves_in_kgdb);
 	kgdb_disable_hw_debug(ks->linux_regs);
 
 acquirelock:
@@ -500,14 +511,15 @@ acquirelock:
 	kgdb_info[cpu].task = current;
 	kgdb_info[cpu].ret_state = 0;
 	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
-	/*
-	 * Make sure the above info reaches the primary CPU before
-	 * our cpu_in_kgdb[] flag setting does:
-	 */
-	atomic_inc(&cpu_in_kgdb[cpu]);
 
-	if (exception_level == 1)
+	/* Make sure the above info reaches the primary CPU */
+	smp_mb();
+
+	if (exception_level == 1) {
+		if (raw_spin_trylock(&dbg_master_lock))
+			atomic_xchg(&kgdb_active, cpu);
 		goto cpu_master_loop;
+	}
 
 	/*
 	 * CPU will loop if it is a slave or request to become a kgdb
@@ -519,10 +531,12 @@ cpu_loop:
 			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
 			goto cpu_master_loop;
 		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
-			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+			if (raw_spin_trylock(&dbg_master_lock)) {
+				atomic_xchg(&kgdb_active, cpu);
 				break;
+			}
 		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
-			if (!atomic_read(&passive_cpu_wait[cpu]))
+			if (!raw_spin_is_locked(&dbg_slave_lock))
 				goto return_normal;
 		} else {
 return_normal:
@@ -533,7 +547,11 @@ return_normal:
 				arch_kgdb_ops.correct_hw_break();
 			if (trace_on)
 				tracing_on();
-			atomic_dec(&cpu_in_kgdb[cpu]);
+			kgdb_info[cpu].exception_state &=
+				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
+			kgdb_info[cpu].enter_kgdb--;
+			smp_mb__before_atomic_dec();
+			atomic_dec(&slaves_in_kgdb);
 			dbg_touch_watchdogs();
 			local_irq_restore(flags);
 			return 0;
@@ -551,6 +569,7 @@ return_normal:
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
+		raw_spin_unlock(&dbg_master_lock);
 		dbg_touch_watchdogs();
 		local_irq_restore(flags);
 
@@ -576,10 +595,8 @@ return_normal:
 	 * Get the passive CPU lock which will hold all the non-primary
 	 * CPU in a spin state while the debugger is active
 	 */
-	if (!kgdb_single_step) {
-		for (i = 0; i < NR_CPUS; i++)
-			atomic_inc(&passive_cpu_wait[i]);
-	}
+	if (!kgdb_single_step)
+		raw_spin_lock(&dbg_slave_lock);
 
 #ifdef CONFIG_SMP
 	/* Signal the other CPUs to enter kgdb_wait() */
@@ -590,10 +607,9 @@ return_normal:
 	/*
 	 * Wait for the other CPUs to be notified and be waiting for us:
 	 */
-	for_each_online_cpu(i) {
-		while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i]))
-			cpu_relax();
-	}
+	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
+		   atomic_read(&slaves_in_kgdb)) != online_cpus)
+		cpu_relax();
 
 	/*
 	 * At this point the primary processor is completely
@@ -634,24 +650,11 @@ cpu_master_loop:
 	if (dbg_io_ops->post_exception)
 		dbg_io_ops->post_exception();
 
-	atomic_dec(&cpu_in_kgdb[ks->cpu]);
-
 	if (!kgdb_single_step) {
-		for (i = NR_CPUS-1; i >= 0; i--)
-			atomic_dec(&passive_cpu_wait[i]);
-		/*
-		 * Wait till all the CPUs have quit from the debugger,
-		 * but allow a CPU that hit an exception and is
-		 * waiting to become the master to remain in the debug
-		 * core.
-		 */
-		for_each_online_cpu(i) {
-			while (kgdb_do_roundup &&
-			       atomic_read(&cpu_in_kgdb[i]) &&
-			       !(kgdb_info[i].exception_state &
-				 DCPU_WANT_MASTER))
-				cpu_relax();
-		}
+		raw_spin_unlock(&dbg_slave_lock);
+		/* Wait till all the CPUs have quit from the debugger. */
+		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
+			cpu_relax();
 	}
 
 kgdb_restore:
@@ -666,8 +669,15 @@ kgdb_restore:
 		arch_kgdb_ops.correct_hw_break();
 	if (trace_on)
 		tracing_on();
+
+	kgdb_info[cpu].exception_state &=
+		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
+	kgdb_info[cpu].enter_kgdb--;
+	smp_mb__before_atomic_dec();
+	atomic_dec(&masters_in_kgdb);
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
+	raw_spin_unlock(&dbg_master_lock);
 	dbg_touch_watchdogs();
 	local_irq_restore(flags);
 
@@ -686,7 +696,6 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
 {
 	struct kgdb_state kgdb_var;
 	struct kgdb_state *ks = &kgdb_var;
-	int ret;
 
 	ks->cpu			= raw_smp_processor_id();
 	ks->ex_vector		= evector;
@@ -697,11 +706,10 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
 
 	if (kgdb_reenter_check(ks))
 		return 0; /* Ouch, double exception ! */
-	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
-	ret = kgdb_cpu_enter(ks, regs);
-	kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER |
-						DCPU_IS_SLAVE);
-	return ret;
+	if (kgdb_info[ks->cpu].enter_kgdb != 0)
+		return 0;
+
+	return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
 }
 
 int kgdb_nmicallback(int cpu, void *regs)
@@ -714,12 +722,9 @@ int kgdb_nmicallback(int cpu, void *regs)
 	ks->cpu			= cpu;
 	ks->linux_regs		= regs;
 
-	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-	    atomic_read(&kgdb_active) != -1 &&
-	    atomic_read(&kgdb_active) != cpu) {
-		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
-		kgdb_cpu_enter(ks, regs);
-		kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
+	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
+			raw_spin_is_locked(&dbg_master_lock)) {
+		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
 		return 0;
 	}
 #endif
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
index c5d753d80f67..3494c28a7e7a 100644
--- a/kernel/debug/debug_core.h
+++ b/kernel/debug/debug_core.h
@@ -40,6 +40,7 @@ struct debuggerinfo_struct {
 	int			exception_state;
 	int			ret_state;
 	int			irq_depth;
+	int			enter_kgdb;
 };
 
 extern struct debuggerinfo_struct kgdb_info[];