about summary refs log tree commit diff stats
path: root/kernel/kgdb.c
diff options
context:
space:
mode:
author: Jason Wessel <jason.wessel@windriver.com> 2008-04-01 17:55:27 -0400
committer: Ingo Molnar <mingo@elte.hu> 2008-04-17 14:05:43 -0400
commit: 56fb70932964927597ce30bbd820471633c72adc (patch)
tree: 72654913a07cbc83f174234d4fa0f91b79a2164c /kernel/kgdb.c
parent: 225a4424ade24e913c081d5a4c4bd71a0fe2e0ac (diff)
kgdb: fix SMP NMI kgdb_handle_exception exit race
Fix the problem of protecting the kgdb handle_exception exit which had an NMI race condition, while trying to restore normal system operation. There was a small window after the master processor sets cpu_in_debug to zero but before it has set kgdb_active to zero where a non-master processor in an SMP system could receive an NMI and re-enter the kgdb_wait() loop. As long as the master processor sets the cpu_in_debug before sending the cpu roundup the cpu_in_debug variable can also be used to guard against the race condition. The kgdb_wait() function no longer needs to check kgdb_active because it is done in the arch specific code and handled along with the nmi traps at the low level. This also allows kgdb_wait() to exit correctly if it was entered for some unknown reason due to a spurious NMI that could not be handled by the arch specific code. Signed-off-by: Jason Wessel <jason.wessel@windriver.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/kgdb.c')
-rw-r--r-- kernel/kgdb.c | 27
1 file changed, 8 insertions(+), 19 deletions(-)
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 85b7e5b934a7..4d1b3c232377 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -561,18 +561,6 @@ static void kgdb_wait(struct pt_regs *regs)
561 smp_wmb(); 561 smp_wmb();
562 atomic_set(&cpu_in_kgdb[cpu], 1); 562 atomic_set(&cpu_in_kgdb[cpu], 1);
563 563
564 /*
565 * The primary CPU must be active to enter here, but this is
566 * guard in case the primary CPU had not been selected if
567 * this was an entry via nmi.
568 */
569 while (atomic_read(&kgdb_active) == -1)
570 cpu_relax();
571
572 /* Wait till primary CPU goes completely into the debugger. */
573 while (!atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)]))
574 cpu_relax();
575
576 /* Wait till primary CPU is done with debugging */ 564 /* Wait till primary CPU is done with debugging */
577 while (atomic_read(&passive_cpu_wait[cpu])) 565 while (atomic_read(&passive_cpu_wait[cpu]))
578 cpu_relax(); 566 cpu_relax();
@@ -1447,18 +1435,18 @@ acquirelock:
1447 atomic_set(&passive_cpu_wait[i], 1); 1435 atomic_set(&passive_cpu_wait[i], 1);
1448 } 1436 }
1449 1437
1450#ifdef CONFIG_SMP
1451 /* Signal the other CPUs to enter kgdb_wait() */
1452 if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
1453 kgdb_roundup_cpus(flags);
1454#endif
1455
1456 /* 1438 /*
1457 * spin_lock code is good enough as a barrier so we don't 1439 * spin_lock code is good enough as a barrier so we don't
1458 * need one here: 1440 * need one here:
1459 */ 1441 */
1460 atomic_set(&cpu_in_kgdb[ks->cpu], 1); 1442 atomic_set(&cpu_in_kgdb[ks->cpu], 1);
1461 1443
1444#ifdef CONFIG_SMP
1445 /* Signal the other CPUs to enter kgdb_wait() */
1446 if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
1447 kgdb_roundup_cpus(flags);
1448#endif
1449
1462 /* 1450 /*
1463 * Wait for the other CPUs to be notified and be waiting for us: 1451 * Wait for the other CPUs to be notified and be waiting for us:
1464 */ 1452 */
@@ -1514,7 +1502,8 @@ int kgdb_nmicallback(int cpu, void *regs)
1514{ 1502{
1515#ifdef CONFIG_SMP 1503#ifdef CONFIG_SMP
1516 if (!atomic_read(&cpu_in_kgdb[cpu]) && 1504 if (!atomic_read(&cpu_in_kgdb[cpu]) &&
1517 atomic_read(&kgdb_active) != cpu) { 1505 atomic_read(&kgdb_active) != cpu &&
1506 atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
1518 kgdb_wait((struct pt_regs *)regs); 1507 kgdb_wait((struct pt_regs *)regs);
1519 return 0; 1508 return 0;
1520 } 1509 }