author		Jason Wessel <jason.wessel@windriver.com>	2010-04-02 12:47:02 -0400
committer	Jason Wessel <jason.wessel@windriver.com>	2010-04-02 15:58:18 -0400
commit		62fae312197a8fbcd3727261d59f5a6bd0dbf158 (patch)
tree		aa5166126f2735545326fe8af8a76627f5760191 /kernel/kgdb.c
parent		cad08acebf4b7d993b0cefb9af67208c48fb9a5e (diff)
kgdb: eliminate kgdb_wait(), all cpus enter the same way
This is a kgdb architectural change to have all the cpus (master or
slave) enter the same function.
A cpu that hits an exception (wants to be the master cpu) will call
kgdb_handle_exception() from the trap handler and then invoke
kgdb_roundup_cpus() to synchronize the other cpus and bring them into
kgdb_cpu_enter() as well.
A slave cpu will enter kgdb_cpu_enter() from kgdb_nmicallback() and
set the exception state to note that the processor is a slave.
Previously the slave cpu would have called kgdb_wait(). This change
allows the debug core to change cpus without resuming the system, in
order to inspect arch-specific cpu information.
Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
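
In outline, the patch funnels every cpu through kgdb_cpu_enter() and uses the
new per-cpu exception_state flags to mark each cpu's role. The fragment below
is a condensed sketch of the two entry paths, distilled from the diff that
follows (locking details, single-step retries, and error handling omitted);
it is illustrative rather than the literal patch text:

	/* Master path: the cpu that took the trap, via kgdb_handle_exception() */
	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
	ret = kgdb_cpu_enter(ks, regs);		/* loops until it owns kgdb_active */
	kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;

	/* Slave path: a rounded-up cpu, via kgdb_nmicallback() */
	kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
	kgdb_cpu_enter(ks, regs);		/* spins until passive_cpu_wait[cpu] clears */
	kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;

Because master and slave sit in the same function, the debug core can hand the
master role from one cpu to another while the system stays stopped.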
Diffstat (limited to 'kernel/kgdb.c')
-rw-r--r--	kernel/kgdb.c	165
1 file changed, 82 insertions(+), 83 deletions(-)
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 42fd128127a6..6882c047452d 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -69,9 +69,16 @@ struct kgdb_state {
 	struct pt_regs *linux_regs;
 };
 
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1	/* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2	/* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE	 0x4	/* Slave cpu enter exception */
+#define DCPU_SSTEP	 0x8	/* CPU is single stepping */
+
 static struct debuggerinfo_struct {
 	void *debuggerinfo;
 	struct task_struct *task;
+	int exception_state;
 } kgdb_info[NR_CPUS];
 
 /**
@@ -558,49 +565,6 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
 }
 
 /*
- * CPU debug state control:
- */
-
-#ifdef CONFIG_SMP
-static void kgdb_wait(struct pt_regs *regs)
-{
-	unsigned long flags;
-	int cpu;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	kgdb_info[cpu].debuggerinfo = regs;
-	kgdb_info[cpu].task = current;
-	/*
-	 * Make sure the above info reaches the primary CPU before
-	 * our cpu_in_kgdb[] flag setting does:
-	 */
-	smp_wmb();
-	atomic_set(&cpu_in_kgdb[cpu], 1);
-
-	/* Disable any cpu specific hw breakpoints */
-	kgdb_disable_hw_debug(regs);
-
-	/* Wait till primary CPU is done with debugging */
-	while (atomic_read(&passive_cpu_wait[cpu]))
-		cpu_relax();
-
-	kgdb_info[cpu].debuggerinfo = NULL;
-	kgdb_info[cpu].task = NULL;
-
-	/* fix up hardware debug registers on local cpu */
-	if (arch_kgdb_ops.correct_hw_break)
-		arch_kgdb_ops.correct_hw_break();
-
-	/* Signal the primary CPU that we are done: */
-	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog_sync();
-	clocksource_touch_watchdog();
-	local_irq_restore(flags);
-}
-#endif
-
-/*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
  */
@@ -1395,34 +1359,12 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
 	return 1;
 }
 
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- *	interface locks, if any (begin_session)
- *	kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
 {
-	struct kgdb_state kgdb_var;
-	struct kgdb_state *ks = &kgdb_var;
 	unsigned long flags;
 	int sstep_tries = 100;
 	int error = 0;
 	int i, cpu;
-
-	ks->cpu = raw_smp_processor_id();
-	ks->ex_vector = evector;
-	ks->signo = signo;
-	ks->ex_vector = evector;
-	ks->err_code = ecode;
-	ks->kgdb_usethreadid = 0;
-	ks->linux_regs = regs;
-
-	if (kgdb_reenter_check(ks))
-		return 0; /* Ouch, double exception ! */
-
 acquirelock:
 	/*
 	 * Interrupts will be restored by the 'trap return' code, except when
@@ -1430,13 +1372,42 @@ acquirelock:
 	 */
 	local_irq_save(flags);
 
-	cpu = raw_smp_processor_id();
+	cpu = ks->cpu;
+	kgdb_info[cpu].debuggerinfo = regs;
+	kgdb_info[cpu].task = current;
+	/*
+	 * Make sure the above info reaches the primary CPU before
+	 * our cpu_in_kgdb[] flag setting does:
+	 */
+	smp_wmb();
+	atomic_set(&cpu_in_kgdb[cpu], 1);
 
 	/*
-	 * Acquire the kgdb_active lock:
+	 * CPU will loop if it is a slave or request to become a kgdb
+	 * master cpu and acquire the kgdb_active lock:
 	 */
-	while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
+	while (1) {
+		if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+				break;
+		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+			if (!atomic_read(&passive_cpu_wait[cpu]))
+				goto return_normal;
+		} else {
+return_normal:
+			/* Return to normal operation by executing any
+			 * hw breakpoint fixup.
+			 */
+			if (arch_kgdb_ops.correct_hw_break)
+				arch_kgdb_ops.correct_hw_break();
+			atomic_set(&cpu_in_kgdb[cpu], 0);
+			touch_softlockup_watchdog_sync();
+			clocksource_touch_watchdog();
+			local_irq_restore(flags);
+			return 0;
+		}
 		cpu_relax();
+	}
 
 	/*
 	 * For single stepping, try to only enter on the processor
@@ -1470,9 +1441,6 @@ acquirelock:
 	if (kgdb_io_ops->pre_exception)
 		kgdb_io_ops->pre_exception();
 
-	kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
-	kgdb_info[ks->cpu].task = current;
-
 	kgdb_disable_hw_debug(ks->linux_regs);
 
 	/*
@@ -1484,12 +1452,6 @@ acquirelock:
 		atomic_set(&passive_cpu_wait[i], 1);
 	}
 
-	/*
-	 * spin_lock code is good enough as a barrier so we don't
-	 * need one here:
-	 */
-	atomic_set(&cpu_in_kgdb[ks->cpu], 1);
-
 #ifdef CONFIG_SMP
 	/* Signal the other CPUs to enter kgdb_wait() */
 	if ((!kgdb_single_step) && kgdb_do_roundup)
@@ -1521,8 +1483,6 @@ acquirelock:
 	if (kgdb_io_ops->post_exception)
 		kgdb_io_ops->post_exception();
 
-	kgdb_info[ks->cpu].debuggerinfo = NULL;
-	kgdb_info[ks->cpu].task = NULL;
 	atomic_set(&cpu_in_kgdb[ks->cpu], 0);
 
 	if (!kgdb_single_step) {
@@ -1555,13 +1515,52 @@ kgdb_restore:
 	return error;
 }
 
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ *	interface locks, if any (begin_session)
+ *	kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+	int ret;
+
+	ks->cpu = raw_smp_processor_id();
+	ks->ex_vector = evector;
+	ks->signo = signo;
+	ks->ex_vector = evector;
+	ks->err_code = ecode;
+	ks->kgdb_usethreadid = 0;
+	ks->linux_regs = regs;
+
+	if (kgdb_reenter_check(ks))
+		return 0; /* Ouch, double exception ! */
+	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+	ret = kgdb_cpu_enter(ks, regs);
+	kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
+	return ret;
+}
+
 int kgdb_nmicallback(int cpu, void *regs)
 {
 #ifdef CONFIG_SMP
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+
+	memset(ks, 0, sizeof(struct kgdb_state));
+	ks->cpu = cpu;
+	ks->linux_regs = regs;
+
 	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-	    atomic_read(&kgdb_active) != cpu &&
-	    atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
-		kgdb_wait((struct pt_regs *)regs);
+	    atomic_read(&kgdb_active) != -1 &&
+	    atomic_read(&kgdb_active) != cpu) {
+		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+		kgdb_cpu_enter(ks, regs);
+		kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
 		return 0;
 	}
 #endif
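
For context, this handshake depends on each architecture's roundup glue: the
trap handler calls kgdb_handle_exception() on the cpu that hit the breakpoint,
and kgdb_roundup_cpus() kicks the remaining cpus into kgdb_nmicallback(). A
minimal sketch of that glue follows, with smp_send_debug_ipi(),
arch_kgdb_trap(), and debug_ipi_handler() as hypothetical placeholder names;
the real primitive is arch-specific (on x86, for example, an NMI IPI):

	/* Trap hook: the faulting cpu becomes the kgdb master. */
	static int arch_kgdb_trap(int vector, int signo, int err, struct pt_regs *regs)
	{
		return kgdb_handle_exception(vector, signo, err, regs);
	}

	/* Called by the debug core once it holds kgdb_active. */
	void kgdb_roundup_cpus(unsigned long flags)
	{
		smp_send_debug_ipi();	/* hypothetical: interrupt every other cpu */
	}

	/* IPI/NMI handler executed on each slave cpu. */
	static void debug_ipi_handler(struct pt_regs *regs)
	{
		kgdb_nmicallback(raw_smp_processor_id(), regs);
	}

An NMI-capable primitive matters here: slaves may be spinning with normal
interrupts disabled, and kgdb_nmicallback() is the entry point intended to be
safe from that context.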