Diffstat (limited to 'kernel/kgdb.c')
 -rw-r--r--  kernel/kgdb.c | 264
 1 file changed, 146 insertions(+), 118 deletions(-)
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 9147a3190c9d..11f3515ca83f 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -69,9 +69,16 @@ struct kgdb_state {
         struct pt_regs *linux_regs;
 };
 
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE    0x4 /* Slave cpu enter exception */
+#define DCPU_SSTEP       0x8 /* CPU is single stepping */
+
 static struct debuggerinfo_struct {
         void *debuggerinfo;
         struct task_struct *task;
+        int exception_state;
 } kgdb_info[NR_CPUS];
 
 /**
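
The new exception_state field is a per-CPU bitmask built from the DCPU_* values above; the entry wrappers later in this diff set one role bit before calling into the debugger core and clear it afterwards. A trivial userspace illustration of that set/test/clear pattern (plain C; the variable stands in for kgdb_info[cpu].exception_state):

#include <stdio.h>

#define DCPU_WANT_MASTER 0x1    /* values copied from the hunk above */
#define DCPU_IS_SLAVE    0x4

static int exception_state;     /* per-CPU in the kernel: kgdb_info[cpu] */

int main(void)
{
    /* kgdb_handle_exception() marks the CPU as a would-be master... */
    exception_state |= DCPU_WANT_MASTER;
    printf("want master: %d\n", !!(exception_state & DCPU_WANT_MASTER));

    /* ...and drops the claim once kgdb_cpu_enter() returns. */
    exception_state &= ~DCPU_WANT_MASTER;
    printf("want master: %d\n", !!(exception_state & DCPU_WANT_MASTER));
    return 0;
}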
@@ -129,6 +136,7 @@ struct task_struct *kgdb_usethread;
 struct task_struct *kgdb_contthread;
 
 int kgdb_single_step;
+pid_t kgdb_sstep_pid;
 
 /* Our I/O buffers. */
 static char remcom_in_buffer[BUFMAX];
@@ -390,27 +398,22 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 
 /*
  * Copy the binary array pointed to by buf into mem.  Fix $, #, and
- * 0x7d escaped with 0x7d.  Return a pointer to the character after
- * the last byte written.
+ * 0x7d escaped with 0x7d.  Return -EFAULT on failure or 0 on success.
+ * The input buf is overwritten with the result to write to mem.
  */
 static int kgdb_ebin2mem(char *buf, char *mem, int count)
 {
-        int err = 0;
-        char c;
+        int size = 0;
+        char *c = buf;
 
         while (count-- > 0) {
-                c = *buf++;
-                if (c == 0x7d)
-                        c = *buf++ ^ 0x20;
-
-                err = probe_kernel_write(mem, &c, 1);
-                if (err)
-                        break;
-
-                mem++;
+                c[size] = *buf++;
+                if (c[size] == 0x7d)
+                        c[size] = *buf++ ^ 0x20;
+                size++;
         }
 
-        return err;
+        return probe_kernel_write(mem, c, size);
 }
 
 /*
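
In gdb's remote serial protocol, binary payloads escape '$', '#' and the escape byte itself (0x7d) as 0x7d followed by the byte XORed with 0x20, and count is the number of decoded bytes, so escaped input can be longer than count. The rewrite decodes in place (safe because decoding never grows the data) and then issues one bulk probe_kernel_write() instead of one call per byte. A standalone userspace model of the decode loop (plain C, no kernel dependencies):

#include <stdio.h>

/* Same decode loop as the new kgdb_ebin2mem(), minus the kernel write. */
static int ebin_decode(char *buf, int count)
{
    int size = 0;
    char *c = buf;

    while (count-- > 0) {
        c[size] = *buf++;
        if (c[size] == 0x7d)
            c[size] = *buf++ ^ 0x20;
        size++;
    }
    return size;  /* the kernel hands c/size to probe_kernel_write() */
}

int main(void)
{
    /* 'a', escaped '#' (0x7d 0x03 decodes to 0x23), 'b'; count is the
     * decoded length, 3, while the escaped input is 4 bytes long. */
    char pkt[] = { 'a', 0x7d, 0x03, 'b' };
    int n = ebin_decode(pkt, 3);

    printf("%d bytes: %c 0x%02x %c\n", n, pkt[0], pkt[1], pkt[2]);
    return 0;
}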
@@ -541,12 +544,17 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
         */
        if (tid == 0 || tid == -1)
                tid = -atomic_read(&kgdb_active) - 2;
-       if (tid < 0) {
+       if (tid < -1 && tid > -NR_CPUS - 2) {
                if (kgdb_info[-tid - 2].task)
                        return kgdb_info[-tid - 2].task;
                else
                        return idle_task(-tid - 2);
        }
+       if (tid <= 0) {
+               printk(KERN_ERR "KGDB: Internal thread select error\n");
+               dump_stack();
+               return NULL;
+       }
 
        /*
         * find_task_by_pid_ns() does not take the tasklist lock anymore
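
The id convention at work here: gdb thread ids of -2 and below are shadow threads that stand in for CPUs, with CPU n addressed as tid = -n - 2 (0 and -1 are reserved). The new test therefore accepts exactly -2 down to -NR_CPUS - 1, and the added tid <= 0 branch reports anything else as an internal error instead of indexing kgdb_info[] out of bounds. A small userspace model of the mapping and the new range check:

#include <stdio.h>

#define NR_CPUS 8  /* illustrative; the kernel config sets this */

/* CPU n is addressed as tid = -n - 2; anything outside the valid
 * shadow range is rejected, as in the new getthread() check. */
static int tid_to_cpu(int tid)
{
    if (tid < -1 && tid > -NR_CPUS - 2)
        return -tid - 2;
    return -1;  /* not a valid shadow thread id */
}

int main(void)
{
    printf("tid  -2 -> cpu %d\n", tid_to_cpu(-2));   /* cpu 0 */
    printf("tid  -9 -> cpu %d\n", tid_to_cpu(-9));   /* cpu 7 */
    printf("tid -10 -> cpu %d\n", tid_to_cpu(-10));  /* -1: out of range */
    printf("tid  -1 -> cpu %d\n", tid_to_cpu(-1));   /* -1: reserved */
    return 0;
}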
@@ -557,46 +565,6 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
 }
 
 /*
- * CPU debug state control:
- */
-
-#ifdef CONFIG_SMP
-static void kgdb_wait(struct pt_regs *regs)
-{
-        unsigned long flags;
-        int cpu;
-
-        local_irq_save(flags);
-        cpu = raw_smp_processor_id();
-        kgdb_info[cpu].debuggerinfo = regs;
-        kgdb_info[cpu].task = current;
-        /*
-         * Make sure the above info reaches the primary CPU before
-         * our cpu_in_kgdb[] flag setting does:
-         */
-        smp_wmb();
-        atomic_set(&cpu_in_kgdb[cpu], 1);
-
-        /* Wait till primary CPU is done with debugging */
-        while (atomic_read(&passive_cpu_wait[cpu]))
-                cpu_relax();
-
-        kgdb_info[cpu].debuggerinfo = NULL;
-        kgdb_info[cpu].task = NULL;
-
-        /* fix up hardware debug registers on local cpu */
-        if (arch_kgdb_ops.correct_hw_break)
-                arch_kgdb_ops.correct_hw_break();
-
-        /* Signal the primary CPU that we are done: */
-        atomic_set(&cpu_in_kgdb[cpu], 0);
-        touch_softlockup_watchdog();
-        clocksource_touch_watchdog();
-        local_irq_restore(flags);
-}
-#endif
-
-/*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
  */
@@ -619,7 +587,8 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
 static int kgdb_activate_sw_breakpoints(void)
 {
         unsigned long addr;
-        int error = 0;
+        int error;
+        int ret = 0;
         int i;
 
         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -629,13 +598,16 @@ static int kgdb_activate_sw_breakpoints(void)
                 addr = kgdb_break[i].bpt_addr;
                 error = kgdb_arch_set_breakpoint(addr,
                                 kgdb_break[i].saved_instr);
-                if (error)
-                        return error;
+                if (error) {
+                        ret = error;
+                        printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+                        continue;
+                }
 
                 kgdb_flush_swbreak_addr(addr);
                 kgdb_break[i].state = BP_ACTIVE;
         }
-        return 0;
+        return ret;
 }
 
 static int kgdb_set_sw_break(unsigned long addr)
@@ -682,7 +654,8 @@ static int kgdb_set_sw_break(unsigned long addr)
 static int kgdb_deactivate_sw_breakpoints(void)
 {
         unsigned long addr;
-        int error = 0;
+        int error;
+        int ret = 0;
         int i;
 
         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -691,13 +664,15 @@ static int kgdb_deactivate_sw_breakpoints(void)
                 addr = kgdb_break[i].bpt_addr;
                 error = kgdb_arch_remove_breakpoint(addr,
                                         kgdb_break[i].saved_instr);
-                if (error)
-                        return error;
+                if (error) {
+                        printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+                        ret = error;
+                }
 
                 kgdb_flush_swbreak_addr(addr);
                 kgdb_break[i].state = BP_SET;
         }
-        return 0;
+        return ret;
 }
 
 static int kgdb_remove_sw_break(unsigned long addr)
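
Both breakpoint loops (install above, removal here) switch from aborting on the first failure to a best-effort pass: log the bad slot, remember the last error in ret, and keep going, so one unwritable address no longer leaves the remaining breakpoints in the wrong state. The pattern in isolation (userspace sketch; apply_slot() is an invented stand-in for the kgdb_arch_* calls):

#include <stdio.h>

/* Hypothetical per-slot operation standing in for
 * kgdb_arch_set_breakpoint()/kgdb_arch_remove_breakpoint(). */
static int apply_slot(int i)
{
    return (i == 2) ? -22 /* -EINVAL */ : 0;
}

/* Keep going on failure, remember the last error, report it only
 * after every slot has been attempted. */
static int apply_all(int nslots)
{
    int error;
    int ret = 0;
    int i;

    for (i = 0; i < nslots; i++) {
        error = apply_slot(i);
        if (error) {
            fprintf(stderr, "slot %d failed: %d\n", i, error);
            ret = error;
            continue;   /* the earlier code did "return error;" here */
        }
        /* ...activate/flush the slot as the kernel code does... */
    }
    return ret;
}

int main(void)
{
    printf("apply_all -> %d\n", apply_all(4)); /* -22, but slots 0, 1, 3 done */
    return 0;
}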
@@ -870,7 +845,7 @@ static void gdb_cmd_getregs(struct kgdb_state *ks)
 
        /*
         * All threads that don't have debuggerinfo should be
-        * in __schedule() sleeping, since all other CPUs
+        * in schedule() sleeping, since all other CPUs
         * are in kgdb_wait, and thus have debuggerinfo.
         */
        if (local_debuggerinfo) {
@@ -1204,8 +1179,10 @@ static int gdb_cmd_exception_pass(struct kgdb_state *ks)
                return 1;
 
        } else {
-               error_packet(remcom_out_buffer, -EINVAL);
-               return 0;
+               kgdb_msg_write("KGDB only knows signal 9 (pass)"
+                       " and 15 (pass and disconnect)\n"
+                       "Executing a continue without signal passing\n", 0);
+               remcom_in_buffer[0] = 'c';
        }
 
        /* Indicate fall through */
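
For gdb's "C <sig>" (continue with signal) packet, the stub now degrades gracefully: signals 9 and 15 keep their special handling, and any other signal becomes a plain continue by rewriting the command buffer to 'c', with a console message instead of an error packet. The kernel matches the packet characters directly; the sketch below only models the resulting policy (hypothetical helper, not the stub's code):

#include <stdio.h>
#include <stdlib.h>

/* Policy model: 9 and 15 are honored, everything else is downgraded
 * to a plain continue, mirroring remcom_in_buffer[0] = 'c' above. */
static char handle_pass_packet(const char *packet)
{
    long sig = strtol(packet + 1, NULL, 16);  /* "C<sig in hex>" */

    if (sig == 9 || sig == 15)
        return 'C';
    fprintf(stderr, "KGDB only knows signal 9 and 15; continuing\n");
    return 'c';
}

int main(void)
{
    printf("C09 -> %c\n", handle_pass_packet("C09"));  /* honored */
    printf("C0f -> %c\n", handle_pass_packet("C0f"));  /* honored (15) */
    printf("C02 -> %c\n", handle_pass_packet("C02"));  /* downgraded */
    return 0;
}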
@@ -1382,33 +1359,13 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
        return 1;
 }
 
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- *     interface locks, if any (begin_session)
- *     kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
 {
-       struct kgdb_state kgdb_var;
-       struct kgdb_state *ks = &kgdb_var;
        unsigned long flags;
+       int sstep_tries = 100;
        int error = 0;
        int i, cpu;
-
-       ks->cpu = raw_smp_processor_id();
-       ks->ex_vector = evector;
-       ks->signo = signo;
-       ks->ex_vector = evector;
-       ks->err_code = ecode;
-       ks->kgdb_usethreadid = 0;
-       ks->linux_regs = regs;
-
-       if (kgdb_reenter_check(ks))
-               return 0; /* Ouch, double exception ! */
-
+       int trace_on = 0;
 acquirelock:
        /*
         * Interrupts will be restored by the 'trap return' code, except when
@@ -1416,24 +1373,55 @@ acquirelock:
         */
        local_irq_save(flags);
 
-       cpu = raw_smp_processor_id();
+       cpu = ks->cpu;
+       kgdb_info[cpu].debuggerinfo = regs;
+       kgdb_info[cpu].task = current;
+       /*
+        * Make sure the above info reaches the primary CPU before
+        * our cpu_in_kgdb[] flag setting does:
+        */
+       atomic_inc(&cpu_in_kgdb[cpu]);
 
        /*
-        * Acquire the kgdb_active lock:
+        * CPU will loop if it is a slave or request to become a kgdb
+        * master cpu and acquire the kgdb_active lock:
         */
-       while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
+       while (1) {
+               if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+                       if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+                               break;
+               } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+                       if (!atomic_read(&passive_cpu_wait[cpu]))
+                               goto return_normal;
+               } else {
+return_normal:
+                       /* Return to normal operation by executing any
+                        * hw breakpoint fixup.
+                        */
+                       if (arch_kgdb_ops.correct_hw_break)
+                               arch_kgdb_ops.correct_hw_break();
+                       if (trace_on)
+                               tracing_on();
+                       atomic_dec(&cpu_in_kgdb[cpu]);
+                       touch_softlockup_watchdog_sync();
+                       clocksource_touch_watchdog();
+                       local_irq_restore(flags);
+                       return 0;
+               }
                cpu_relax();
+       }
 
        /*
-        * Do not start the debugger connection on this CPU if the last
-        * instance of the exception handler wanted to come into the
-        * debugger on a different CPU via a single step
+        * For single stepping, try to only enter on the processor
+        * that was single stepping.  To guard against a deadlock, the
+        * kernel will only try for the value of sstep_tries before
+        * giving up and continuing on.
         */
        if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
-           atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
-
+           (kgdb_info[cpu].task &&
+            kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
                atomic_set(&kgdb_active, -1);
-               touch_softlockup_watchdog();
+               touch_softlockup_watchdog_sync();
                clocksource_touch_watchdog();
                local_irq_restore(flags);
 
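
This loop is what replaces the deleted kgdb_wait(): every CPU, master candidate or slave, now funnels through the same code and acts according to its exception_state bits. One subtlety: atomic_cmpxchg() returns the prior value, so a master's successful swap returns -1 and the "== cpu" test only breaks out on the following pass, once kgdb_active already holds this cpu; that is still correct because nothing else can change it after the swap. A rough userspace model with C11 atomics and two threads (compile with cc -pthread; all names invented, and seq_cst atomics stand in for the kernel's explicit barriers):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define DCPU_WANT_MASTER 0x1
#define DCPU_IS_SLAVE    0x4
#define NCPUS 2

static atomic_int kgdb_active = -1;         /* -1 means the lock is free */
static atomic_int passive_cpu_wait[NCPUS];  /* nonzero parks a slave cpu */
static _Atomic int exception_state[NCPUS];  /* kgdb_info[cpu].exception_state */

/* Slave side of the loop: spin until the master drops the wait count. */
static void *slave_cpu(void *arg)
{
    int cpu = (int)(long)arg;

    atomic_fetch_or(&exception_state[cpu], DCPU_IS_SLAVE);
    while (atomic_load(&passive_cpu_wait[cpu]))
        ;                                   /* cpu_relax() */
    atomic_fetch_and(&exception_state[cpu], ~DCPU_IS_SLAVE);
    printf("cpu %d released back to normal operation\n", cpu);
    return NULL;
}

int main(void)
{
    pthread_t t;
    int cpu = 0, old;

    /* Master side: claim kgdb_active the way the kernel loop does.
     * The swap lands on the first pass (old comes back -1); the
     * "== cpu" comparison then breaks on the second pass. */
    atomic_fetch_or(&exception_state[cpu], DCPU_WANT_MASTER);
    for (;;) {
        old = -1;
        atomic_compare_exchange_strong(&kgdb_active, &old, cpu);
        if (old == cpu)
            break;
    }

    /* Park cpu 1, then "round it up" (stands in for the IPI that
     * kgdb_roundup_cpus() would send). */
    atomic_fetch_add(&passive_cpu_wait[1], 1);
    pthread_create(&t, NULL, slave_cpu, (void *)1L);
    printf("cpu %d is master; the gdb session would run here\n", cpu);

    atomic_fetch_sub(&passive_cpu_wait[1], 1);  /* release the slave */
    pthread_join(t, NULL);
    atomic_store(&kgdb_active, -1);             /* free kgdb_active */
    atomic_fetch_and(&exception_state[cpu], ~DCPU_WANT_MASTER);
    return 0;
}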
@@ -1455,9 +1443,6 @@ acquirelock:
        if (kgdb_io_ops->pre_exception)
                kgdb_io_ops->pre_exception();
 
-       kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
-       kgdb_info[ks->cpu].task = current;
-
        kgdb_disable_hw_debug(ks->linux_regs);
 
        /*
@@ -1466,15 +1451,9 @@ acquirelock:
         */
        if (!kgdb_single_step) {
                for (i = 0; i < NR_CPUS; i++)
-                       atomic_set(&passive_cpu_wait[i], 1);
+                       atomic_inc(&passive_cpu_wait[i]);
        }
 
-       /*
-        * spin_lock code is good enough as a barrier so we don't
-        * need one here:
-        */
-       atomic_set(&cpu_in_kgdb[ks->cpu], 1);
-
 #ifdef CONFIG_SMP
        /* Signal the other CPUs to enter kgdb_wait() */
        if ((!kgdb_single_step) && kgdb_do_roundup)
@@ -1498,6 +1477,9 @@ acquirelock:
        kgdb_single_step = 0;
        kgdb_contthread = current;
        exception_level = 0;
+       trace_on = tracing_is_on();
+       if (trace_on)
+               tracing_off();
 
        /* Talk to debugger with gdbserial protocol */
        error = gdb_serial_stub(ks);
@@ -1506,13 +1488,11 @@ acquirelock:
        if (kgdb_io_ops->post_exception)
                kgdb_io_ops->post_exception();
 
-       kgdb_info[ks->cpu].debuggerinfo = NULL;
-       kgdb_info[ks->cpu].task = NULL;
-       atomic_set(&cpu_in_kgdb[ks->cpu], 0);
+       atomic_dec(&cpu_in_kgdb[ks->cpu]);
 
        if (!kgdb_single_step) {
                for (i = NR_CPUS-1; i >= 0; i--)
-                       atomic_set(&passive_cpu_wait[i], 0);
+                       atomic_dec(&passive_cpu_wait[i]);
                /*
                 * Wait till all the CPUs have quit
                 * from the debugger.
@@ -1524,22 +1504,70 @@ acquirelock:
        }
 
 kgdb_restore:
+       if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+               int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+               if (kgdb_info[sstep_cpu].task)
+                       kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+               else
+                       kgdb_sstep_pid = 0;
+       }
+       if (trace_on)
+               tracing_on();
        /* Free kgdb_active */
        atomic_set(&kgdb_active, -1);
-       touch_softlockup_watchdog();
+       touch_softlockup_watchdog_sync();
        clocksource_touch_watchdog();
        local_irq_restore(flags);
 
        return error;
 }
 
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ *     interface locks, if any (begin_session)
+ *     kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+       struct kgdb_state kgdb_var;
+       struct kgdb_state *ks = &kgdb_var;
+       int ret;
+
+       ks->cpu = raw_smp_processor_id();
+       ks->ex_vector = evector;
+       ks->signo = signo;
+       ks->ex_vector = evector;
+       ks->err_code = ecode;
+       ks->kgdb_usethreadid = 0;
+       ks->linux_regs = regs;
+
+       if (kgdb_reenter_check(ks))
+               return 0; /* Ouch, double exception ! */
+       kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+       ret = kgdb_cpu_enter(ks, regs);
+       kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
+       return ret;
+}
+
 int kgdb_nmicallback(int cpu, void *regs)
 {
 #ifdef CONFIG_SMP
+       struct kgdb_state kgdb_var;
+       struct kgdb_state *ks = &kgdb_var;
+
+       memset(ks, 0, sizeof(struct kgdb_state));
+       ks->cpu = cpu;
+       ks->linux_regs = regs;
+
        if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-           atomic_read(&kgdb_active) != cpu &&
-           atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
+           atomic_read(&kgdb_active) != -1 &&
+           atomic_read(&kgdb_active) != cpu) {
+               kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+               kgdb_cpu_enter(ks, regs);
+               kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
                return 0;
        }
 #endif
@@ -1715,11 +1743,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
  */
 void kgdb_breakpoint(void)
 {
-       atomic_set(&kgdb_setting_breakpoint, 1);
+       atomic_inc(&kgdb_setting_breakpoint);
        wmb(); /* Sync point before breakpoint */
        arch_kgdb_breakpoint();
        wmb(); /* Sync point after breakpoint */
-       atomic_set(&kgdb_setting_breakpoint, 0);
+       atomic_dec(&kgdb_setting_breakpoint);
 }
 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
 
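The same change shows up in several hunks (kgdb_setting_breakpoint here, cpu_in_kgdb and passive_cpu_wait earlier): atomic_set() of a boolean flag becomes atomic_inc()/atomic_dec() of a counter, so overlapping users nest instead of clobbering each other, since one path's atomic_set(..., 0) could erase a flag another path still needs. A minimal userspace illustration with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int setting_breakpoint;

/* Counter version, as in the new kgdb_breakpoint(): entries nest. */
static void bp_enter(void) { atomic_fetch_add(&setting_breakpoint, 1); }
static void bp_exit(void)  { atomic_fetch_sub(&setting_breakpoint, 1); }

int main(void)
{
    bp_enter();           /* first user */
    bp_enter();           /* second, overlapping user */
    bp_exit();            /* first one leaves... */
    /* With atomic_set(..., 0) this would already read "not set",
     * even though one user is still inside.  The counter stays 1. */
    printf("still setting a breakpoint? %d\n",
           atomic_load(&setting_breakpoint) > 0);
    bp_exit();
    return 0;
}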