Diffstat (limited to 'kernel/debug/debug_core.c')
 -rw-r--r--   kernel/debug/debug_core.c   983
 1 file changed, 983 insertions, 0 deletions
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
new file mode 100644
index 000000000000..8bc5eeffec8a
--- /dev/null
+++ b/kernel/debug/debug_core.c
@@ -0,0 +1,983 @@
| 1 | /* | ||
| 2 | * Kernel Debug Core | ||
| 3 | * | ||
| 4 | * Maintainer: Jason Wessel <jason.wessel@windriver.com> | ||
| 5 | * | ||
| 6 | * Copyright (C) 2000-2001 VERITAS Software Corporation. | ||
| 7 | * Copyright (C) 2002-2004 Timesys Corporation | ||
| 8 | * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com> | ||
| 9 | * Copyright (C) 2004 Pavel Machek <pavel@suse.cz> | ||
| 10 | * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org> | ||
| 11 | * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd. | ||
| 12 | * Copyright (C) 2005-2009 Wind River Systems, Inc. | ||
| 13 | * Copyright (C) 2007 MontaVista Software, Inc. | ||
| 14 | * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
| 15 | * | ||
| 16 | * Contributors at various stages not listed above: | ||
| 17 | * Jason Wessel ( jason.wessel@windriver.com ) | ||
| 18 | * George Anzinger <george@mvista.com> | ||
| 19 | * Anurekh Saxena (anurekh.saxena@timesys.com) | ||
| 20 | * Lake Stevens Instrument Division (Glenn Engel) | ||
| 21 | * Jim Kingdon, Cygnus Support. | ||
| 22 | * | ||
| 23 | * Original KGDB stub: David Grothe <dave@gcom.com>, | ||
| 24 | * Tigran Aivazian <tigran@sco.com> | ||
| 25 | * | ||
| 26 | * This file is licensed under the terms of the GNU General Public License | ||
| 27 | * version 2. This program is licensed "as is" without any warranty of any | ||
| 28 | * kind, whether express or implied. | ||
| 29 | */ | ||
| 30 | #include <linux/pid_namespace.h> | ||
| 31 | #include <linux/clocksource.h> | ||
| 32 | #include <linux/interrupt.h> | ||
| 33 | #include <linux/spinlock.h> | ||
| 34 | #include <linux/console.h> | ||
| 35 | #include <linux/threads.h> | ||
| 36 | #include <linux/uaccess.h> | ||
| 37 | #include <linux/kernel.h> | ||
| 38 | #include <linux/module.h> | ||
| 39 | #include <linux/ptrace.h> | ||
| 40 | #include <linux/string.h> | ||
| 41 | #include <linux/delay.h> | ||
| 42 | #include <linux/sched.h> | ||
| 43 | #include <linux/sysrq.h> | ||
| 44 | #include <linux/init.h> | ||
| 45 | #include <linux/kgdb.h> | ||
| 46 | #include <linux/kdb.h> | ||
| 47 | #include <linux/pid.h> | ||
| 48 | #include <linux/smp.h> | ||
| 49 | #include <linux/mm.h> | ||
| 50 | |||
| 51 | #include <asm/cacheflush.h> | ||
| 52 | #include <asm/byteorder.h> | ||
| 53 | #include <asm/atomic.h> | ||
| 54 | #include <asm/system.h> | ||
| 55 | |||
| 56 | #include "debug_core.h" | ||
| 57 | |||
| 58 | static int kgdb_break_asap; | ||
| 59 | |||
| 60 | struct debuggerinfo_struct kgdb_info[NR_CPUS]; | ||
| 61 | |||
| 62 | /** | ||
| 63 | * kgdb_connected - Is a host GDB connected to us? | ||
| 64 | */ | ||
| 65 | int kgdb_connected; | ||
| 66 | EXPORT_SYMBOL_GPL(kgdb_connected); | ||
| 67 | |||
| 68 | /* All the KGDB handlers are installed */ | ||
| 69 | int kgdb_io_module_registered; | ||
| 70 | |||
| 71 | /* Guard for recursive entry */ | ||
| 72 | static int exception_level; | ||
| 73 | |||
| 74 | struct kgdb_io *dbg_io_ops; | ||
| 75 | static DEFINE_SPINLOCK(kgdb_registration_lock); | ||
| 76 | |||
| 77 | /* kgdb console driver is loaded */ | ||
| 78 | static int kgdb_con_registered; | ||
| 79 | /* determine if kgdb console output should be used */ | ||
| 80 | static int kgdb_use_con; | ||
| 81 | /* Flag for alternate operations for early debugging */ | ||
| 82 | bool dbg_is_early = true; | ||
| 83 | /* Next cpu to become the master debug core */ | ||
| 84 | int dbg_switch_cpu; | ||
| 85 | |||
| 86 | /* Use kdb or gdbserver mode */ | ||
| 87 | int dbg_kdb_mode = 1; | ||
| 88 | |||
| 89 | static int __init opt_kgdb_con(char *str) | ||
| 90 | { | ||
| 91 | kgdb_use_con = 1; | ||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | |||
| 95 | early_param("kgdbcon", opt_kgdb_con); | ||
| 96 | |||
| 97 | module_param(kgdb_use_con, int, 0644); | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Holds information about breakpoints in a kernel. These breakpoints are | ||
| 101 | * added and removed by gdb. | ||
| 102 | */ | ||
| 103 | static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = { | ||
| 104 | [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED } | ||
| 105 | }; | ||
| 106 | |||
| 107 | /* | ||
| 108 | * The CPU# of the active CPU, or -1 if none: | ||
| 109 | */ | ||
| 110 | atomic_t kgdb_active = ATOMIC_INIT(-1); | ||
| 111 | EXPORT_SYMBOL_GPL(kgdb_active); | ||
| 112 | |||
| 113 | /* | ||
| 114 | * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early | ||
| 115 | * bootup code (which might not have percpu set up yet): | ||
| 116 | */ | ||
| 117 | static atomic_t passive_cpu_wait[NR_CPUS]; | ||
| 118 | static atomic_t cpu_in_kgdb[NR_CPUS]; | ||
| 119 | static atomic_t kgdb_break_tasklet_var; | ||
| 120 | atomic_t kgdb_setting_breakpoint; | ||
| 121 | |||
| 122 | struct task_struct *kgdb_usethread; | ||
| 123 | struct task_struct *kgdb_contthread; | ||
| 124 | |||
| 125 | int kgdb_single_step; | ||
| 126 | static pid_t kgdb_sstep_pid; | ||
| 127 | |||
| 128 | /* To keep track of the CPU which is doing the single stepping */ | ||
| 129 | atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); | ||
| 130 | |||
| 131 | /* | ||
| 132 | * If you are debugging a problem where roundup (the collection of | ||
| 133 | * all other CPUs) is a problem [this should be extremely rare], | ||
| 134 | * then use the nokgdbroundup option to avoid roundup. In that case | ||
| 135 | * the other CPUs might interfere with your debugging context, so | ||
| 136 | * use this with care: | ||
| 137 | */ | ||
| 138 | static int kgdb_do_roundup = 1; | ||
| 139 | |||
| 140 | static int __init opt_nokgdbroundup(char *str) | ||
| 141 | { | ||
| 142 | kgdb_do_roundup = 0; | ||
| 143 | |||
| 144 | return 0; | ||
| 145 | } | ||
| 146 | |||
| 147 | early_param("nokgdbroundup", opt_nokgdbroundup); | ||
| 148 | |||
| 149 | /* | ||
| 150 | * Finally, some KGDB code :-) | ||
| 151 | */ | ||
| 152 | |||
| 153 | /* | ||
| 154 | * Weak aliases for breakpoint management, | ||
| 155 | * can be overridden by architectures when needed: | ||
| 156 | */ | ||
| 157 | int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) | ||
| 158 | { | ||
| 159 | int err; | ||
| 160 | |||
| 161 | err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE); | ||
| 162 | if (err) | ||
| 163 | return err; | ||
| 164 | |||
| 165 | return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr, | ||
| 166 | BREAK_INSTR_SIZE); | ||
| 167 | } | ||
| 168 | |||
| 169 | int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle) | ||
| 170 | { | ||
| 171 | return probe_kernel_write((char *)addr, | ||
| 172 | (char *)bundle, BREAK_INSTR_SIZE); | ||
| 173 | } | ||
| 174 | |||
| 175 | int __weak kgdb_validate_break_address(unsigned long addr) | ||
| 176 | { | ||
| 177 | char tmp_variable[BREAK_INSTR_SIZE]; | ||
| 178 | int err; | ||
| 179 | /* Validate setting the breakpoint and then removing it. If the | ||
| 180 | * remove fails, the kernel needs to emit an error message because we | ||
| 181 | * are in deep trouble, not being able to put things back the way we | ||
| 182 | * found them. | ||
| 183 | */ | ||
| 184 | err = kgdb_arch_set_breakpoint(addr, tmp_variable); | ||
| 185 | if (err) | ||
| 186 | return err; | ||
| 187 | err = kgdb_arch_remove_breakpoint(addr, tmp_variable); | ||
| 188 | if (err) | ||
| 189 | printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " | ||
| 190 | "memory destroyed at: %lx", addr); | ||
| 191 | return err; | ||
| 192 | } | ||
| 193 | |||
| 194 | unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs) | ||
| 195 | { | ||
| 196 | return instruction_pointer(regs); | ||
| 197 | } | ||
| 198 | |||
| 199 | int __weak kgdb_arch_init(void) | ||
| 200 | { | ||
| 201 | return 0; | ||
| 202 | } | ||
| 203 | |||
| 204 | int __weak kgdb_skipexception(int exception, struct pt_regs *regs) | ||
| 205 | { | ||
| 206 | return 0; | ||
| 207 | } | ||
| 208 | |||
| 209 | /** | ||
| 210 | * kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb. | ||
| 211 | * @regs: Current &struct pt_regs. | ||
| 212 | * | ||
| 213 | * This function will be called if the particular architecture must | ||
| 214 | * disable hardware debugging while it is processing gdb packets or | ||
| 215 | * handling an exception. | ||
| 216 | */ | ||
| 217 | void __weak kgdb_disable_hw_debug(struct pt_regs *regs) | ||
| 218 | { | ||
| 219 | } | ||
| 220 | |||
| 221 | /* | ||
| 222 | * Some architectures need cache flushes when we set/clear a | ||
| 223 | * breakpoint: | ||
| 224 | */ | ||
| 225 | static void kgdb_flush_swbreak_addr(unsigned long addr) | ||
| 226 | { | ||
| 227 | if (!CACHE_FLUSH_IS_SAFE) | ||
| 228 | return; | ||
| 229 | |||
| 230 | if (current->mm && current->mm->mmap_cache) { | ||
| 231 | flush_cache_range(current->mm->mmap_cache, | ||
| 232 | addr, addr + BREAK_INSTR_SIZE); | ||
| 233 | } | ||
| 234 | /* Force flush instruction cache if it was outside the mm */ | ||
| 235 | flush_icache_range(addr, addr + BREAK_INSTR_SIZE); | ||
| 236 | } | ||
| 237 | |||
| 238 | /* | ||
| 239 | * SW breakpoint management: | ||
| 240 | */ | ||
| 241 | int dbg_activate_sw_breakpoints(void) | ||
| 242 | { | ||
| 243 | unsigned long addr; | ||
| 244 | int error; | ||
| 245 | int ret = 0; | ||
| 246 | int i; | ||
| 247 | |||
| 248 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | ||
| 249 | if (kgdb_break[i].state != BP_SET) | ||
| 250 | continue; | ||
| 251 | |||
| 252 | addr = kgdb_break[i].bpt_addr; | ||
| 253 | error = kgdb_arch_set_breakpoint(addr, | ||
| 254 | kgdb_break[i].saved_instr); | ||
| 255 | if (error) { | ||
| 256 | ret = error; | ||
| 257 | printk(KERN_INFO "KGDB: BP install failed: %lx\n", addr); | ||
| 258 | continue; | ||
| 259 | } | ||
| 260 | |||
| 261 | kgdb_flush_swbreak_addr(addr); | ||
| 262 | kgdb_break[i].state = BP_ACTIVE; | ||
| 263 | } | ||
| 264 | return ret; | ||
| 265 | } | ||
| 266 | |||
| 267 | int dbg_set_sw_break(unsigned long addr) | ||
| 268 | { | ||
| 269 | int err = kgdb_validate_break_address(addr); | ||
| 270 | int breakno = -1; | ||
| 271 | int i; | ||
| 272 | |||
| 273 | if (err) | ||
| 274 | return err; | ||
| 275 | |||
| 276 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | ||
| 277 | if ((kgdb_break[i].state == BP_SET) && | ||
| 278 | (kgdb_break[i].bpt_addr == addr)) | ||
| 279 | return -EEXIST; | ||
| 280 | } | ||
| 281 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | ||
| 282 | if (kgdb_break[i].state == BP_REMOVED && | ||
| 283 | kgdb_break[i].bpt_addr == addr) { | ||
| 284 | breakno = i; | ||
| 285 | break; | ||
| 286 | } | ||
| 287 | } | ||
| 288 | |||
| 289 | if (breakno == -1) { | ||
| 290 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | ||
| 291 | if (kgdb_break[i].state == BP_UNDEFINED) { | ||
| 292 | breakno = i; | ||
| 293 | break; | ||
| 294 | } | ||
| 295 | } | ||
| 296 | } | ||
| 297 | |||
| 298 | if (breakno == -1) | ||
| 299 | return -E2BIG; | ||
| 300 | |||
| 301 | kgdb_break[breakno].state = BP_SET; | ||
| 302 | kgdb_break[breakno].type = BP_BREAKPOINT; | ||
| 303 | kgdb_break[breakno].bpt_addr = addr; | ||
| 304 | |||
| 305 | return 0; | ||
| 306 | } | ||
| 307 | |||
| 308 | int dbg_deactivate_sw_breakpoints(void) | ||
| 309 | { | ||
| 310 | unsigned long addr; | ||
| 311 | int error; | ||
| 312 | int ret = 0; | ||
| 313 | int i; | ||
| 314 | |||
| 315 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | ||
| 316 | if (kgdb_break[i].state != BP_ACTIVE) | ||
| 317 | continue; | ||
| 318 | addr = kgdb_break[i].bpt_addr; | ||
| 319 | error = kgdb_arch_remove_breakpoint(addr, | ||
| 320 | kgdb_break[i].saved_instr); | ||
| 321 | if (error) { | ||
| 322 | printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr); | ||
| 323 | ret = error; | ||
| 324 | } | ||
| 325 | |||
| 326 | kgdb_flush_swbreak_addr(addr); | ||
| 327 | kgdb_break[i].state = BP_SET; | ||
| 328 | } | ||
| 329 | return ret; | ||
| 330 | } | ||
| 331 | |||
| 332 | int dbg_remove_sw_break(unsigned long addr) | ||
| 333 | { | ||
| 334 | int i; | ||
| 335 | |||
| 336 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | ||
| 337 | if ((kgdb_break[i].state == BP_SET) && | ||
| 338 | (kgdb_break[i].bpt_addr == addr)) { | ||
| 339 | kgdb_break[i].state = BP_REMOVED; | ||
| 340 | return 0; | ||
| 341 | } | ||
| 342 | } | ||
| 343 | return -ENOENT; | ||
| 344 | } | ||
| 345 | |||
| 346 | int kgdb_isremovedbreak(unsigned long addr) | ||
| 347 | { | ||
| 348 | int i; | ||
| 349 | |||
| 350 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | ||
| 351 | if ((kgdb_break[i].state == BP_REMOVED) && | ||
| 352 | (kgdb_break[i].bpt_addr == addr)) | ||
| 353 | return 1; | ||
| 354 | } | ||
| 355 | return 0; | ||
| 356 | } | ||
| 357 | |||
| 358 | int dbg_remove_all_break(void) | ||
| 359 | { | ||
| 360 | unsigned long addr; | ||
| 361 | int error; | ||
| 362 | int i; | ||
| 363 | |||
| 364 | /* Clear memory breakpoints. */ | ||
| 365 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | ||
| 366 | if (kgdb_break[i].state != BP_ACTIVE) | ||
| 367 | goto setundefined; | ||
| 368 | addr = kgdb_break[i].bpt_addr; | ||
| 369 | error = kgdb_arch_remove_breakpoint(addr, | ||
| 370 | kgdb_break[i].saved_instr); | ||
| 371 | if (error) | ||
| 372 | printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n", | ||
| 373 | addr); | ||
| 374 | setundefined: | ||
| 375 | kgdb_break[i].state = BP_UNDEFINED; | ||
| 376 | } | ||
| 377 | |||
| 378 | /* Clear hardware breakpoints. */ | ||
| 379 | if (arch_kgdb_ops.remove_all_hw_break) | ||
| 380 | arch_kgdb_ops.remove_all_hw_break(); | ||
| 381 | |||
| 382 | return 0; | ||
| 383 | } | ||
| 384 | |||
| 385 | /* | ||
| 386 | * Return true if there is a valid kgdb I/O module. Also, if no | ||
| 387 | * debugger is attached, a message can be printed to the console about | ||
| 388 | * waiting for the debugger to attach. | ||
| 389 | * | ||
| 390 | * The print_wait argument should only be true when called from inside | ||
| 391 | * the core kgdb_handle_exception, because it will wait for the | ||
| 392 | * debugger to attach. | ||
| 393 | */ | ||
| 394 | static int kgdb_io_ready(int print_wait) | ||
| 395 | { | ||
| 396 | if (!dbg_io_ops) | ||
| 397 | return 0; | ||
| 398 | if (kgdb_connected) | ||
| 399 | return 1; | ||
| 400 | if (atomic_read(&kgdb_setting_breakpoint)) | ||
| 401 | return 1; | ||
| 402 | if (print_wait) { | ||
| 403 | #ifdef CONFIG_KGDB_KDB | ||
| 404 | if (!dbg_kdb_mode) | ||
| 405 | printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n"); | ||
| 406 | #else | ||
| 407 | printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); | ||
| 408 | #endif | ||
| 409 | } | ||
| 410 | return 1; | ||
| 411 | } | ||
| 412 | |||
| 413 | static int kgdb_reenter_check(struct kgdb_state *ks) | ||
| 414 | { | ||
| 415 | unsigned long addr; | ||
| 416 | |||
| 417 | if (atomic_read(&kgdb_active) != raw_smp_processor_id()) | ||
| 418 | return 0; | ||
| 419 | |||
| 420 | /* Panic on recursive debugger calls: */ | ||
| 421 | exception_level++; | ||
| 422 | addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs); | ||
| 423 | dbg_deactivate_sw_breakpoints(); | ||
| 424 | |||
| 425 | /* | ||
| 426 | * If the breakpoint was removed successfully at the place the | ||
| 427 | * exception occurred, try to recover and print a warning to the | ||
| 428 | * end user, because the user planted a breakpoint in a place that | ||
| 429 | * KGDB needs in order to function. | ||
| 430 | */ | ||
| 431 | if (dbg_remove_sw_break(addr) == 0) { | ||
| 432 | exception_level = 0; | ||
| 433 | kgdb_skipexception(ks->ex_vector, ks->linux_regs); | ||
| 434 | dbg_activate_sw_breakpoints(); | ||
| 435 | printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n", | ||
| 436 | addr); | ||
| 437 | WARN_ON_ONCE(1); | ||
| 438 | |||
| 439 | return 1; | ||
| 440 | } | ||
| 441 | dbg_remove_all_break(); | ||
| 442 | kgdb_skipexception(ks->ex_vector, ks->linux_regs); | ||
| 443 | |||
| 444 | if (exception_level > 1) { | ||
| 445 | dump_stack(); | ||
| 446 | panic("Recursive entry to debugger"); | ||
| 447 | } | ||
| 448 | |||
| 449 | printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n"); | ||
| 450 | #ifdef CONFIG_KGDB_KDB | ||
| 451 | /* Allow kdb to debug itself one level */ | ||
| 452 | return 0; | ||
| 453 | #endif | ||
| 454 | dump_stack(); | ||
| 455 | panic("Recursive entry to debugger"); | ||
| 456 | |||
| 457 | return 1; | ||
| 458 | } | ||
| 459 | |||
| 460 | static void dbg_cpu_switch(int cpu, int next_cpu) | ||
| 461 | { | ||
| 462 | /* Mark the cpu we are switching away from as a slave when it | ||
| 463 | * holds the kgdb_active token. This must be done so that, in the | ||
| 464 | * loop that all the cpus wait in for the debug core, it will not | ||
| 465 | * enter again as the master. */ | ||
| 466 | if (cpu == atomic_read(&kgdb_active)) { | ||
| 467 | kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE; | ||
| 468 | kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER; | ||
| 469 | } | ||
| 470 | kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER; | ||
| 471 | } | ||
| 472 | |||
| 473 | static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs) | ||
| 474 | { | ||
| 475 | unsigned long flags; | ||
| 476 | int sstep_tries = 100; | ||
| 477 | int error; | ||
| 478 | int i, cpu; | ||
| 479 | int trace_on = 0; | ||
| 480 | acquirelock: | ||
| 481 | /* | ||
| 482 | * Interrupts will be restored by the 'trap return' code, except when | ||
| 483 | * single stepping. | ||
| 484 | */ | ||
| 485 | local_irq_save(flags); | ||
| 486 | |||
| 487 | cpu = ks->cpu; | ||
| 488 | kgdb_info[cpu].debuggerinfo = regs; | ||
| 489 | kgdb_info[cpu].task = current; | ||
| 490 | kgdb_info[cpu].ret_state = 0; | ||
| 491 | kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT; | ||
| 492 | /* | ||
| 493 | * Make sure the above info reaches the primary CPU before | ||
| 494 | * our cpu_in_kgdb[] flag setting does: | ||
| 495 | */ | ||
| 496 | atomic_inc(&cpu_in_kgdb[cpu]); | ||
| 497 | |||
| 498 | if (exception_level == 1) | ||
| 499 | goto cpu_master_loop; | ||
| 500 | |||
| 501 | /* | ||
| 502 | * CPU will loop if it is a slave or request to become a kgdb | ||
| 503 | * master cpu and acquire the kgdb_active lock: | ||
| 504 | */ | ||
| 505 | while (1) { | ||
| 506 | cpu_loop: | ||
| 507 | if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) { | ||
| 508 | kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER; | ||
| 509 | goto cpu_master_loop; | ||
| 510 | } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) { | ||
| 511 | if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu) | ||
| 512 | break; | ||
| 513 | } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) { | ||
| 514 | if (!atomic_read(&passive_cpu_wait[cpu])) | ||
| 515 | goto return_normal; | ||
| 516 | } else { | ||
| 517 | return_normal: | ||
| 518 | /* Return to normal operation by executing any | ||
| 519 | * hw breakpoint fixup. | ||
| 520 | */ | ||
| 521 | if (arch_kgdb_ops.correct_hw_break) | ||
| 522 | arch_kgdb_ops.correct_hw_break(); | ||
| 523 | if (trace_on) | ||
| 524 | tracing_on(); | ||
| 525 | atomic_dec(&cpu_in_kgdb[cpu]); | ||
| 526 | touch_softlockup_watchdog_sync(); | ||
| 527 | clocksource_touch_watchdog(); | ||
| 528 | local_irq_restore(flags); | ||
| 529 | return 0; | ||
| 530 | } | ||
| 531 | cpu_relax(); | ||
| 532 | } | ||
| 533 | |||
| 534 | /* | ||
| 535 | * For single stepping, try to only enter on the processor | ||
| 536 | * that was single stepping. To guard against a deadlock, the | ||
| 537 | * kernel will only try for the value of sstep_tries before | ||
| 538 | * giving up and continuing on. | ||
| 539 | */ | ||
| 540 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && | ||
| 541 | (kgdb_info[cpu].task && | ||
| 542 | kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { | ||
| 543 | atomic_set(&kgdb_active, -1); | ||
| 544 | touch_softlockup_watchdog_sync(); | ||
| 545 | clocksource_touch_watchdog(); | ||
| 546 | local_irq_restore(flags); | ||
| 547 | |||
| 548 | goto acquirelock; | ||
| 549 | } | ||
| 550 | |||
| 551 | if (!kgdb_io_ready(1)) { | ||
| 552 | kgdb_info[cpu].ret_state = 1; | ||
| 553 | goto kgdb_restore; /* No I/O connection, resume the system */ | ||
| 554 | } | ||
| 555 | |||
| 556 | /* | ||
| 557 | * Don't enter if we have hit a removed breakpoint. | ||
| 558 | */ | ||
| 559 | if (kgdb_skipexception(ks->ex_vector, ks->linux_regs)) | ||
| 560 | goto kgdb_restore; | ||
| 561 | |||
| 562 | /* Call the I/O driver's pre_exception routine */ | ||
| 563 | if (dbg_io_ops->pre_exception) | ||
| 564 | dbg_io_ops->pre_exception(); | ||
| 565 | |||
| 566 | kgdb_disable_hw_debug(ks->linux_regs); | ||
| 567 | |||
| 568 | /* | ||
| 569 | * Get the passive CPU lock which will hold all the non-primary | ||
| 570 | * CPUs in a spin state while the debugger is active | ||
| 571 | */ | ||
| 572 | if (!kgdb_single_step) { | ||
| 573 | for (i = 0; i < NR_CPUS; i++) | ||
| 574 | atomic_inc(&passive_cpu_wait[i]); | ||
| 575 | } | ||
| 576 | |||
| 577 | #ifdef CONFIG_SMP | ||
| 578 | /* Signal the other CPUs to enter kgdb_wait() */ | ||
| 579 | if ((!kgdb_single_step) && kgdb_do_roundup) | ||
| 580 | kgdb_roundup_cpus(flags); | ||
| 581 | #endif | ||
| 582 | |||
| 583 | /* | ||
| 584 | * Wait for the other CPUs to be notified and be waiting for us: | ||
| 585 | */ | ||
| 586 | for_each_online_cpu(i) { | ||
| 587 | while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i])) | ||
| 588 | cpu_relax(); | ||
| 589 | } | ||
| 590 | |||
| 591 | /* | ||
| 592 | * At this point the primary processor is completely | ||
| 593 | * in the debugger and all secondary CPUs are quiescent | ||
| 594 | */ | ||
| 595 | dbg_deactivate_sw_breakpoints(); | ||
| 596 | kgdb_single_step = 0; | ||
| 597 | kgdb_contthread = current; | ||
| 598 | exception_level = 0; | ||
| 599 | trace_on = tracing_is_on(); | ||
| 600 | if (trace_on) | ||
| 601 | tracing_off(); | ||
| 602 | |||
| 603 | while (1) { | ||
| 604 | cpu_master_loop: | ||
| 605 | if (dbg_kdb_mode) { | ||
| 606 | kgdb_connected = 1; | ||
| 607 | error = kdb_stub(ks); | ||
| 608 | kgdb_connected = 0; | ||
| 609 | } else { | ||
| 610 | error = gdb_serial_stub(ks); | ||
| 611 | } | ||
| 612 | |||
| 613 | if (error == DBG_PASS_EVENT) { | ||
| 614 | dbg_kdb_mode = !dbg_kdb_mode; | ||
| 615 | } else if (error == DBG_SWITCH_CPU_EVENT) { | ||
| 616 | dbg_cpu_switch(cpu, dbg_switch_cpu); | ||
| 617 | goto cpu_loop; | ||
| 618 | } else { | ||
| 619 | kgdb_info[cpu].ret_state = error; | ||
| 620 | break; | ||
| 621 | } | ||
| 622 | } | ||
| 623 | |||
| 624 | /* Call the I/O driver's post_exception routine */ | ||
| 625 | if (dbg_io_ops->post_exception) | ||
| 626 | dbg_io_ops->post_exception(); | ||
| 627 | |||
| 628 | atomic_dec(&cpu_in_kgdb[ks->cpu]); | ||
| 629 | |||
| 630 | if (!kgdb_single_step) { | ||
| 631 | for (i = NR_CPUS-1; i >= 0; i--) | ||
| 632 | atomic_dec(&passive_cpu_wait[i]); | ||
| 633 | /* | ||
| 634 | * Wait till all the CPUs have quit from the debugger, | ||
| 635 | * but allow a CPU that hit an exception and is | ||
| 636 | * waiting to become the master to remain in the debug | ||
| 637 | * core. | ||
| 638 | */ | ||
| 639 | for_each_online_cpu(i) { | ||
| 640 | while (kgdb_do_roundup && | ||
| 641 | atomic_read(&cpu_in_kgdb[i]) && | ||
| 642 | !(kgdb_info[i].exception_state & | ||
| 643 | DCPU_WANT_MASTER)) | ||
| 644 | cpu_relax(); | ||
| 645 | } | ||
| 646 | } | ||
| 647 | |||
| 648 | kgdb_restore: | ||
| 649 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { | ||
| 650 | int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step); | ||
| 651 | if (kgdb_info[sstep_cpu].task) | ||
| 652 | kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid; | ||
| 653 | else | ||
| 654 | kgdb_sstep_pid = 0; | ||
| 655 | } | ||
| 656 | if (trace_on) | ||
| 657 | tracing_on(); | ||
| 658 | /* Free kgdb_active */ | ||
| 659 | atomic_set(&kgdb_active, -1); | ||
| 660 | touch_softlockup_watchdog_sync(); | ||
| 661 | clocksource_touch_watchdog(); | ||
| 662 | local_irq_restore(flags); | ||
| 663 | |||
| 664 | return kgdb_info[cpu].ret_state; | ||
| 665 | } | ||
| 666 | |||
| 667 | /* | ||
| 668 | * kgdb_handle_exception() - main entry point from a kernel exception | ||
| 669 | * | ||
| 670 | * Locking hierarchy: | ||
| 671 | * interface locks, if any (begin_session) | ||
| 672 | * kgdb lock (kgdb_active) | ||
| 673 | */ | ||
| 674 | int | ||
| 675 | kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) | ||
| 676 | { | ||
| 677 | struct kgdb_state kgdb_var; | ||
| 678 | struct kgdb_state *ks = &kgdb_var; | ||
| 679 | int ret; | ||
| 680 | |||
| 681 | ks->cpu = raw_smp_processor_id(); | ||
| 682 | ks->ex_vector = evector; | ||
| 683 | ks->signo = signo; | ||
| 684 | ks->err_code = ecode; | ||
| 685 | ks->kgdb_usethreadid = 0; | ||
| 686 | ks->linux_regs = regs; | ||
| 687 | |||
| 688 | if (kgdb_reenter_check(ks)) | ||
| 689 | return 0; /* Ouch, double exception ! */ | ||
| 690 | kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER; | ||
| 691 | ret = kgdb_cpu_enter(ks, regs); | ||
| 692 | kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER | | ||
| 693 | DCPU_IS_SLAVE); | ||
| 694 | return ret; | ||
| 695 | } | ||
| 696 | |||
| 697 | int kgdb_nmicallback(int cpu, void *regs) | ||
| 698 | { | ||
| 699 | #ifdef CONFIG_SMP | ||
| 700 | struct kgdb_state kgdb_var; | ||
| 701 | struct kgdb_state *ks = &kgdb_var; | ||
| 702 | |||
| 703 | memset(ks, 0, sizeof(struct kgdb_state)); | ||
| 704 | ks->cpu = cpu; | ||
| 705 | ks->linux_regs = regs; | ||
| 706 | |||
| 707 | if (!atomic_read(&cpu_in_kgdb[cpu]) && | ||
| 708 | atomic_read(&kgdb_active) != -1 && | ||
| 709 | atomic_read(&kgdb_active) != cpu) { | ||
| 710 | kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE; | ||
| 711 | kgdb_cpu_enter(ks, regs); | ||
| 712 | kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE; | ||
| 713 | return 0; | ||
| 714 | } | ||
| 715 | #endif | ||
| 716 | return 1; | ||
| 717 | } | ||
| 718 | |||
| 719 | static void kgdb_console_write(struct console *co, const char *s, | ||
| 720 | unsigned count) | ||
| 721 | { | ||
| 722 | unsigned long flags; | ||
| 723 | |||
| 724 | /* If we're debugging, or KGDB has not connected, don't try | ||
| 725 | * to print. */ | ||
| 726 | if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode) | ||
| 727 | return; | ||
| 728 | |||
| 729 | local_irq_save(flags); | ||
| 730 | gdbstub_msg_write(s, count); | ||
| 731 | local_irq_restore(flags); | ||
| 732 | } | ||
| 733 | |||
| 734 | static struct console kgdbcons = { | ||
| 735 | .name = "kgdb", | ||
| 736 | .write = kgdb_console_write, | ||
| 737 | .flags = CON_PRINTBUFFER | CON_ENABLED, | ||
| 738 | .index = -1, | ||
| 739 | }; | ||
| 740 | |||
| 741 | #ifdef CONFIG_MAGIC_SYSRQ | ||
| 742 | static void sysrq_handle_dbg(int key, struct tty_struct *tty) | ||
| 743 | { | ||
| 744 | if (!dbg_io_ops) { | ||
| 745 | printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); | ||
| 746 | return; | ||
| 747 | } | ||
| 748 | if (!kgdb_connected) { | ||
| 749 | #ifdef CONFIG_KGDB_KDB | ||
| 750 | if (!dbg_kdb_mode) | ||
| 751 | printk(KERN_CRIT "KGDB or $3#33 for KDB\n"); | ||
| 752 | #else | ||
| 753 | printk(KERN_CRIT "Entering KGDB\n"); | ||
| 754 | #endif | ||
| 755 | } | ||
| 756 | |||
| 757 | kgdb_breakpoint(); | ||
| 758 | } | ||
| 759 | |||
| 760 | static struct sysrq_key_op sysrq_dbg_op = { | ||
| 761 | .handler = sysrq_handle_dbg, | ||
| 762 | .help_msg = "debug(G)", | ||
| 763 | .action_msg = "DEBUG", | ||
| 764 | }; | ||
| 765 | #endif | ||
| 766 | |||
| 767 | static int kgdb_panic_event(struct notifier_block *self, | ||
| 768 | unsigned long val, | ||
| 769 | void *data) | ||
| 770 | { | ||
| 771 | if (dbg_kdb_mode) | ||
| 772 | kdb_printf("PANIC: %s\n", (char *)data); | ||
| 773 | kgdb_breakpoint(); | ||
| 774 | return NOTIFY_DONE; | ||
| 775 | } | ||
| 776 | |||
| 777 | static struct notifier_block kgdb_panic_event_nb = { | ||
| 778 | .notifier_call = kgdb_panic_event, | ||
| 779 | .priority = INT_MAX, | ||
| 780 | }; | ||
| 781 | |||
| 782 | void __weak kgdb_arch_late(void) | ||
| 783 | { | ||
| 784 | } | ||
| 785 | |||
| 786 | void __init dbg_late_init(void) | ||
| 787 | { | ||
| 788 | dbg_is_early = false; | ||
| 789 | if (kgdb_io_module_registered) | ||
| 790 | kgdb_arch_late(); | ||
| 791 | kdb_init(KDB_INIT_FULL); | ||
| 792 | } | ||
| 793 | |||
| 794 | static void kgdb_register_callbacks(void) | ||
| 795 | { | ||
| 796 | if (!kgdb_io_module_registered) { | ||
| 797 | kgdb_io_module_registered = 1; | ||
| 798 | kgdb_arch_init(); | ||
| 799 | if (!dbg_is_early) | ||
| 800 | kgdb_arch_late(); | ||
| 801 | atomic_notifier_chain_register(&panic_notifier_list, | ||
| 802 | &kgdb_panic_event_nb); | ||
| 803 | #ifdef CONFIG_MAGIC_SYSRQ | ||
| 804 | register_sysrq_key('g', &sysrq_dbg_op); | ||
| 805 | #endif | ||
| 806 | if (kgdb_use_con && !kgdb_con_registered) { | ||
| 807 | register_console(&kgdbcons); | ||
| 808 | kgdb_con_registered = 1; | ||
| 809 | } | ||
| 810 | } | ||
| 811 | } | ||
| 812 | |||
| 813 | static void kgdb_unregister_callbacks(void) | ||
| 814 | { | ||
| 815 | /* | ||
| 816 | * When this routine is called KGDB should unregister from the | ||
| 817 | * panic handler and clean up, making sure it is not handling any | ||
| 818 | * break exceptions at the time. | ||
| 819 | */ | ||
| 820 | if (kgdb_io_module_registered) { | ||
| 821 | kgdb_io_module_registered = 0; | ||
| 822 | atomic_notifier_chain_unregister(&panic_notifier_list, | ||
| 823 | &kgdb_panic_event_nb); | ||
| 824 | kgdb_arch_exit(); | ||
| 825 | #ifdef CONFIG_MAGIC_SYSRQ | ||
| 826 | unregister_sysrq_key('g', &sysrq_dbg_op); | ||
| 827 | #endif | ||
| 828 | if (kgdb_con_registered) { | ||
| 829 | unregister_console(&kgdbcons); | ||
| 830 | kgdb_con_registered = 0; | ||
| 831 | } | ||
| 832 | } | ||
| 833 | } | ||
| 834 | |||
| 835 | /* | ||
| 836 | * There are times a tasklet needs to be used instead of a compiled-in | ||
| 837 | * breakpoint so as to cause an exception outside a kgdb I/O module, | ||
| 838 | * such as is the case with kgdboe, where calling a breakpoint in the | ||
| 839 | * I/O driver itself would be fatal. | ||
| 840 | */ | ||
| 841 | static void kgdb_tasklet_bpt(unsigned long ing) | ||
| 842 | { | ||
| 843 | kgdb_breakpoint(); | ||
| 844 | atomic_set(&kgdb_break_tasklet_var, 0); | ||
| 845 | } | ||
| 846 | |||
| 847 | static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0); | ||
| 848 | |||
| 849 | void kgdb_schedule_breakpoint(void) | ||
| 850 | { | ||
| 851 | if (atomic_read(&kgdb_break_tasklet_var) || | ||
| 852 | atomic_read(&kgdb_active) != -1 || | ||
| 853 | atomic_read(&kgdb_setting_breakpoint)) | ||
| 854 | return; | ||
| 855 | atomic_inc(&kgdb_break_tasklet_var); | ||
| 856 | tasklet_schedule(&kgdb_tasklet_breakpoint); | ||
| 857 | } | ||
| 858 | EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint); | ||
| 859 | |||
| 860 | static void kgdb_initial_breakpoint(void) | ||
| 861 | { | ||
| 862 | kgdb_break_asap = 0; | ||
| 863 | |||
| 864 | printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); | ||
| 865 | kgdb_breakpoint(); | ||
| 866 | } | ||
| 867 | |||
| 868 | /** | ||
| 869 | * kgdb_register_io_module - register KGDB IO module | ||
| 870 | * @new_dbg_io_ops: the io ops vector | ||
| 871 | * | ||
| 872 | * Register it with the KGDB core. | ||
| 873 | */ | ||
| 874 | int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops) | ||
| 875 | { | ||
| 876 | int err; | ||
| 877 | |||
| 878 | spin_lock(&kgdb_registration_lock); | ||
| 879 | |||
| 880 | if (dbg_io_ops) { | ||
| 881 | spin_unlock(&kgdb_registration_lock); | ||
| 882 | |||
| 883 | printk(KERN_ERR "kgdb: Another I/O driver is already " | ||
| 884 | "registered with KGDB.\n"); | ||
| 885 | return -EBUSY; | ||
| 886 | } | ||
| 887 | |||
| 888 | if (new_dbg_io_ops->init) { | ||
| 889 | err = new_dbg_io_ops->init(); | ||
| 890 | if (err) { | ||
| 891 | spin_unlock(&kgdb_registration_lock); | ||
| 892 | return err; | ||
| 893 | } | ||
| 894 | } | ||
| 895 | |||
| 896 | dbg_io_ops = new_dbg_io_ops; | ||
| 897 | |||
| 898 | spin_unlock(&kgdb_registration_lock); | ||
| 899 | |||
| 900 | printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", | ||
| 901 | new_dbg_io_ops->name); | ||
| 902 | |||
| 903 | /* Arm KGDB now. */ | ||
| 904 | kgdb_register_callbacks(); | ||
| 905 | |||
| 906 | if (kgdb_break_asap) | ||
| 907 | kgdb_initial_breakpoint(); | ||
| 908 | |||
| 909 | return 0; | ||
| 910 | } | ||
| 911 | EXPORT_SYMBOL_GPL(kgdb_register_io_module); | ||
| 912 | |||
| 913 | /** | ||
| 914 | * kgdb_unregister_io_module - unregister KGDB IO module | ||
| 915 | * @old_dbg_io_ops: the io ops vector | ||
| 916 | * | ||
| 917 | * Unregister it with the KGDB core. | ||
| 918 | */ | ||
| 919 | void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops) | ||
| 920 | { | ||
| 921 | BUG_ON(kgdb_connected); | ||
| 922 | |||
| 923 | /* | ||
| 924 | * KGDB is no longer able to communicate out, so | ||
| 925 | * unregister our callbacks and reset state. | ||
| 926 | */ | ||
| 927 | kgdb_unregister_callbacks(); | ||
| 928 | |||
| 929 | spin_lock(&kgdb_registration_lock); | ||
| 930 | |||
| 931 | WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops); | ||
| 932 | dbg_io_ops = NULL; | ||
| 933 | |||
| 934 | spin_unlock(&kgdb_registration_lock); | ||
| 935 | |||
| 936 | printk(KERN_INFO | ||
| 937 | "kgdb: Unregistered I/O driver %s, debugger disabled.\n", | ||
| 938 | old_dbg_io_ops->name); | ||
| 939 | } | ||
| 940 | EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); | ||
| 941 | |||
| 942 | int dbg_io_get_char(void) | ||
| 943 | { | ||
| 944 | int ret = dbg_io_ops->read_char(); | ||
| 945 | if (ret == NO_POLL_CHAR) | ||
| 946 | return -1; | ||
| 947 | if (!dbg_kdb_mode) | ||
| 948 | return ret; | ||
| 949 | if (ret == 127) | ||
| 950 | return 8; | ||
| 951 | return ret; | ||
| 952 | } | ||
| 953 | |||
| 954 | /** | ||
| 955 | * kgdb_breakpoint - generate breakpoint exception | ||
| 956 | * | ||
| 957 | * This function will generate a breakpoint exception. It is used at the | ||
| 958 | * beginning of a program to sync up with a debugger and can be used | ||
| 959 | * otherwise as a quick means to stop program execution and "break" into | ||
| 960 | * the debugger. | ||
| 961 | */ | ||
| 962 | void kgdb_breakpoint(void) | ||
| 963 | { | ||
| 964 | atomic_inc(&kgdb_setting_breakpoint); | ||
| 965 | wmb(); /* Sync point before breakpoint */ | ||
| 966 | arch_kgdb_breakpoint(); | ||
| 967 | wmb(); /* Sync point after breakpoint */ | ||
| 968 | atomic_dec(&kgdb_setting_breakpoint); | ||
| 969 | } | ||
| 970 | EXPORT_SYMBOL_GPL(kgdb_breakpoint); | ||
| 971 | |||
| 972 | static int __init opt_kgdb_wait(char *str) | ||
| 973 | { | ||
| 974 | kgdb_break_asap = 1; | ||
| 975 | |||
| 976 | kdb_init(KDB_INIT_EARLY); | ||
| 977 | if (kgdb_io_module_registered) | ||
| 978 | kgdb_initial_breakpoint(); | ||
| 979 | |||
| 980 | return 0; | ||
| 981 | } | ||
| 982 | |||
| 983 | early_param("kgdbwait", opt_kgdb_wait); | ||
