author     Andi Kleen <ak@suse.de>                2007-10-19 14:35:03 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2007-10-19 14:35:03 -0400
commit     39743c9ef717fd4f2b5583f010115c5f2482b8ae (patch)
tree       960daf67c159ddbce5323342db1fced74524bf74 /arch/x86
parent     b1992df3f070475b243b12ca1241a5938ef5f9bc (diff)
x86: use raw locks during oopses
Don't want any lockdep or other fragile machinery to run during oopses.
Use raw spinlocks directly for oops locking.
Also disables irq flag tracing there.
[ tglx: arch/x86 adaptation ]
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
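
In practice the change is a one-for-one substitution: each lockdep-instrumented call on the oops path (spin_lock_irqsave(), spin_trylock(), spin_unlock_irqrestore(), local_irq_save()) is replaced by the underlying raw primitive, so no lock-debugging or irq-flag-tracing code runs while the kernel is already dying. Below is a minimal sketch of the resulting pattern, using only primitives that appear in the diff; the example_* names are hypothetical and the snippet assumes kernel context of this era, not a standalone program.

```c
#include <linux/spinlock.h>	/* raw_spinlock_t, __raw_spin_lock(), __RAW_SPIN_LOCK_UNLOCKED */
#include <linux/irqflags.h>	/* raw_local_irq_save(), raw_local_irq_restore() */

/* Illustrative sketch only; mirrors the locking pattern the patch adopts. */
static raw_spinlock_t example_lock = __RAW_SPIN_LOCK_UNLOCKED;

static unsigned long example_oops_enter(void)
{
	unsigned long flags;

	/* Untraced irq disable: skips the TRACE_IRQFLAGS hooks that
	 * local_irq_save() would go through. */
	raw_local_irq_save(flags);
	/* Arch-level lock acquire: no lockdep bookkeeping, no spinlock
	 * debugging, nothing fragile that could itself fault here. */
	__raw_spin_lock(&example_lock);
	return flags;
}

static void example_oops_leave(unsigned long flags)
{
	__raw_spin_unlock(&example_lock);
	raw_local_irq_restore(flags);
}
```

oops_begin() in traps_64.c follows exactly this shape; the 32-bit die() differs only in that it saves the flags with raw_local_save_flags() after taking the lock rather than disabling interrupts up front.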
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/traps_32.c | 12
-rw-r--r--  arch/x86/kernel/traps_64.c | 16
2 files changed, 14 insertions, 14 deletions
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 746fad2c504f..3f02e0f42e6a 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -350,11 +350,11 @@ int is_valid_bugaddr(unsigned long eip)
 void die(const char * str, struct pt_regs * regs, long err)
 {
 	static struct {
-		spinlock_t lock;
+		raw_spinlock_t lock;
 		u32 lock_owner;
 		int lock_owner_depth;
 	} die = {
-		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
+		.lock = __RAW_SPIN_LOCK_UNLOCKED,
 		.lock_owner = -1,
 		.lock_owner_depth = 0
 	};
@@ -365,13 +365,14 @@ void die(const char * str, struct pt_regs * regs, long err)
 
 	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
-		spin_lock_irqsave(&die.lock, flags);
+		__raw_spin_lock(&die.lock);
+		raw_local_save_flags(flags);
 		die.lock_owner = smp_processor_id();
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);
 	}
 	else
-		local_save_flags(flags);
+		raw_local_save_flags(flags);
 
 	if (++die.lock_owner_depth < 3) {
 		unsigned long esp;
@@ -415,7 +416,8 @@ void die(const char * str, struct pt_regs * regs, long err)
 	bust_spinlocks(0);
 	die.lock_owner = -1;
 	add_taint(TAINT_DIE);
-	spin_unlock_irqrestore(&die.lock, flags);
+	__raw_spin_unlock(&die.lock);
+	raw_local_irq_restore(flags);
 
 	if (!regs)
 		return;
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index b4a9b3db1994..df690c3fa458 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -462,7 +462,7 @@ void out_of_line_bug(void)
 EXPORT_SYMBOL(out_of_line_bug);
 #endif
 
-static DEFINE_SPINLOCK(die_lock);
+static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
@@ -474,13 +474,13 @@ unsigned __kprobes long oops_begin(void)
 	oops_enter();
 
 	/* racy, but better than risking deadlock. */
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	cpu = smp_processor_id();
-	if (!spin_trylock(&die_lock)) {
+	if (!__raw_spin_trylock(&die_lock)) {
 		if (cpu == die_owner)
 			/* nested oops. should stop eventually */;
 		else
-			spin_lock(&die_lock);
+			__raw_spin_lock(&die_lock);
 	}
 	die_nest_count++;
 	die_owner = cpu;
@@ -494,12 +494,10 @@ void __kprobes oops_end(unsigned long flags)
 	die_owner = -1;
 	bust_spinlocks(0);
 	die_nest_count--;
-	if (die_nest_count)
-		/* We still own the lock */
-		local_irq_restore(flags);
-	else
+	if (!die_nest_count)
 		/* Nest count reaches zero, release the lock. */
-		spin_unlock_irqrestore(&die_lock, flags);
+		__raw_spin_unlock(&die_lock);
+	raw_local_irq_restore(flags);
 	if (panic_on_oops)
 		panic("Fatal exception");
 	oops_exit();
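
Beyond the raw-lock substitution, the restructured oops_end() above drops die_lock only when die_nest_count falls to zero and restores the saved flags unconditionally, while oops_begin() lets a CPU that already owns the lock fall through instead of deadlocking on itself during a nested oops. A condensed sketch of that control flow follows, with hypothetical ex_* names and using only calls visible in the hunks above:

```c
#include <linux/spinlock.h>	/* raw_spinlock_t, __raw_spin_trylock(), ... */
#include <linux/irqflags.h>	/* raw_local_irq_save()/raw_local_irq_restore() */
#include <linux/smp.h>		/* smp_processor_id() */

/* Hypothetical ex_* names; logic condensed from oops_begin()/oops_end(). */
static raw_spinlock_t ex_die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int ex_die_owner = -1;
static unsigned int ex_die_nest_count;

static unsigned long ex_oops_begin(void)
{
	unsigned long flags;
	int cpu;

	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&ex_die_lock)) {
		if (cpu == ex_die_owner)
			; /* nested oops on this CPU: we already hold the lock */
		else
			__raw_spin_lock(&ex_die_lock); /* another CPU owns it: wait */
	}
	ex_die_nest_count++;
	ex_die_owner = cpu;
	return flags;
}

static void ex_oops_end(unsigned long flags)
{
	ex_die_owner = -1;
	ex_die_nest_count--;
	if (!ex_die_nest_count)
		/* Outermost exit: release the lock for other CPUs. */
		__raw_spin_unlock(&ex_die_lock);
	raw_local_irq_restore(flags);
}
```

Both before and after the patch the flags are always restored on exit; pulling the restore out of the conditional merely separates it from the unlock, so it pairs symmetrically with the unconditional raw_local_irq_save() in oops_begin().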