diff options
author | Dimitri Sivanich <sivanich@sgi.com> | 2008-05-12 15:21:14 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-25 00:35:03 -0400 |
commit | 9383d9679056e6cc4e7ff70f31da945a268238f4 (patch) | |
tree | c6678a8c4e0fed0739e43f24b608d51856af6f1f /kernel/softlockup.c | |
parent | 9c44bc03fff44ff04237a7d92e35304a0e50c331 (diff) |
softlockup: fix softlockup_thresh unaligned access and disable detection at runtime
Fix unaligned access errors when setting softlockup_thresh on
64 bit platforms.
Allow softlockup detection to be disabled by setting
softlockup_thresh <= 0.
Detect that boot time softlockup detection has been disabled
earlier in softlockup_tick.
Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/softlockup.c')
-rw-r--r-- | kernel/softlockup.c | 12 |
1 file changed, 10 insertions, 2 deletions
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 78e0ad21cb0c..a3a0b239b7f7 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -25,7 +25,7 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp); | |||
25 | static DEFINE_PER_CPU(struct task_struct *, watchdog_task); | 25 | static DEFINE_PER_CPU(struct task_struct *, watchdog_task); |
26 | 26 | ||
27 | static int __read_mostly did_panic; | 27 | static int __read_mostly did_panic; |
28 | unsigned long __read_mostly softlockup_thresh = 60; | 28 | int __read_mostly softlockup_thresh = 60; |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * Should we panic (and reboot, if panic_timeout= is set) when a | 31 | * Should we panic (and reboot, if panic_timeout= is set) when a |
@@ -94,6 +94,14 @@ void softlockup_tick(void) | |||
94 | struct pt_regs *regs = get_irq_regs(); | 94 | struct pt_regs *regs = get_irq_regs(); |
95 | unsigned long now; | 95 | unsigned long now; |
96 | 96 | ||
97 | /* Is detection switched off? */ | ||
98 | if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) { | ||
99 | /* Be sure we don't false trigger if switched back on */ | ||
100 | if (touch_timestamp) | ||
101 | per_cpu(touch_timestamp, this_cpu) = 0; | ||
102 | return; | ||
103 | } | ||
104 | |||
97 | if (touch_timestamp == 0) { | 105 | if (touch_timestamp == 0) { |
98 | touch_softlockup_watchdog(); | 106 | touch_softlockup_watchdog(); |
99 | return; | 107 | return; |
@@ -104,7 +112,7 @@ void softlockup_tick(void) | |||
104 | /* report at most once a second */ | 112 | /* report at most once a second */ |
105 | if ((print_timestamp >= touch_timestamp && | 113 | if ((print_timestamp >= touch_timestamp && |
106 | print_timestamp < (touch_timestamp + 1)) || | 114 | print_timestamp < (touch_timestamp + 1)) || |
107 | did_panic || !per_cpu(watchdog_task, this_cpu)) { | 115 | did_panic) { |
108 | return; | 116 | return; |
109 | } | 117 | } |
110 | 118 | ||