Diffstat (limited to 'arch/sparc/kernel/nmi.c')
-rw-r--r--  arch/sparc/kernel/nmi.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 53 insertions(+), 19 deletions(-)
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b75bf502cd42..378eb53e0776 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -19,6 +19,7 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 
+#include <asm/perf_counter.h>
 #include <asm/ptrace.h>
 #include <asm/local.h>
 #include <asm/pcr.h>
@@ -31,13 +32,19 @@
  * level 14 as our IRQ off level.
  */
 
-static int nmi_watchdog_active;
 static int panic_on_timeout;
 
-int nmi_usable;
-EXPORT_SYMBOL_GPL(nmi_usable);
+/* nmi_active:
+ * >0: the NMI watchdog is active, but can be disabled
+ * <0: the NMI watchdog has not been set up, and cannot be enabled
+ *  0: the NMI watchdog is disabled, but can be enabled
+ */
+atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
+EXPORT_SYMBOL(nmi_active);
 
 static unsigned int nmi_hz = HZ;
+static DEFINE_PER_CPU(short, wd_enabled);
+static int endflag __initdata;
 
 static DEFINE_PER_CPU(unsigned int, last_irq_sum);
 static DEFINE_PER_CPU(local_t, alert_counter);
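
The nmi_active tri-state replaces the old nmi_usable/nmi_watchdog_active pair and distinguishes "disabled but can be enabled" (0) from "setup failed, cannot be enabled" (<0). A minimal sketch of how an external user such as oprofile might consult it; profiler_nmi_available() is a hypothetical name, not part of this patch:

        extern atomic_t nmi_active;             /* exported above */

        static int profiler_nmi_available(void)
        {
                if (atomic_read(&nmi_active) < 0)
                        return 0;       /* watchdog never came up; leave NMIs alone */
                return 1;               /* running (>0) or merely disabled (0) */
        }
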
@@ -45,7 +52,7 @@ static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
 {
-        if (nmi_watchdog_active) {
+        if (atomic_read(&nmi_active)) {
                 int cpu;
 
                 for_each_present_cpu(cpu) {
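
touch_nmi_watchdog() now keys off atomic_read(&nmi_active) rather than the removed flag; its callers are unchanged. The usual pattern, sketched here with hypothetical work-loop helpers, is to touch the watchdog from code that legitimately keeps a CPU busy past the timeout:

        while (!work_done()) {                  /* hypothetical loop */
                do_work_chunk();                /* hypothetical */
                touch_nmi_watchdog();           /* each CPU's NMI handler will
                                                 * reset its alert counter */
        }
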
@@ -78,6 +85,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
         if (do_panic || panic_on_oops)
                 panic("Non maskable interrupt");
 
+        nmi_exit();
         local_irq_enable();
         do_exit(SIGBUS);
 }
@@ -92,6 +100,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 
         local_cpu_data().__nmi_count++;
 
+        nmi_enter();
+
         if (notify_die(DIE_NMI, "nmi", regs, 0,
                        pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
                 touched = 1;
@@ -110,10 +120,12 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                 __get_cpu_var(last_irq_sum) = sum;
                 local_set(&__get_cpu_var(alert_counter), 0);
         }
-        if (nmi_usable) {
+        if (__get_cpu_var(wd_enabled)) {
                 write_pic(picl_value(nmi_hz));
                 pcr_ops->write(pcr_enable);
         }
+
+        nmi_exit();
 }
 
 static inline unsigned int get_nmi_count(int cpu)
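
Together with the nmi_enter() added above, the handler body is now bracketed so in_nmi() is accurate for the notifier chain and the watchdog logic; die_nmi() needed its own nmi_exit() because do_exit() never returns to the tail of perfctr_irq(). Reduced to a sketch, the resulting shape is:

        notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
        {
                nmi_enter();
                /* notify_die(), alert accounting, PIC reprogramming */
                nmi_exit();     /* every returning path ends here */
        }
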
@@ -121,8 +133,6 @@ static inline unsigned int get_nmi_count(int cpu)
         return cpu_data(cpu).__nmi_count;
 }
 
-static int endflag __initdata;
-
 static __init void nmi_cpu_busy(void *data)
 {
         local_irq_enable_in_hardirq();
@@ -143,12 +153,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
         printk(KERN_WARNING
                 "and attach the output of the 'dmesg' command.\n");
 
-        nmi_usable = 0;
+        per_cpu(wd_enabled, cpu) = 0;
+        atomic_dec(&nmi_active);
 }
 
-static void stop_watchdog(void *unused)
+void stop_nmi_watchdog(void *unused)
 {
         pcr_ops->write(PCR_PIC_PRIV);
+        __get_cpu_var(wd_enabled) = 0;
+        atomic_dec(&nmi_active);
 }
 
 static int __init check_nmi_watchdog(void)
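
stop_nmi_watchdog() loses its static so that subsystems wanting the performance counters can shut the watchdog down; each call clears that CPU's wd_enabled and drops nmi_active, keeping the counter equal to the number of CPUs still being watched. A sketch of the pause/resume pattern this enables, assuming only the start/stop pair defined in this file:

        on_each_cpu(stop_nmi_watchdog, NULL, 1);        /* nmi_active falls to 0 */
        /* ... exclusive use of the %pcr/%pic counters ... */
        on_each_cpu(start_nmi_watchdog, NULL, 1);       /* watchdog resumes */
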
@@ -156,6 +169,9 @@ static int __init check_nmi_watchdog(void)
         unsigned int *prev_nmi_count;
         int cpu, err;
 
+        if (!atomic_read(&nmi_active))
+                return 0;
+
         prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
         if (!prev_nmi_count) {
                 err = -ENOMEM;
@@ -172,12 +188,15 @@ static int __init check_nmi_watchdog(void)
         mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
 
         for_each_online_cpu(cpu) {
+                if (!per_cpu(wd_enabled, cpu))
+                        continue;
                 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
                         report_broken_nmi(cpu, prev_nmi_count);
         }
         endflag = 1;
-        if (!nmi_usable) {
+        if (!atomic_read(&nmi_active)) {
                 kfree(prev_nmi_count);
+                atomic_set(&nmi_active, -1);
                 err = -ENODEV;
                 goto error;
         }
@@ -188,12 +207,26 @@ static int __init check_nmi_watchdog(void)
         kfree(prev_nmi_count);
         return 0;
 error:
-        on_each_cpu(stop_watchdog, NULL, 1);
+        on_each_cpu(stop_nmi_watchdog, NULL, 1);
         return err;
 }
 
-static void start_watchdog(void *unused)
+void start_nmi_watchdog(void *unused)
 {
+        __get_cpu_var(wd_enabled) = 1;
+        atomic_inc(&nmi_active);
+
+        pcr_ops->write(PCR_PIC_PRIV);
+        write_pic(picl_value(nmi_hz));
+
+        pcr_ops->write(pcr_enable);
+}
+
+static void nmi_adjust_hz_one(void *unused)
+{
+        if (!__get_cpu_var(wd_enabled))
+                return;
+
         pcr_ops->write(PCR_PIC_PRIV);
         write_pic(picl_value(nmi_hz));
 
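
start_nmi_watchdog() now handles both the bookkeeping (wd_enabled, nmi_active) and the full PCR/PIC programming, while the new nmi_adjust_hz_one() only reprograms CPUs whose watchdog is already enabled, so retuning the rate can no longer silently re-enable a stopped watchdog. A caller wanting roughly one NMI per second would simply do (sketch; nmi_adjust_hz() is exported in the next hunk):

        nmi_adjust_hz(1);       /* reprograms the PIC only where wd_enabled is set */
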
@@ -203,13 +236,13 @@ static void start_watchdog(void *unused)
 void nmi_adjust_hz(unsigned int new_hz)
 {
         nmi_hz = new_hz;
-        on_each_cpu(start_watchdog, NULL, 1);
+        on_each_cpu(nmi_adjust_hz_one, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(nmi_adjust_hz);
 
 static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
 {
-        on_each_cpu(stop_watchdog, NULL, 1);
+        on_each_cpu(stop_nmi_watchdog, NULL, 1);
         return 0;
 }
 
@@ -221,18 +254,19 @@ int __init nmi_init(void)
 {
         int err;
 
-        nmi_usable = 1;
-
-        on_each_cpu(start_watchdog, NULL, 1);
+        on_each_cpu(start_nmi_watchdog, NULL, 1);
 
         err = check_nmi_watchdog();
         if (!err) {
                 err = register_reboot_notifier(&nmi_reboot_notifier);
                 if (err) {
-                        nmi_usable = 0;
-                        on_each_cpu(stop_watchdog, NULL, 1);
+                        on_each_cpu(stop_nmi_watchdog, NULL, 1);
+                        atomic_set(&nmi_active, -1);
                 }
         }
+        if (!err)
+                init_hw_perf_counters();
+
         return err;
 }
 
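
nmi_init() now brings the watchdog up on every CPU, validates it via check_nmi_watchdog() (which disables broken CPUs or fails with -ENODEV), registers the reboot notifier, and only on full success calls init_hw_perf_counters() from the newly included asm/perf_counter.h. Any failure leaves nmi_active at -1, the "cannot be enabled" state documented at the top, so a hypothetical late consumer can gate on it:

        if (atomic_read(&nmi_active) <= 0)
                return -ENODEV;         /* sketch: watchdog absent or disabled */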