diff options
author | Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com> | 2008-01-30 07:30:33 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:30:33 -0500 |
commit | 416b72182ac3f3f4931ed17d0256b1d805d1b553 (patch) | |
tree | f06587fbc2cff8fa1fb7ad6d03cc94e8b1e24d54 /arch/x86/kernel/nmi_64.c | |
parent | 6ea8bad1c06c8ccde381f1c848afe6a245bb33b5 (diff) |
x86: clean up nmi_32/64.c
clean up and make nmi_32/64.c more similar.
- white space and coding style clean up.
- nmi_cpu_busy is available on CONFIG_SMP.
- move functions __acpi_nmi_enable, acpi_nmi_enable,
__acpi_nmi_disable and acpi_nmi_disable.
- make variable names more similar.
Signed-off-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/nmi_64.c')
-rw-r--r-- | arch/x86/kernel/nmi_64.c | 95 |
1 file changed, 47 insertions, 48 deletions
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c index 53faef632fc6..fb99484d21cf 100644 --- a/arch/x86/kernel/nmi_64.c +++ b/arch/x86/kernel/nmi_64.c | |||
@@ -78,22 +78,22 @@ static __init void nmi_cpu_busy(void *data) | |||
78 | } | 78 | } |
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | int __init check_nmi_watchdog (void) | 81 | int __init check_nmi_watchdog(void) |
82 | { | 82 | { |
83 | int *counts; | 83 | int *prev_nmi_count; |
84 | int cpu; | 84 | int cpu; |
85 | 85 | ||
86 | if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED)) | 86 | if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED)) |
87 | return 0; | 87 | return 0; |
88 | 88 | ||
89 | if (!atomic_read(&nmi_active)) | 89 | if (!atomic_read(&nmi_active)) |
90 | return 0; | 90 | return 0; |
91 | 91 | ||
92 | counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); | 92 | prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); |
93 | if (!counts) | 93 | if (!prev_nmi_count) |
94 | return -1; | 94 | return -1; |
95 | 95 | ||
96 | printk(KERN_INFO "testing NMI watchdog ... "); | 96 | printk(KERN_INFO "Testing NMI watchdog ... "); |
97 | 97 | ||
98 | #ifdef CONFIG_SMP | 98 | #ifdef CONFIG_SMP |
99 | if (nmi_watchdog == NMI_LOCAL_APIC) | 99 | if (nmi_watchdog == NMI_LOCAL_APIC) |
@@ -101,30 +101,29 @@ int __init check_nmi_watchdog (void) | |||
101 | #endif | 101 | #endif |
102 | 102 | ||
103 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 103 | for (cpu = 0; cpu < NR_CPUS; cpu++) |
104 | counts[cpu] = cpu_pda(cpu)->__nmi_count; | 104 | prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count; |
105 | local_irq_enable(); | 105 | local_irq_enable(); |
106 | mdelay((20*1000)/nmi_hz); // wait 20 ticks | 106 | mdelay((20*1000)/nmi_hz); // wait 20 ticks |
107 | 107 | ||
108 | for_each_online_cpu(cpu) { | 108 | for_each_online_cpu(cpu) { |
109 | if (!per_cpu(wd_enabled, cpu)) | 109 | if (!per_cpu(wd_enabled, cpu)) |
110 | continue; | 110 | continue; |
111 | if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) { | 111 | if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) { |
112 | printk(KERN_WARNING "WARNING: CPU#%d: NMI " | 112 | printk(KERN_WARNING "WARNING: CPU#%d: NMI " |
113 | "appears to be stuck (%d->%d)!\n", | 113 | "appears to be stuck (%d->%d)!\n", |
114 | cpu, | 114 | cpu, |
115 | counts[cpu], | 115 | prev_nmi_count[cpu], |
116 | cpu_pda(cpu)->__nmi_count); | 116 | cpu_pda(cpu)->__nmi_count); |
117 | per_cpu(wd_enabled, cpu) = 0; | 117 | per_cpu(wd_enabled, cpu) = 0; |
118 | atomic_dec(&nmi_active); | 118 | atomic_dec(&nmi_active); |
119 | } | 119 | } |
120 | } | 120 | } |
121 | endflag = 1; | ||
121 | if (!atomic_read(&nmi_active)) { | 122 | if (!atomic_read(&nmi_active)) { |
122 | kfree(counts); | 123 | kfree(prev_nmi_count); |
123 | atomic_set(&nmi_active, -1); | 124 | atomic_set(&nmi_active, -1); |
124 | endflag = 1; | ||
125 | return -1; | 125 | return -1; |
126 | } | 126 | } |
127 | endflag = 1; | ||
128 | printk("OK.\n"); | 127 | printk("OK.\n"); |
129 | 128 | ||
130 | /* now that we know it works we can reduce NMI frequency to | 129 | /* now that we know it works we can reduce NMI frequency to |
@@ -132,7 +131,7 @@ int __init check_nmi_watchdog (void) | |||
132 | if (nmi_watchdog == NMI_LOCAL_APIC) | 131 | if (nmi_watchdog == NMI_LOCAL_APIC) |
133 | nmi_hz = lapic_adjust_nmi_hz(1); | 132 | nmi_hz = lapic_adjust_nmi_hz(1); |
134 | 133 | ||
135 | kfree(counts); | 134 | kfree(prev_nmi_count); |
136 | return 0; | 135 | return 0; |
137 | } | 136 | } |
138 | 137 | ||
@@ -159,34 +158,6 @@ static int __init setup_nmi_watchdog(char *str) | |||
159 | 158 | ||
160 | __setup("nmi_watchdog=", setup_nmi_watchdog); | 159 | __setup("nmi_watchdog=", setup_nmi_watchdog); |
161 | 160 | ||
162 | |||
163 | static void __acpi_nmi_disable(void *__unused) | ||
164 | { | ||
165 | apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * Disable timer based NMIs on all CPUs: | ||
170 | */ | ||
171 | void acpi_nmi_disable(void) | ||
172 | { | ||
173 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | ||
174 | on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); | ||
175 | } | ||
176 | |||
177 | static void __acpi_nmi_enable(void *__unused) | ||
178 | { | ||
179 | apic_write(APIC_LVT0, APIC_DM_NMI); | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * Enable timer based NMIs on all CPUs: | ||
184 | */ | ||
185 | void acpi_nmi_enable(void) | ||
186 | { | ||
187 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | ||
188 | on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); | ||
189 | } | ||
190 | #ifdef CONFIG_PM | 161 | #ifdef CONFIG_PM |
191 | 162 | ||
192 | static int nmi_pm_active; /* nmi_active before suspend */ | 163 | static int nmi_pm_active; /* nmi_active before suspend */ |
@@ -217,7 +188,7 @@ static struct sysdev_class nmi_sysclass = { | |||
217 | }; | 188 | }; |
218 | 189 | ||
219 | static struct sys_device device_lapic_nmi = { | 190 | static struct sys_device device_lapic_nmi = { |
220 | .id = 0, | 191 | .id = 0, |
221 | .cls = &nmi_sysclass, | 192 | .cls = &nmi_sysclass, |
222 | }; | 193 | }; |
223 | 194 | ||
@@ -231,7 +202,7 @@ static int __init init_lapic_nmi_sysfs(void) | |||
231 | if (nmi_watchdog != NMI_LOCAL_APIC) | 202 | if (nmi_watchdog != NMI_LOCAL_APIC) |
232 | return 0; | 203 | return 0; |
233 | 204 | ||
234 | if ( atomic_read(&nmi_active) < 0 ) | 205 | if (atomic_read(&nmi_active) < 0) |
235 | return 0; | 206 | return 0; |
236 | 207 | ||
237 | error = sysdev_class_register(&nmi_sysclass); | 208 | error = sysdev_class_register(&nmi_sysclass); |
@@ -244,9 +215,37 @@ late_initcall(init_lapic_nmi_sysfs); | |||
244 | 215 | ||
245 | #endif /* CONFIG_PM */ | 216 | #endif /* CONFIG_PM */ |
246 | 217 | ||
218 | static void __acpi_nmi_enable(void *__unused) | ||
219 | { | ||
220 | apic_write(APIC_LVT0, APIC_DM_NMI); | ||
221 | } | ||
222 | |||
223 | /* | ||
224 | * Enable timer based NMIs on all CPUs: | ||
225 | */ | ||
226 | void acpi_nmi_enable(void) | ||
227 | { | ||
228 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | ||
229 | on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); | ||
230 | } | ||
231 | |||
232 | static void __acpi_nmi_disable(void *__unused) | ||
233 | { | ||
234 | apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Disable timer based NMIs on all CPUs: | ||
239 | */ | ||
240 | void acpi_nmi_disable(void) | ||
241 | { | ||
242 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | ||
243 | on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); | ||
244 | } | ||
245 | |||
247 | void setup_apic_nmi_watchdog(void *unused) | 246 | void setup_apic_nmi_watchdog(void *unused) |
248 | { | 247 | { |
249 | if (__get_cpu_var(wd_enabled) == 1) | 248 | if (__get_cpu_var(wd_enabled)) |
250 | return; | 249 | return; |
251 | 250 | ||
252 | /* cheap hack to support suspend/resume */ | 251 | /* cheap hack to support suspend/resume */ |
@@ -311,8 +310,9 @@ void touch_nmi_watchdog(void) | |||
311 | } | 310 | } |
312 | } | 311 | } |
313 | 312 | ||
314 | touch_softlockup_watchdog(); | 313 | touch_softlockup_watchdog(); |
315 | } | 314 | } |
315 | EXPORT_SYMBOL(touch_nmi_watchdog); | ||
316 | 316 | ||
317 | int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) | 317 | int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) |
318 | { | 318 | { |
@@ -479,4 +479,3 @@ void __trigger_all_cpu_backtrace(void) | |||
479 | 479 | ||
480 | EXPORT_SYMBOL(nmi_active); | 480 | EXPORT_SYMBOL(nmi_active); |
481 | EXPORT_SYMBOL(nmi_watchdog); | 481 | EXPORT_SYMBOL(nmi_watchdog); |
482 | EXPORT_SYMBOL(touch_nmi_watchdog); | ||