-rw-r--r--   arch/i386/kernel/cpu/Makefile               2
-rw-r--r--   arch/i386/kernel/cpu/perfctr-watchdog.c   658
-rw-r--r--   arch/i386/kernel/nmi.c                    829
-rw-r--r--   include/asm-i386/nmi.h                      8
4 files changed, 721 insertions, 776 deletions
diff --git a/arch/i386/kernel/cpu/Makefile b/arch/i386/kernel/cpu/Makefile
index 5fb1a7560438..74f27a463db0 100644
--- a/arch/i386/kernel/cpu/Makefile
+++ b/arch/i386/kernel/cpu/Makefile
@@ -17,3 +17,5 @@ obj-$(CONFIG_X86_MCE) += mcheck/
17 | 17 | ||
18 | obj-$(CONFIG_MTRR) += mtrr/ | 18 | obj-$(CONFIG_MTRR) += mtrr/ |
19 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ | 19 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ |
20 | |||
21 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o | ||
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
new file mode 100644
index 000000000000..2b04c8f1db62
--- /dev/null
+++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
@@ -0,0 +1,658 @@
1 | /* local apic based NMI watchdog for various CPUs. | ||
2 | This file also handles reservation of performance counters for coordination | ||
3 | with other users (like oprofile). | ||
4 | |||
5 | Note that these events normally don't tick when the CPU idles. This means | ||
6 | the frequency varies with CPU load. | ||
7 | |||
8 | Original code for K7/P6 written by Keith Owens */ | ||
9 | |||
10 | #include <linux/percpu.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/bitops.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <linux/nmi.h> | ||
16 | #include <asm/apic.h> | ||
17 | #include <asm/intel_arch_perfmon.h> | ||
18 | |||
19 | struct nmi_watchdog_ctlblk { | ||
20 | unsigned int cccr_msr; | ||
21 | unsigned int perfctr_msr; /* the MSR to reset in NMI handler */ | ||
22 | unsigned int evntsel_msr; /* the MSR to select the events to handle */ | ||
23 | }; | ||
24 | |||
25 | /* Interface defining a CPU specific perfctr watchdog */ | ||
26 | struct wd_ops { | ||
27 | int (*reserve)(void); | ||
28 | void (*unreserve)(void); | ||
29 | int (*setup)(unsigned nmi_hz); | ||
30 | void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz); | ||
31 | void (*stop)(void *); | ||
32 | unsigned perfctr; | ||
33 | unsigned evntsel; | ||
34 | u64 checkbit; | ||
35 | }; | ||
36 | |||
37 | static struct wd_ops *wd_ops; | ||
38 | |||
39 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's | ||
40 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) | ||
41 | */ | ||
42 | #define NMI_MAX_COUNTER_BITS 66 | ||
43 | |||
44 | /* perfctr_nmi_owner tracks the ownership of the perfctr registers: | ||
45 | * evtsel_nmi_owner tracks the ownership of the event selection | ||
46 | * - different performance counters/ event selection may be reserved for | ||
47 | * different subsystems this reservation system just tries to coordinate | ||
48 | * things a little | ||
49 | */ | ||
50 | static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS); | ||
51 | static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS); | ||
52 | |||
53 | static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk); | ||
54 | |||
55 | /* converts an msr to an appropriate reservation bit */ | ||
56 | static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | ||
57 | { | ||
58 | return wd_ops ? msr - wd_ops->perfctr : 0; | ||
59 | } | ||
60 | |||
61 | /* converts an msr to an appropriate reservation bit */ | ||
62 | /* returns the bit offset of the event selection register */ | ||
63 | static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | ||
64 | { | ||
65 | return wd_ops ? msr - wd_ops->evntsel : 0; | ||
66 | } | ||
67 | |||
68 | /* checks for a bit availability (hack for oprofile) */ | ||
69 | int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) | ||
70 | { | ||
71 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
72 | |||
73 | return (!test_bit(counter, perfctr_nmi_owner)); | ||
74 | } | ||
75 | |||
76 | /* checks the an msr for availability */ | ||
77 | int avail_to_resrv_perfctr_nmi(unsigned int msr) | ||
78 | { | ||
79 | unsigned int counter; | ||
80 | |||
81 | counter = nmi_perfctr_msr_to_bit(msr); | ||
82 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
83 | |||
84 | return (!test_bit(counter, perfctr_nmi_owner)); | ||
85 | } | ||
86 | |||
87 | int reserve_perfctr_nmi(unsigned int msr) | ||
88 | { | ||
89 | unsigned int counter; | ||
90 | |||
91 | counter = nmi_perfctr_msr_to_bit(msr); | ||
92 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
93 | |||
94 | if (!test_and_set_bit(counter, perfctr_nmi_owner)) | ||
95 | return 1; | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | void release_perfctr_nmi(unsigned int msr) | ||
100 | { | ||
101 | unsigned int counter; | ||
102 | |||
103 | counter = nmi_perfctr_msr_to_bit(msr); | ||
104 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
105 | |||
106 | clear_bit(counter, perfctr_nmi_owner); | ||
107 | } | ||
108 | |||
109 | int reserve_evntsel_nmi(unsigned int msr) | ||
110 | { | ||
111 | unsigned int counter; | ||
112 | |||
113 | counter = nmi_evntsel_msr_to_bit(msr); | ||
114 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
115 | |||
116 | if (!test_and_set_bit(counter, evntsel_nmi_owner)) | ||
117 | return 1; | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | void release_evntsel_nmi(unsigned int msr) | ||
122 | { | ||
123 | unsigned int counter; | ||
124 | |||
125 | counter = nmi_evntsel_msr_to_bit(msr); | ||
126 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
127 | |||
128 | clear_bit(counter, evntsel_nmi_owner); | ||
129 | } | ||
130 | |||
131 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi); | ||
132 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); | ||
133 | EXPORT_SYMBOL(reserve_perfctr_nmi); | ||
134 | EXPORT_SYMBOL(release_perfctr_nmi); | ||
135 | EXPORT_SYMBOL(reserve_evntsel_nmi); | ||
136 | EXPORT_SYMBOL(release_evntsel_nmi); | ||
137 | |||
138 | void disable_lapic_nmi_watchdog(void) | ||
139 | { | ||
140 | BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); | ||
141 | |||
142 | if (atomic_read(&nmi_active) <= 0) | ||
143 | return; | ||
144 | |||
145 | on_each_cpu(wd_ops->stop, NULL, 0, 1); | ||
146 | wd_ops->unreserve(); | ||
147 | |||
148 | BUG_ON(atomic_read(&nmi_active) != 0); | ||
149 | } | ||
150 | |||
151 | void enable_lapic_nmi_watchdog(void) | ||
152 | { | ||
153 | BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); | ||
154 | |||
155 | /* are we already enabled */ | ||
156 | if (atomic_read(&nmi_active) != 0) | ||
157 | return; | ||
158 | |||
159 | /* are we lapic aware */ | ||
160 | if (!wd_ops) | ||
161 | return; | ||
162 | if (!wd_ops->reserve()) { | ||
163 | printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n"); | ||
164 | return; | ||
165 | } | ||
166 | |||
167 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1); | ||
168 | touch_nmi_watchdog(); | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Activate the NMI watchdog via the local APIC. | ||
173 | */ | ||
174 | |||
175 | static unsigned int adjust_for_32bit_ctr(unsigned int hz) | ||
176 | { | ||
177 | u64 counter_val; | ||
178 | unsigned int retval = hz; | ||
179 | |||
180 | /* | ||
181 | * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter | ||
182 | * are writable, with higher bits sign extending from bit 31. | ||
183 | * So, we can only program the counter with 31 bit values and | ||
184 | * 32nd bit should be 1, for 33.. to be 1. | ||
185 | * Find the appropriate nmi_hz | ||
186 | */ | ||
187 | counter_val = (u64)cpu_khz * 1000; | ||
188 | do_div(counter_val, retval); | ||
189 | if (counter_val > 0x7fffffffULL) { | ||
190 | u64 count = (u64)cpu_khz * 1000; | ||
191 | do_div(count, 0x7fffffffUL); | ||
192 | retval = count + 1; | ||
193 | } | ||
194 | return retval; | ||
195 | } | ||
196 | |||
197 | static void | ||
198 | write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz) | ||
199 | { | ||
200 | u64 count = (u64)cpu_khz * 1000; | ||
201 | |||
202 | do_div(count, nmi_hz); | ||
203 | if(descr) | ||
204 | Dprintk("setting %s to -0x%08Lx\n", descr, count); | ||
205 | wrmsrl(perfctr_msr, 0 - count); | ||
206 | } | ||
207 | |||
208 | static void write_watchdog_counter32(unsigned int perfctr_msr, | ||
209 | const char *descr, unsigned nmi_hz) | ||
210 | { | ||
211 | u64 count = (u64)cpu_khz * 1000; | ||
212 | |||
213 | do_div(count, nmi_hz); | ||
214 | if(descr) | ||
215 | Dprintk("setting %s to -0x%08Lx\n", descr, count); | ||
216 | wrmsr(perfctr_msr, (u32)(-count), 0); | ||
217 | } | ||
218 | |||
219 | /* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface | ||
220 | nicely stable so there is not much variety */ | ||
221 | |||
222 | #define K7_EVNTSEL_ENABLE (1 << 22) | ||
223 | #define K7_EVNTSEL_INT (1 << 20) | ||
224 | #define K7_EVNTSEL_OS (1 << 17) | ||
225 | #define K7_EVNTSEL_USR (1 << 16) | ||
226 | #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 | ||
227 | #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING | ||
228 | |||
229 | static int setup_k7_watchdog(unsigned nmi_hz) | ||
230 | { | ||
231 | unsigned int perfctr_msr, evntsel_msr; | ||
232 | unsigned int evntsel; | ||
233 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
234 | |||
235 | perfctr_msr = MSR_K7_PERFCTR0; | ||
236 | evntsel_msr = MSR_K7_EVNTSEL0; | ||
237 | |||
238 | wrmsrl(perfctr_msr, 0UL); | ||
239 | |||
240 | evntsel = K7_EVNTSEL_INT | ||
241 | | K7_EVNTSEL_OS | ||
242 | | K7_EVNTSEL_USR | ||
243 | | K7_NMI_EVENT; | ||
244 | |||
245 | /* setup the timer */ | ||
246 | wrmsr(evntsel_msr, evntsel, 0); | ||
247 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); | ||
248 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
249 | evntsel |= K7_EVNTSEL_ENABLE; | ||
250 | wrmsr(evntsel_msr, evntsel, 0); | ||
251 | |||
252 | wd->perfctr_msr = perfctr_msr; | ||
253 | wd->evntsel_msr = evntsel_msr; | ||
254 | wd->cccr_msr = 0; //unused | ||
255 | return 1; | ||
256 | } | ||
257 | |||
258 | static void single_msr_stop_watchdog(void *arg) | ||
259 | { | ||
260 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
261 | |||
262 | wrmsr(wd->evntsel_msr, 0, 0); | ||
263 | } | ||
264 | |||
265 | static int single_msr_reserve(void) | ||
266 | { | ||
267 | if (!reserve_perfctr_nmi(wd_ops->perfctr)) | ||
268 | return 0; | ||
269 | |||
270 | if (!reserve_evntsel_nmi(wd_ops->evntsel)) { | ||
271 | release_perfctr_nmi(wd_ops->perfctr); | ||
272 | return 0; | ||
273 | } | ||
274 | return 1; | ||
275 | } | ||
276 | |||
277 | static void single_msr_unreserve(void) | ||
278 | { | ||
279 | release_evntsel_nmi(wd_ops->perfctr); | ||
280 | release_perfctr_nmi(wd_ops->evntsel); | ||
281 | } | ||
282 | |||
283 | static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
284 | { | ||
285 | /* start the cycle over again */ | ||
286 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); | ||
287 | } | ||
288 | |||
289 | static struct wd_ops k7_wd_ops = { | ||
290 | .reserve = single_msr_reserve, | ||
291 | .unreserve = single_msr_unreserve, | ||
292 | .setup = setup_k7_watchdog, | ||
293 | .rearm = single_msr_rearm, | ||
294 | .stop = single_msr_stop_watchdog, | ||
295 | .perfctr = MSR_K7_PERFCTR0, | ||
296 | .evntsel = MSR_K7_EVNTSEL0, | ||
297 | .checkbit = 1ULL<<63, | ||
298 | }; | ||
299 | |||
300 | /* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */ | ||
301 | |||
302 | #define P6_EVNTSEL0_ENABLE (1 << 22) | ||
303 | #define P6_EVNTSEL_INT (1 << 20) | ||
304 | #define P6_EVNTSEL_OS (1 << 17) | ||
305 | #define P6_EVNTSEL_USR (1 << 16) | ||
306 | #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 | ||
307 | #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED | ||
308 | |||
309 | static int setup_p6_watchdog(unsigned nmi_hz) | ||
310 | { | ||
311 | unsigned int perfctr_msr, evntsel_msr; | ||
312 | unsigned int evntsel; | ||
313 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
314 | |||
315 | perfctr_msr = MSR_P6_PERFCTR0; | ||
316 | evntsel_msr = MSR_P6_EVNTSEL0; | ||
317 | |||
318 | wrmsrl(perfctr_msr, 0UL); | ||
319 | |||
320 | evntsel = P6_EVNTSEL_INT | ||
321 | | P6_EVNTSEL_OS | ||
322 | | P6_EVNTSEL_USR | ||
323 | | P6_NMI_EVENT; | ||
324 | |||
325 | /* setup the timer */ | ||
326 | wrmsr(evntsel_msr, evntsel, 0); | ||
327 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
328 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); | ||
329 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
330 | evntsel |= P6_EVNTSEL0_ENABLE; | ||
331 | wrmsr(evntsel_msr, evntsel, 0); | ||
332 | |||
333 | wd->perfctr_msr = perfctr_msr; | ||
334 | wd->evntsel_msr = evntsel_msr; | ||
335 | wd->cccr_msr = 0; //unused | ||
336 | return 1; | ||
337 | } | ||
338 | |||
339 | static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
340 | { | ||
341 | /* P6 based Pentium M need to re-unmask | ||
342 | * the apic vector but it doesn't hurt | ||
343 | * other P6 variant. | ||
344 | * ArchPerfom/Core Duo also needs this */ | ||
345 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
346 | /* P6/ARCH_PERFMON has 32 bit counter write */ | ||
347 | write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); | ||
348 | } | ||
349 | |||
350 | static struct wd_ops p6_wd_ops = { | ||
351 | .reserve = single_msr_reserve, | ||
352 | .unreserve = single_msr_unreserve, | ||
353 | .setup = setup_p6_watchdog, | ||
354 | .rearm = p6_rearm, | ||
355 | .stop = single_msr_stop_watchdog, | ||
356 | .perfctr = MSR_P6_PERFCTR0, | ||
357 | .evntsel = MSR_P6_EVNTSEL0, | ||
358 | .checkbit = 1ULL<<39, | ||
359 | }; | ||
360 | |||
361 | /* Intel P4 performance counters. By far the most complicated of all. */ | ||
362 | |||
363 | #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) | ||
364 | #define P4_ESCR_EVENT_SELECT(N) ((N)<<25) | ||
365 | #define P4_ESCR_OS (1<<3) | ||
366 | #define P4_ESCR_USR (1<<2) | ||
367 | #define P4_CCCR_OVF_PMI0 (1<<26) | ||
368 | #define P4_CCCR_OVF_PMI1 (1<<27) | ||
369 | #define P4_CCCR_THRESHOLD(N) ((N)<<20) | ||
370 | #define P4_CCCR_COMPLEMENT (1<<19) | ||
371 | #define P4_CCCR_COMPARE (1<<18) | ||
372 | #define P4_CCCR_REQUIRED (3<<16) | ||
373 | #define P4_CCCR_ESCR_SELECT(N) ((N)<<13) | ||
374 | #define P4_CCCR_ENABLE (1<<12) | ||
375 | #define P4_CCCR_OVF (1<<31) | ||
376 | |||
377 | /* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter | ||
378 | CRU_ESCR0 (with any non-null event selector) through a complemented | ||
379 | max threshold. [IA32-Vol3, Section 14.9.9] */ | ||
380 | |||
381 | static int setup_p4_watchdog(unsigned nmi_hz) | ||
382 | { | ||
383 | unsigned int perfctr_msr, evntsel_msr, cccr_msr; | ||
384 | unsigned int evntsel, cccr_val; | ||
385 | unsigned int misc_enable, dummy; | ||
386 | unsigned int ht_num; | ||
387 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
388 | |||
389 | rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy); | ||
390 | if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL)) | ||
391 | return 0; | ||
392 | |||
393 | #ifdef CONFIG_SMP | ||
394 | /* detect which hyperthread we are on */ | ||
395 | if (smp_num_siblings == 2) { | ||
396 | unsigned int ebx, apicid; | ||
397 | |||
398 | ebx = cpuid_ebx(1); | ||
399 | apicid = (ebx >> 24) & 0xff; | ||
400 | ht_num = apicid & 1; | ||
401 | } else | ||
402 | #endif | ||
403 | ht_num = 0; | ||
404 | |||
405 | /* performance counters are shared resources | ||
406 | * assign each hyperthread its own set | ||
407 | * (re-use the ESCR0 register, seems safe | ||
408 | * and keeps the cccr_val the same) | ||
409 | */ | ||
410 | if (!ht_num) { | ||
411 | /* logical cpu 0 */ | ||
412 | perfctr_msr = MSR_P4_IQ_PERFCTR0; | ||
413 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
414 | cccr_msr = MSR_P4_IQ_CCCR0; | ||
415 | cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); | ||
416 | } else { | ||
417 | /* logical cpu 1 */ | ||
418 | perfctr_msr = MSR_P4_IQ_PERFCTR1; | ||
419 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
420 | cccr_msr = MSR_P4_IQ_CCCR1; | ||
421 | cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4); | ||
422 | } | ||
423 | |||
424 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | ||
425 | | P4_ESCR_OS | ||
426 | | P4_ESCR_USR; | ||
427 | |||
428 | cccr_val |= P4_CCCR_THRESHOLD(15) | ||
429 | | P4_CCCR_COMPLEMENT | ||
430 | | P4_CCCR_COMPARE | ||
431 | | P4_CCCR_REQUIRED; | ||
432 | |||
433 | wrmsr(evntsel_msr, evntsel, 0); | ||
434 | wrmsr(cccr_msr, cccr_val, 0); | ||
435 | write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); | ||
436 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
437 | cccr_val |= P4_CCCR_ENABLE; | ||
438 | wrmsr(cccr_msr, cccr_val, 0); | ||
439 | wd->perfctr_msr = perfctr_msr; | ||
440 | wd->evntsel_msr = evntsel_msr; | ||
441 | wd->cccr_msr = cccr_msr; | ||
442 | return 1; | ||
443 | } | ||
444 | |||
445 | static void stop_p4_watchdog(void *arg) | ||
446 | { | ||
447 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
448 | wrmsr(wd->cccr_msr, 0, 0); | ||
449 | wrmsr(wd->evntsel_msr, 0, 0); | ||
450 | } | ||
451 | |||
452 | static int p4_reserve(void) | ||
453 | { | ||
454 | if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0)) | ||
455 | return 0; | ||
456 | #ifdef CONFIG_SMP | ||
457 | if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1)) | ||
458 | goto fail1; | ||
459 | #endif | ||
460 | if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0)) | ||
461 | goto fail2; | ||
462 | /* RED-PEN why is ESCR1 not reserved here? */ | ||
463 | return 1; | ||
464 | fail2: | ||
465 | #ifdef CONFIG_SMP | ||
466 | if (smp_num_siblings > 1) | ||
467 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR1); | ||
468 | fail1: | ||
469 | #endif | ||
470 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); | ||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | static void p4_unreserve(void) | ||
475 | { | ||
476 | #ifdef CONFIG_SMP | ||
477 | if (smp_num_siblings > 1) | ||
478 | release_evntsel_nmi(MSR_P4_IQ_PERFCTR1); | ||
479 | #endif | ||
480 | release_evntsel_nmi(MSR_P4_IQ_PERFCTR0); | ||
481 | release_perfctr_nmi(MSR_P4_CRU_ESCR0); | ||
482 | } | ||
483 | |||
484 | static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
485 | { | ||
486 | unsigned dummy; | ||
487 | /* | ||
488 | * P4 quirks: | ||
489 | * - An overflown perfctr will assert its interrupt | ||
490 | * until the OVF flag in its CCCR is cleared. | ||
491 | * - LVTPC is masked on interrupt and must be | ||
492 | * unmasked by the LVTPC handler. | ||
493 | */ | ||
494 | rdmsrl(wd->cccr_msr, dummy); | ||
495 | dummy &= ~P4_CCCR_OVF; | ||
496 | wrmsrl(wd->cccr_msr, dummy); | ||
497 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
498 | /* start the cycle over again */ | ||
499 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); | ||
500 | } | ||
501 | |||
502 | static struct wd_ops p4_wd_ops = { | ||
503 | .reserve = p4_reserve, | ||
504 | .unreserve = p4_unreserve, | ||
505 | .setup = setup_p4_watchdog, | ||
506 | .rearm = p4_rearm, | ||
507 | .stop = stop_p4_watchdog, | ||
508 | /* RED-PEN this is wrong for the other sibling */ | ||
509 | .perfctr = MSR_P4_BPU_PERFCTR0, | ||
510 | .evntsel = MSR_P4_BSU_ESCR0, | ||
511 | .checkbit = 1ULL<<39, | ||
512 | }; | ||
513 | |||
514 | /* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully | ||
515 | all future Intel CPUs. */ | ||
516 | |||
517 | #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL | ||
518 | #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK | ||
519 | |||
520 | static int setup_intel_arch_watchdog(unsigned nmi_hz) | ||
521 | { | ||
522 | unsigned int ebx; | ||
523 | union cpuid10_eax eax; | ||
524 | unsigned int unused; | ||
525 | unsigned int perfctr_msr, evntsel_msr; | ||
526 | unsigned int evntsel; | ||
527 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
528 | |||
529 | /* | ||
530 | * Check whether the Architectural PerfMon supports | ||
531 | * Unhalted Core Cycles Event or not. | ||
532 | * NOTE: Corresponding bit = 0 in ebx indicates event present. | ||
533 | */ | ||
534 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | ||
535 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
536 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | ||
537 | return 0; | ||
538 | |||
539 | perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1; | ||
540 | evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1; | ||
541 | |||
542 | wrmsrl(perfctr_msr, 0UL); | ||
543 | |||
544 | evntsel = ARCH_PERFMON_EVENTSEL_INT | ||
545 | | ARCH_PERFMON_EVENTSEL_OS | ||
546 | | ARCH_PERFMON_EVENTSEL_USR | ||
547 | | ARCH_PERFMON_NMI_EVENT_SEL | ||
548 | | ARCH_PERFMON_NMI_EVENT_UMASK; | ||
549 | |||
550 | /* setup the timer */ | ||
551 | wrmsr(evntsel_msr, evntsel, 0); | ||
552 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
553 | write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); | ||
554 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
555 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
556 | wrmsr(evntsel_msr, evntsel, 0); | ||
557 | |||
558 | wd->perfctr_msr = perfctr_msr; | ||
559 | wd->evntsel_msr = evntsel_msr; | ||
560 | wd->cccr_msr = 0; //unused | ||
561 | wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1); | ||
562 | return 1; | ||
563 | } | ||
564 | |||
565 | static struct wd_ops intel_arch_wd_ops = { | ||
566 | .reserve = single_msr_reserve, | ||
567 | .unreserve = single_msr_unreserve, | ||
568 | .setup = setup_intel_arch_watchdog, | ||
569 | .rearm = p6_rearm, | ||
570 | .stop = single_msr_stop_watchdog, | ||
571 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | ||
572 | .evntsel = MSR_ARCH_PERFMON_EVENTSEL0, | ||
573 | }; | ||
574 | |||
575 | static void probe_nmi_watchdog(void) | ||
576 | { | ||
577 | switch (boot_cpu_data.x86_vendor) { | ||
578 | case X86_VENDOR_AMD: | ||
579 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && | ||
580 | boot_cpu_data.x86 != 16) | ||
581 | return; | ||
582 | wd_ops = &k7_wd_ops; | ||
583 | break; | ||
584 | case X86_VENDOR_INTEL: | ||
585 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | ||
586 | wd_ops = &intel_arch_wd_ops; | ||
587 | break; | ||
588 | } | ||
589 | switch (boot_cpu_data.x86) { | ||
590 | case 6: | ||
591 | if (boot_cpu_data.x86_model > 0xd) | ||
592 | return; | ||
593 | |||
594 | wd_ops = &p6_wd_ops; | ||
595 | break; | ||
596 | case 15: | ||
597 | if (boot_cpu_data.x86_model > 0x4) | ||
598 | return; | ||
599 | |||
600 | wd_ops = &p4_wd_ops; | ||
601 | break; | ||
602 | default: | ||
603 | return; | ||
604 | } | ||
605 | break; | ||
606 | } | ||
607 | } | ||
608 | |||
609 | /* Interface to nmi.c */ | ||
610 | |||
611 | int lapic_watchdog_init(unsigned nmi_hz) | ||
612 | { | ||
613 | if (!wd_ops) { | ||
614 | probe_nmi_watchdog(); | ||
615 | if (!wd_ops) | ||
616 | return -1; | ||
617 | } | ||
618 | |||
619 | if (!(wd_ops->setup(nmi_hz))) { | ||
620 | printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n", | ||
621 | raw_smp_processor_id()); | ||
622 | return -1; | ||
623 | } | ||
624 | |||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | void lapic_watchdog_stop(void) | ||
629 | { | ||
630 | if (wd_ops) | ||
631 | wd_ops->stop(NULL); | ||
632 | } | ||
633 | |||
634 | unsigned lapic_adjust_nmi_hz(unsigned hz) | ||
635 | { | ||
636 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
637 | if (wd->perfctr_msr == MSR_P6_PERFCTR0 || | ||
638 | wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1) | ||
639 | hz = adjust_for_32bit_ctr(hz); | ||
640 | return hz; | ||
641 | } | ||
642 | |||
643 | int lapic_wd_event(unsigned nmi_hz) | ||
644 | { | ||
645 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
646 | u64 ctr; | ||
647 | rdmsrl(wd->perfctr_msr, ctr); | ||
648 | if (ctr & wd_ops->checkbit) { /* perfctr still running? */ | ||
649 | return 0; | ||
650 | } | ||
651 | wd_ops->rearm(wd, nmi_hz); | ||
652 | return 1; | ||
653 | } | ||
654 | |||
655 | int lapic_watchdog_ok(void) | ||
656 | { | ||
657 | return wd_ops != NULL; | ||
658 | } | ||
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 2dec1b105737..33cf2f3c444f 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -20,7 +20,6 @@
20 | #include <linux/sysdev.h> | 20 | #include <linux/sysdev.h> |
21 | #include <linux/sysctl.h> | 21 | #include <linux/sysctl.h> |
22 | #include <linux/percpu.h> | 22 | #include <linux/percpu.h> |
23 | #include <linux/dmi.h> | ||
24 | #include <linux/kprobes.h> | 23 | #include <linux/kprobes.h> |
25 | #include <linux/cpumask.h> | 24 | #include <linux/cpumask.h> |
26 | #include <linux/kernel_stat.h> | 25 | #include <linux/kernel_stat.h> |
@@ -28,30 +27,14 @@
28 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
29 | #include <asm/nmi.h> | 28 | #include <asm/nmi.h> |
30 | #include <asm/kdebug.h> | 29 | #include <asm/kdebug.h> |
31 | #include <asm/intel_arch_perfmon.h> | ||
32 | 30 | ||
33 | #include "mach_traps.h" | 31 | #include "mach_traps.h" |
34 | 32 | ||
35 | int unknown_nmi_panic; | 33 | int unknown_nmi_panic; |
36 | int nmi_watchdog_enabled; | 34 | int nmi_watchdog_enabled; |
37 | 35 | ||
38 | /* perfctr_nmi_owner tracks the ownership of the perfctr registers: | ||
39 | * evtsel_nmi_owner tracks the ownership of the event selection | ||
40 | * - different performance counters/ event selection may be reserved for | ||
41 | * different subsystems this reservation system just tries to coordinate | ||
42 | * things a little | ||
43 | */ | ||
44 | |||
45 | /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's | ||
46 | * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now) | ||
47 | */ | ||
48 | #define NMI_MAX_COUNTER_BITS 66 | ||
49 | #define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS) | ||
50 | |||
51 | static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]); | ||
52 | static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]); | ||
53 | |||
54 | static cpumask_t backtrace_mask = CPU_MASK_NONE; | 36 | static cpumask_t backtrace_mask = CPU_MASK_NONE; |
37 | |||
55 | /* nmi_active: | 38 | /* nmi_active: |
56 | * >0: the lapic NMI watchdog is active, but can be disabled | 39 | * >0: the lapic NMI watchdog is active, but can be disabled |
57 | * <0: the lapic NMI watchdog has not been set up, and cannot | 40 | * <0: the lapic NMI watchdog has not been set up, and cannot |
@@ -63,203 +46,11 @@ atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
63 | unsigned int nmi_watchdog = NMI_DEFAULT; | 46 | unsigned int nmi_watchdog = NMI_DEFAULT; |
64 | static unsigned int nmi_hz = HZ; | 47 | static unsigned int nmi_hz = HZ; |
65 | 48 | ||
66 | struct nmi_watchdog_ctlblk { | 49 | static DEFINE_PER_CPU(short, wd_enabled); |
67 | int enabled; | ||
68 | u64 check_bit; | ||
69 | unsigned int cccr_msr; | ||
70 | unsigned int perfctr_msr; /* the MSR to reset in NMI handler */ | ||
71 | unsigned int evntsel_msr; /* the MSR to select the events to handle */ | ||
72 | }; | ||
73 | static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk); | ||
74 | 50 | ||
75 | /* local prototypes */ | 51 | /* local prototypes */ |
76 | static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); | 52 | static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); |
77 | 53 | ||
78 | /* converts an msr to an appropriate reservation bit */ | ||
79 | static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | ||
80 | { | ||
81 | /* returns the bit offset of the performance counter register */ | ||
82 | switch (boot_cpu_data.x86_vendor) { | ||
83 | case X86_VENDOR_AMD: | ||
84 | return (msr - MSR_K7_PERFCTR0); | ||
85 | case X86_VENDOR_INTEL: | ||
86 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | ||
87 | return (msr - MSR_ARCH_PERFMON_PERFCTR0); | ||
88 | |||
89 | switch (boot_cpu_data.x86) { | ||
90 | case 6: | ||
91 | return (msr - MSR_P6_PERFCTR0); | ||
92 | case 15: | ||
93 | return (msr - MSR_P4_BPU_PERFCTR0); | ||
94 | } | ||
95 | } | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | /* converts an msr to an appropriate reservation bit */ | ||
100 | static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | ||
101 | { | ||
102 | /* returns the bit offset of the event selection register */ | ||
103 | switch (boot_cpu_data.x86_vendor) { | ||
104 | case X86_VENDOR_AMD: | ||
105 | return (msr - MSR_K7_EVNTSEL0); | ||
106 | case X86_VENDOR_INTEL: | ||
107 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | ||
108 | return (msr - MSR_ARCH_PERFMON_EVENTSEL0); | ||
109 | |||
110 | switch (boot_cpu_data.x86) { | ||
111 | case 6: | ||
112 | return (msr - MSR_P6_EVNTSEL0); | ||
113 | case 15: | ||
114 | return (msr - MSR_P4_BSU_ESCR0); | ||
115 | } | ||
116 | } | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | /* checks for a bit availability (hack for oprofile) */ | ||
121 | int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) | ||
122 | { | ||
123 | int cpu; | ||
124 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
125 | for_each_possible_cpu (cpu) { | ||
126 | if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0])) | ||
127 | return 0; | ||
128 | } | ||
129 | return 1; | ||
130 | } | ||
131 | |||
132 | /* checks the an msr for availability */ | ||
133 | int avail_to_resrv_perfctr_nmi(unsigned int msr) | ||
134 | { | ||
135 | unsigned int counter; | ||
136 | int cpu; | ||
137 | |||
138 | counter = nmi_perfctr_msr_to_bit(msr); | ||
139 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
140 | |||
141 | for_each_possible_cpu (cpu) { | ||
142 | if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0])) | ||
143 | return 0; | ||
144 | } | ||
145 | return 1; | ||
146 | } | ||
147 | |||
148 | static int __reserve_perfctr_nmi(int cpu, unsigned int msr) | ||
149 | { | ||
150 | unsigned int counter; | ||
151 | if (cpu < 0) | ||
152 | cpu = smp_processor_id(); | ||
153 | |||
154 | counter = nmi_perfctr_msr_to_bit(msr); | ||
155 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
156 | |||
157 | if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0])) | ||
158 | return 1; | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | static void __release_perfctr_nmi(int cpu, unsigned int msr) | ||
163 | { | ||
164 | unsigned int counter; | ||
165 | if (cpu < 0) | ||
166 | cpu = smp_processor_id(); | ||
167 | |||
168 | counter = nmi_perfctr_msr_to_bit(msr); | ||
169 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
170 | |||
171 | clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)[0]); | ||
172 | } | ||
173 | |||
174 | int reserve_perfctr_nmi(unsigned int msr) | ||
175 | { | ||
176 | int cpu, i; | ||
177 | for_each_possible_cpu (cpu) { | ||
178 | if (!__reserve_perfctr_nmi(cpu, msr)) { | ||
179 | for_each_possible_cpu (i) { | ||
180 | if (i >= cpu) | ||
181 | break; | ||
182 | __release_perfctr_nmi(i, msr); | ||
183 | } | ||
184 | return 0; | ||
185 | } | ||
186 | } | ||
187 | return 1; | ||
188 | } | ||
189 | |||
190 | void release_perfctr_nmi(unsigned int msr) | ||
191 | { | ||
192 | int cpu; | ||
193 | for_each_possible_cpu (cpu) { | ||
194 | __release_perfctr_nmi(cpu, msr); | ||
195 | } | ||
196 | } | ||
197 | |||
198 | int __reserve_evntsel_nmi(int cpu, unsigned int msr) | ||
199 | { | ||
200 | unsigned int counter; | ||
201 | if (cpu < 0) | ||
202 | cpu = smp_processor_id(); | ||
203 | |||
204 | counter = nmi_evntsel_msr_to_bit(msr); | ||
205 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
206 | |||
207 | if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0])) | ||
208 | return 1; | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static void __release_evntsel_nmi(int cpu, unsigned int msr) | ||
213 | { | ||
214 | unsigned int counter; | ||
215 | if (cpu < 0) | ||
216 | cpu = smp_processor_id(); | ||
217 | |||
218 | counter = nmi_evntsel_msr_to_bit(msr); | ||
219 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
220 | |||
221 | clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]); | ||
222 | } | ||
223 | |||
224 | int reserve_evntsel_nmi(unsigned int msr) | ||
225 | { | ||
226 | int cpu, i; | ||
227 | for_each_possible_cpu (cpu) { | ||
228 | if (!__reserve_evntsel_nmi(cpu, msr)) { | ||
229 | for_each_possible_cpu (i) { | ||
230 | if (i >= cpu) | ||
231 | break; | ||
232 | __release_evntsel_nmi(i, msr); | ||
233 | } | ||
234 | return 0; | ||
235 | } | ||
236 | } | ||
237 | return 1; | ||
238 | } | ||
239 | |||
240 | void release_evntsel_nmi(unsigned int msr) | ||
241 | { | ||
242 | int cpu; | ||
243 | for_each_possible_cpu (cpu) { | ||
244 | __release_evntsel_nmi(cpu, msr); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | static __cpuinit inline int nmi_known_cpu(void) | ||
249 | { | ||
250 | switch (boot_cpu_data.x86_vendor) { | ||
251 | case X86_VENDOR_AMD: | ||
252 | return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6) | ||
253 | || (boot_cpu_data.x86 == 16)); | ||
254 | case X86_VENDOR_INTEL: | ||
255 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | ||
256 | return 1; | ||
257 | else | ||
258 | return ((boot_cpu_data.x86 == 15) || (boot_cpu_data.x86 == 6)); | ||
259 | } | ||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | static int endflag __initdata = 0; | 54 | static int endflag __initdata = 0; |
264 | 55 | ||
265 | #ifdef CONFIG_SMP | 56 | #ifdef CONFIG_SMP |
@@ -281,28 +72,6 @@ static __init void nmi_cpu_busy(void *data)
281 | } | 72 | } |
282 | #endif | 73 | #endif |
283 | 74 | ||
284 | static unsigned int adjust_for_32bit_ctr(unsigned int hz) | ||
285 | { | ||
286 | u64 counter_val; | ||
287 | unsigned int retval = hz; | ||
288 | |||
289 | /* | ||
290 | * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter | ||
291 | * are writable, with higher bits sign extending from bit 31. | ||
292 | * So, we can only program the counter with 31 bit values and | ||
293 | * 32nd bit should be 1, for 33.. to be 1. | ||
294 | * Find the appropriate nmi_hz | ||
295 | */ | ||
296 | counter_val = (u64)cpu_khz * 1000; | ||
297 | do_div(counter_val, retval); | ||
298 | if (counter_val > 0x7fffffffULL) { | ||
299 | u64 count = (u64)cpu_khz * 1000; | ||
300 | do_div(count, 0x7fffffffUL); | ||
301 | retval = count + 1; | ||
302 | } | ||
303 | return retval; | ||
304 | } | ||
305 | |||
306 | static int __init check_nmi_watchdog(void) | 75 | static int __init check_nmi_watchdog(void) |
307 | { | 76 | { |
308 | unsigned int *prev_nmi_count; | 77 | unsigned int *prev_nmi_count; |
@@ -335,14 +104,14 @@ static int __init check_nmi_watchdog(void)
335 | if (!cpu_isset(cpu, cpu_callin_map)) | 104 | if (!cpu_isset(cpu, cpu_callin_map)) |
336 | continue; | 105 | continue; |
337 | #endif | 106 | #endif |
338 | if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled) | 107 | if (!per_cpu(wd_enabled, cpu)) |
339 | continue; | 108 | continue; |
340 | if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { | 109 | if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { |
341 | printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", | 110 | printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", |
342 | cpu, | 111 | cpu, |
343 | prev_nmi_count[cpu], | 112 | prev_nmi_count[cpu], |
344 | nmi_count(cpu)); | 113 | nmi_count(cpu)); |
345 | per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0; | 114 | per_cpu(wd_enabled, cpu) = 0; |
346 | atomic_dec(&nmi_active); | 115 | atomic_dec(&nmi_active); |
347 | } | 116 | } |
348 | } | 117 | } |
@@ -356,16 +125,8 @@ static int __init check_nmi_watchdog(void)
356 | 125 | ||
357 | /* now that we know it works we can reduce NMI frequency to | 126 | /* now that we know it works we can reduce NMI frequency to |
358 | something more reasonable; makes a difference in some configs */ | 127 | something more reasonable; makes a difference in some configs */ |
359 | if (nmi_watchdog == NMI_LOCAL_APIC) { | 128 | if (nmi_watchdog == NMI_LOCAL_APIC) |
360 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | 129 | nmi_hz = lapic_adjust_nmi_hz(1); |
361 | |||
362 | nmi_hz = 1; | ||
363 | |||
364 | if (wd->perfctr_msr == MSR_P6_PERFCTR0 || | ||
365 | wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1) { | ||
366 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
367 | } | ||
368 | } | ||
369 | 130 | ||
370 | kfree(prev_nmi_count); | 131 | kfree(prev_nmi_count); |
371 | return 0; | 132 | return 0; |
@@ -388,85 +149,8 @@ static int __init setup_nmi_watchdog(char *str)
388 | 149 | ||
389 | __setup("nmi_watchdog=", setup_nmi_watchdog); | 150 | __setup("nmi_watchdog=", setup_nmi_watchdog); |
390 | 151 | ||
391 | static void disable_lapic_nmi_watchdog(void) | ||
392 | { | ||
393 | BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); | ||
394 | |||
395 | if (atomic_read(&nmi_active) <= 0) | ||
396 | return; | ||
397 | |||
398 | on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1); | ||
399 | |||
400 | BUG_ON(atomic_read(&nmi_active) != 0); | ||
401 | } | ||
402 | |||
403 | static void enable_lapic_nmi_watchdog(void) | ||
404 | { | ||
405 | BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); | ||
406 | |||
407 | /* are we already enabled */ | ||
408 | if (atomic_read(&nmi_active) != 0) | ||
409 | return; | ||
410 | |||
411 | /* are we lapic aware */ | ||
412 | if (nmi_known_cpu() <= 0) | ||
413 | return; | ||
414 | |||
415 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1); | ||
416 | touch_nmi_watchdog(); | ||
417 | } | ||
418 | |||
419 | void disable_timer_nmi_watchdog(void) | ||
420 | { | ||
421 | BUG_ON(nmi_watchdog != NMI_IO_APIC); | ||
422 | |||
423 | if (atomic_read(&nmi_active) <= 0) | ||
424 | return; | ||
425 | |||
426 | disable_irq(0); | ||
427 | on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1); | ||
428 | |||
429 | BUG_ON(atomic_read(&nmi_active) != 0); | ||
430 | } | ||
431 | |||
432 | void enable_timer_nmi_watchdog(void) | ||
433 | { | ||
434 | BUG_ON(nmi_watchdog != NMI_IO_APIC); | ||
435 | |||
436 | if (atomic_read(&nmi_active) == 0) { | ||
437 | touch_nmi_watchdog(); | ||
438 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1); | ||
439 | enable_irq(0); | ||
440 | } | ||
441 | } | ||
442 | |||
443 | static void __acpi_nmi_disable(void *__unused) | ||
444 | { | ||
445 | apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); | ||
446 | } | ||
447 | 152 | ||
448 | /* | 153 | /* Suspend/resume support */ |
449 | * Disable timer based NMIs on all CPUs: | ||
450 | */ | ||
451 | void acpi_nmi_disable(void) | ||
452 | { | ||
453 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | ||
454 | on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); | ||
455 | } | ||
456 | |||
457 | static void __acpi_nmi_enable(void *__unused) | ||
458 | { | ||
459 | apic_write_around(APIC_LVT0, APIC_DM_NMI); | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * Enable timer based NMIs on all CPUs: | ||
464 | */ | ||
465 | void acpi_nmi_enable(void) | ||
466 | { | ||
467 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | ||
468 | on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); | ||
469 | } | ||
470 | 154 | ||
471 | #ifdef CONFIG_PM | 155 | #ifdef CONFIG_PM |
472 | 156 | ||
@@ -513,7 +197,7 @@ static int __init init_lapic_nmi_sysfs(void)
513 | if (nmi_watchdog != NMI_LOCAL_APIC) | 197 | if (nmi_watchdog != NMI_LOCAL_APIC) |
514 | return 0; | 198 | return 0; |
515 | 199 | ||
516 | if ( atomic_read(&nmi_active) < 0 ) | 200 | if (atomic_read(&nmi_active) < 0) |
517 | return 0; | 201 | return 0; |
518 | 202 | ||
519 | error = sysdev_class_register(&nmi_sysclass); | 203 | error = sysdev_class_register(&nmi_sysclass); |
@@ -526,433 +210,69 @@ late_initcall(init_lapic_nmi_sysfs);
526 | 210 | ||
527 | #endif /* CONFIG_PM */ | 211 | #endif /* CONFIG_PM */ |
528 | 212 | ||
529 | /* | 213 | static void __acpi_nmi_enable(void *__unused) |
530 | * Activate the NMI watchdog via the local APIC. | ||
531 | * Original code written by Keith Owens. | ||
532 | */ | ||
533 | |||
534 | static void write_watchdog_counter(unsigned int perfctr_msr, const char *descr) | ||
535 | { | ||
536 | u64 count = (u64)cpu_khz * 1000; | ||
537 | |||
538 | do_div(count, nmi_hz); | ||
539 | if(descr) | ||
540 | Dprintk("setting %s to -0x%08Lx\n", descr, count); | ||
541 | wrmsrl(perfctr_msr, 0 - count); | ||
542 | } | ||
543 | |||
544 | static void write_watchdog_counter32(unsigned int perfctr_msr, | ||
545 | const char *descr) | ||
546 | { | ||
547 | u64 count = (u64)cpu_khz * 1000; | ||
548 | |||
549 | do_div(count, nmi_hz); | ||
550 | if(descr) | ||
551 | Dprintk("setting %s to -0x%08Lx\n", descr, count); | ||
552 | wrmsr(perfctr_msr, (u32)(-count), 0); | ||
553 | } | ||
554 | |||
555 | /* Note that these events don't tick when the CPU idles. This means | ||
556 | the frequency varies with CPU load. */ | ||
557 | |||
558 | #define K7_EVNTSEL_ENABLE (1 << 22) | ||
559 | #define K7_EVNTSEL_INT (1 << 20) | ||
560 | #define K7_EVNTSEL_OS (1 << 17) | ||
561 | #define K7_EVNTSEL_USR (1 << 16) | ||
562 | #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 | ||
563 | #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING | ||
564 | |||
565 | static int setup_k7_watchdog(void) | ||
566 | { | ||
567 | unsigned int perfctr_msr, evntsel_msr; | ||
568 | unsigned int evntsel; | ||
569 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
570 | |||
571 | perfctr_msr = MSR_K7_PERFCTR0; | ||
572 | evntsel_msr = MSR_K7_EVNTSEL0; | ||
573 | if (!__reserve_perfctr_nmi(-1, perfctr_msr)) | ||
574 | goto fail; | ||
575 | |||
576 | if (!__reserve_evntsel_nmi(-1, evntsel_msr)) | ||
577 | goto fail1; | ||
578 | |||
579 | wrmsrl(perfctr_msr, 0UL); | ||
580 | |||
581 | evntsel = K7_EVNTSEL_INT | ||
582 | | K7_EVNTSEL_OS | ||
583 | | K7_EVNTSEL_USR | ||
584 | | K7_NMI_EVENT; | ||
585 | |||
586 | /* setup the timer */ | ||
587 | wrmsr(evntsel_msr, evntsel, 0); | ||
588 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0"); | ||
589 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
590 | evntsel |= K7_EVNTSEL_ENABLE; | ||
591 | wrmsr(evntsel_msr, evntsel, 0); | ||
592 | |||
593 | wd->perfctr_msr = perfctr_msr; | ||
594 | wd->evntsel_msr = evntsel_msr; | ||
595 | wd->cccr_msr = 0; //unused | ||
596 | wd->check_bit = 1ULL<<63; | ||
597 | return 1; | ||
598 | fail1: | ||
599 | __release_perfctr_nmi(-1, perfctr_msr); | ||
600 | fail: | ||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | static void stop_k7_watchdog(void) | ||
605 | { | ||
606 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
607 | |||
608 | wrmsr(wd->evntsel_msr, 0, 0); | ||
609 | |||
610 | __release_evntsel_nmi(-1, wd->evntsel_msr); | ||
611 | __release_perfctr_nmi(-1, wd->perfctr_msr); | ||
612 | } | ||
613 | |||
614 | #define P6_EVNTSEL0_ENABLE (1 << 22) | ||
615 | #define P6_EVNTSEL_INT (1 << 20) | ||
616 | #define P6_EVNTSEL_OS (1 << 17) | ||
617 | #define P6_EVNTSEL_USR (1 << 16) | ||
618 | #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 | ||
619 | #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED | ||
620 | |||
621 | static int setup_p6_watchdog(void) | ||
622 | { | ||
623 | unsigned int perfctr_msr, evntsel_msr; | ||
624 | unsigned int evntsel; | ||
625 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
626 | |||
627 | perfctr_msr = MSR_P6_PERFCTR0; | ||
628 | evntsel_msr = MSR_P6_EVNTSEL0; | ||
629 | if (!__reserve_perfctr_nmi(-1, perfctr_msr)) | ||
630 | goto fail; | ||
631 | |||
632 | if (!__reserve_evntsel_nmi(-1, evntsel_msr)) | ||
633 | goto fail1; | ||
634 | |||
635 | wrmsrl(perfctr_msr, 0UL); | ||
636 | |||
637 | evntsel = P6_EVNTSEL_INT | ||
638 | | P6_EVNTSEL_OS | ||
639 | | P6_EVNTSEL_USR | ||
640 | | P6_NMI_EVENT; | ||
641 | |||
642 | /* setup the timer */ | ||
643 | wrmsr(evntsel_msr, evntsel, 0); | ||
644 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
645 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0"); | ||
646 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
647 | evntsel |= P6_EVNTSEL0_ENABLE; | ||
648 | wrmsr(evntsel_msr, evntsel, 0); | ||
649 | |||
650 | wd->perfctr_msr = perfctr_msr; | ||
651 | wd->evntsel_msr = evntsel_msr; | ||
652 | wd->cccr_msr = 0; //unused | ||
653 | wd->check_bit = 1ULL<<39; | ||
654 | return 1; | ||
655 | fail1: | ||
656 | __release_perfctr_nmi(-1, perfctr_msr); | ||
657 | fail: | ||
658 | return 0; | ||
659 | } | ||
660 | |||
661 | static void stop_p6_watchdog(void) | ||
662 | { | ||
663 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
664 | |||
665 | wrmsr(wd->evntsel_msr, 0, 0); | ||
666 | |||
667 | __release_evntsel_nmi(-1, wd->evntsel_msr); | ||
668 | __release_perfctr_nmi(-1, wd->perfctr_msr); | ||
669 | } | ||
670 | |||
671 | /* Note that these events don't tick when the CPU idles. This means | ||
672 | the frequency varies with CPU load. */ | ||
673 | |||
674 | #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) | ||
675 | #define P4_ESCR_EVENT_SELECT(N) ((N)<<25) | ||
676 | #define P4_ESCR_OS (1<<3) | ||
677 | #define P4_ESCR_USR (1<<2) | ||
678 | #define P4_CCCR_OVF_PMI0 (1<<26) | ||
679 | #define P4_CCCR_OVF_PMI1 (1<<27) | ||
680 | #define P4_CCCR_THRESHOLD(N) ((N)<<20) | ||
681 | #define P4_CCCR_COMPLEMENT (1<<19) | ||
682 | #define P4_CCCR_COMPARE (1<<18) | ||
683 | #define P4_CCCR_REQUIRED (3<<16) | ||
684 | #define P4_CCCR_ESCR_SELECT(N) ((N)<<13) | ||
685 | #define P4_CCCR_ENABLE (1<<12) | ||
686 | #define P4_CCCR_OVF (1<<31) | ||
687 | /* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter | ||
688 | CRU_ESCR0 (with any non-null event selector) through a complemented | ||
689 | max threshold. [IA32-Vol3, Section 14.9.9] */ | ||
690 | |||
691 | static int setup_p4_watchdog(void) | ||
692 | { | 214 | { |
693 | unsigned int perfctr_msr, evntsel_msr, cccr_msr; | 215 | apic_write_around(APIC_LVT0, APIC_DM_NMI); |
694 | unsigned int evntsel, cccr_val; | ||
695 | unsigned int misc_enable, dummy; | ||
696 | unsigned int ht_num; | ||
697 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
698 | |||
699 | rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy); | ||
700 | if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL)) | ||
701 | return 0; | ||
702 | |||
703 | #ifdef CONFIG_SMP | ||
704 | /* detect which hyperthread we are on */ | ||
705 | if (smp_num_siblings == 2) { | ||
706 | unsigned int ebx, apicid; | ||
707 | |||
708 | ebx = cpuid_ebx(1); | ||
709 | apicid = (ebx >> 24) & 0xff; | ||
710 | ht_num = apicid & 1; | ||
711 | } else | ||
712 | #endif | ||
713 | ht_num = 0; | ||
714 | |||
715 | /* performance counters are shared resources | ||
716 | * assign each hyperthread its own set | ||
717 | * (re-use the ESCR0 register, seems safe | ||
718 | * and keeps the cccr_val the same) | ||
719 | */ | ||
720 | if (!ht_num) { | ||
721 | /* logical cpu 0 */ | ||
722 | perfctr_msr = MSR_P4_IQ_PERFCTR0; | ||
723 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
724 | cccr_msr = MSR_P4_IQ_CCCR0; | ||
725 | cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); | ||
726 | } else { | ||
727 | /* logical cpu 1 */ | ||
728 | perfctr_msr = MSR_P4_IQ_PERFCTR1; | ||
729 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
730 | cccr_msr = MSR_P4_IQ_CCCR1; | ||
731 | cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4); | ||
732 | } | ||
733 | |||
734 | if (!__reserve_perfctr_nmi(-1, perfctr_msr)) | ||
735 | goto fail; | ||
736 | |||
737 | if (!__reserve_evntsel_nmi(-1, evntsel_msr)) | ||
738 | goto fail1; | ||
739 | |||
740 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | ||
741 | | P4_ESCR_OS | ||
742 | | P4_ESCR_USR; | ||
743 | |||
744 | cccr_val |= P4_CCCR_THRESHOLD(15) | ||
745 | | P4_CCCR_COMPLEMENT | ||
746 | | P4_CCCR_COMPARE | ||
747 | | P4_CCCR_REQUIRED; | ||
748 | |||
749 | wrmsr(evntsel_msr, evntsel, 0); | ||
750 | wrmsr(cccr_msr, cccr_val, 0); | ||
751 | write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0"); | ||
752 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
753 | cccr_val |= P4_CCCR_ENABLE; | ||
754 | wrmsr(cccr_msr, cccr_val, 0); | ||
755 | wd->perfctr_msr = perfctr_msr; | ||
756 | wd->evntsel_msr = evntsel_msr; | ||
757 | wd->cccr_msr = cccr_msr; | ||
758 | wd->check_bit = 1ULL<<39; | ||
759 | return 1; | ||
760 | fail1: | ||
761 | __release_perfctr_nmi(-1, perfctr_msr); | ||
762 | fail: | ||
763 | return 0; | ||
764 | } | 216 | } |
765 | 217 | ||
766 | static void stop_p4_watchdog(void) | 218 | /* |
219 | * Enable timer based NMIs on all CPUs: | ||
220 | */ | ||
221 | void acpi_nmi_enable(void) | ||
767 | { | 222 | { |
768 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | 223 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) |
769 | 224 | on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); | |
770 | wrmsr(wd->cccr_msr, 0, 0); | ||
771 | wrmsr(wd->evntsel_msr, 0, 0); | ||
772 | |||
773 | __release_evntsel_nmi(-1, wd->evntsel_msr); | ||
774 | __release_perfctr_nmi(-1, wd->perfctr_msr); | ||
775 | } | 225 | } |
776 | 226 | ||
777 | #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL | 227 | static void __acpi_nmi_disable(void *__unused) |
778 | #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK | ||
779 | |||
780 | static int setup_intel_arch_watchdog(void) | ||
781 | { | 228 | { |
782 | unsigned int ebx; | 229 | apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED); |
783 | union cpuid10_eax eax; | ||
784 | unsigned int unused; | ||
785 | unsigned int perfctr_msr, evntsel_msr; | ||
786 | unsigned int evntsel; | ||
787 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
788 | |||
789 | /* | ||
790 | * Check whether the Architectural PerfMon supports | ||
791 | * Unhalted Core Cycles Event or not. | ||
792 | * NOTE: Corresponding bit = 0 in ebx indicates event present. | ||
793 | */ | ||
794 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | ||
795 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
796 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | ||
797 | goto fail; | ||
798 | |||
799 | perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1; | ||
800 | evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1; | ||
801 | |||
802 | if (!__reserve_perfctr_nmi(-1, perfctr_msr)) | ||
803 | goto fail; | ||
804 | |||
805 | if (!__reserve_evntsel_nmi(-1, evntsel_msr)) | ||
806 | goto fail1; | ||
807 | |||
808 | wrmsrl(perfctr_msr, 0UL); | ||
809 | |||
810 | evntsel = ARCH_PERFMON_EVENTSEL_INT | ||
811 | | ARCH_PERFMON_EVENTSEL_OS | ||
812 | | ARCH_PERFMON_EVENTSEL_USR | ||
813 | | ARCH_PERFMON_NMI_EVENT_SEL | ||
814 | | ARCH_PERFMON_NMI_EVENT_UMASK; | ||
815 | |||
816 | /* setup the timer */ | ||
817 | wrmsr(evntsel_msr, evntsel, 0); | ||
818 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
819 | write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0"); | ||
820 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
821 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
822 | wrmsr(evntsel_msr, evntsel, 0); | ||
823 | |||
824 | wd->perfctr_msr = perfctr_msr; | ||
825 | wd->evntsel_msr = evntsel_msr; | ||
826 | wd->cccr_msr = 0; //unused | ||
827 | wd->check_bit = 1ULL << (eax.split.bit_width - 1); | ||
828 | return 1; | ||
829 | fail1: | ||
830 | __release_perfctr_nmi(-1, perfctr_msr); | ||
831 | fail: | ||
832 | return 0; | ||
833 | } | 230 | } |
834 | 231 | ||
835 | static void stop_intel_arch_watchdog(void) | 232 | /* |
233 | * Disable timer based NMIs on all CPUs: | ||
234 | */ | ||
235 | void acpi_nmi_disable(void) | ||
836 | { | 236 | { |
837 | unsigned int ebx; | 237 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) |
838 | union cpuid10_eax eax; | 238 | on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); |
839 | unsigned int unused; | ||
840 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
841 | |||
842 | /* | ||
843 | * Check whether the Architectural PerfMon supports | ||
844 | * Unhalted Core Cycles Event or not. | ||
845 | * NOTE: Corresponding bit = 0 in ebx indicates event present. | ||
846 | */ | ||
847 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | ||
848 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
849 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | ||
850 | return; | ||
851 | |||
852 | wrmsr(wd->evntsel_msr, 0, 0); | ||
853 | __release_evntsel_nmi(-1, wd->evntsel_msr); | ||
854 | __release_perfctr_nmi(-1, wd->perfctr_msr); | ||
855 | } | 239 | } |
856 | 240 | ||
857 | void setup_apic_nmi_watchdog (void *unused) | 241 | void setup_apic_nmi_watchdog (void *unused) |
858 | { | 242 | { |
859 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | 243 | if (__get_cpu_var(wd_enabled)) |
860 | 244 | return; | |
861 | /* only support LOCAL and IO APICs for now */ | ||
862 | if ((nmi_watchdog != NMI_LOCAL_APIC) && | ||
863 | (nmi_watchdog != NMI_IO_APIC)) | ||
864 | return; | ||
865 | |||
866 | if (wd->enabled == 1) | ||
867 | return; | ||
868 | 245 | ||
869 | /* cheap hack to support suspend/resume */ | 246 | /* cheap hack to support suspend/resume */ |
870 | /* if cpu0 is not active neither should the other cpus */ | 247 | /* if cpu0 is not active neither should the other cpus */ |
871 | if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0)) | 248 | if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0)) |
872 | return; | 249 | return; |
873 | 250 | ||
874 | if (nmi_watchdog == NMI_LOCAL_APIC) { | 251 | switch (nmi_watchdog) { |
875 | switch (boot_cpu_data.x86_vendor) { | 252 | case NMI_LOCAL_APIC: |
876 | case X86_VENDOR_AMD: | 253 | __get_cpu_var(wd_enabled) = 1; /* enable it before to avoid race with handler */ |
877 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && | 254 | if (lapic_watchdog_init(nmi_hz) < 0) { |
878 | boot_cpu_data.x86 != 16) | 255 | __get_cpu_var(wd_enabled) = 0; |
879 | return; | ||
880 | if (!setup_k7_watchdog()) | ||
881 | return; | ||
882 | break; | ||
883 | case X86_VENDOR_INTEL: | ||
884 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | ||
885 | if (!setup_intel_arch_watchdog()) | ||
886 | return; | ||
887 | break; | ||
888 | } | ||
889 | switch (boot_cpu_data.x86) { | ||
890 | case 6: | ||
891 | if (boot_cpu_data.x86_model > 0xd) | ||
892 | return; | ||
893 | |||
894 | if (!setup_p6_watchdog()) | ||
895 | return; | ||
896 | break; | ||
897 | case 15: | ||
898 | if (boot_cpu_data.x86_model > 0x4) | ||
899 | return; | ||
900 | |||
901 | if (!setup_p4_watchdog()) | ||
902 | return; | ||
903 | break; | ||
904 | default: | ||
905 | return; | ||
906 | } | ||
907 | break; | ||
908 | default: | ||
909 | return; | 256 | return; |
910 | } | 257 | } |
258 | /* FALL THROUGH */ | ||
259 | case NMI_IO_APIC: | ||
260 | __get_cpu_var(wd_enabled) = 1; | ||
261 | atomic_inc(&nmi_active); | ||
911 | } | 262 | } |
912 | wd->enabled = 1; | ||
913 | atomic_inc(&nmi_active); | ||
914 | } | 263 | } |
915 | 264 | ||
916 | void stop_apic_nmi_watchdog(void *unused) | 265 | void stop_apic_nmi_watchdog(void *unused) |
917 | { | 266 | { |
918 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
919 | |||
920 | /* only support LOCAL and IO APICs for now */ | 267 | /* only support LOCAL and IO APICs for now */ |
921 | if ((nmi_watchdog != NMI_LOCAL_APIC) && | 268 | if ((nmi_watchdog != NMI_LOCAL_APIC) && |
922 | (nmi_watchdog != NMI_IO_APIC)) | 269 | (nmi_watchdog != NMI_IO_APIC)) |
923 | return; | 270 | return; |
924 | 271 | if (__get_cpu_var(wd_enabled) == 0) | |
925 | if (wd->enabled == 0) | ||
926 | return; | 272 | return; |
927 | 273 | if (nmi_watchdog == NMI_LOCAL_APIC) | |
928 | if (nmi_watchdog == NMI_LOCAL_APIC) { | 274 | lapic_watchdog_stop(); |
929 | switch (boot_cpu_data.x86_vendor) { | 275 | __get_cpu_var(wd_enabled) = 0; |
930 | case X86_VENDOR_AMD: | ||
931 | stop_k7_watchdog(); | ||
932 | break; | ||
933 | case X86_VENDOR_INTEL: | ||
934 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | ||
935 | stop_intel_arch_watchdog(); | ||
936 | break; | ||
937 | } | ||
938 | switch (boot_cpu_data.x86) { | ||
939 | case 6: | ||
940 | if (boot_cpu_data.x86_model > 0xd) | ||
941 | break; | ||
942 | stop_p6_watchdog(); | ||
943 | break; | ||
944 | case 15: | ||
945 | if (boot_cpu_data.x86_model > 0x4) | ||
946 | break; | ||
947 | stop_p4_watchdog(); | ||
948 | break; | ||
949 | } | ||
950 | break; | ||
951 | default: | ||
952 | return; | ||
953 | } | ||
954 | } | ||
955 | wd->enabled = 0; | ||
956 | atomic_dec(&nmi_active); | 276 | atomic_dec(&nmi_active); |
957 | } | 277 | } |
958 | 278 | ||
@@ -1008,8 +328,6 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
1008 | unsigned int sum; | 328 | unsigned int sum; |
1009 | int touched = 0; | 329 | int touched = 0; |
1010 | int cpu = smp_processor_id(); | 330 | int cpu = smp_processor_id(); |
1011 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
1012 | u64 dummy; | ||
1013 | int rc=0; | 331 | int rc=0; |
1014 | 332 | ||
1015 | /* check for other users first */ | 333 | /* check for other users first */ |
@@ -1052,53 +370,20 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
1052 | alert_counter[cpu] = 0; | 370 | alert_counter[cpu] = 0; |
1053 | } | 371 | } |
1054 | /* see if the nmi watchdog went off */ | 372 | /* see if the nmi watchdog went off */ |
1055 | if (wd->enabled) { | 373 | if (!__get_cpu_var(wd_enabled)) |
1056 | if (nmi_watchdog == NMI_LOCAL_APIC) { | 374 | return rc; |
1057 | rdmsrl(wd->perfctr_msr, dummy); | 375 | switch (nmi_watchdog) { |
1058 | if (dummy & wd->check_bit){ | 376 | case NMI_LOCAL_APIC: |
1059 | /* this wasn't a watchdog timer interrupt */ | 377 | rc |= lapic_wd_event(nmi_hz); |
1060 | goto done; | 378 | break; |
1061 | } | 379 | case NMI_IO_APIC: |
1062 | 380 | /* don't know how to accurately check for this. | |
1063 | /* only Intel P4 uses the cccr msr */ | 381 | * just assume it was a watchdog timer interrupt |
1064 | if (wd->cccr_msr != 0) { | 382 | * This matches the old behaviour. |
1065 | /* | 383 | */ |
1066 | * P4 quirks: | 384 | rc = 1; |
1067 | * - An overflown perfctr will assert its interrupt | 385 | break; |
1068 | * until the OVF flag in its CCCR is cleared. | ||
1069 | * - LVTPC is masked on interrupt and must be | ||
1070 | * unmasked by the LVTPC handler. | ||
1071 | */ | ||
1072 | rdmsrl(wd->cccr_msr, dummy); | ||
1073 | dummy &= ~P4_CCCR_OVF; | ||
1074 | wrmsrl(wd->cccr_msr, dummy); | ||
1075 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
1076 | /* start the cycle over again */ | ||
1077 | write_watchdog_counter(wd->perfctr_msr, NULL); | ||
1078 | } | ||
1079 | else if (wd->perfctr_msr == MSR_P6_PERFCTR0 || | ||
1080 | wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1) { | ||
1081 | /* P6 based Pentium M need to re-unmask | ||
1082 | * the apic vector but it doesn't hurt | ||
1083 | * other P6 variant. | ||
1084 | * ArchPerfom/Core Duo also needs this */ | ||
1085 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
1086 | /* P6/ARCH_PERFMON has 32 bit counter write */ | ||
1087 | write_watchdog_counter32(wd->perfctr_msr, NULL); | ||
1088 | } else { | ||
1089 | /* start the cycle over again */ | ||
1090 | write_watchdog_counter(wd->perfctr_msr, NULL); | ||
1091 | } | ||
1092 | rc = 1; | ||
1093 | } else if (nmi_watchdog == NMI_IO_APIC) { | ||
1094 | /* don't know how to accurately check for this. | ||
1095 | * just assume it was a watchdog timer interrupt | ||
1096 | * This matches the old behaviour. | ||
1097 | */ | ||
1098 | rc = 1; | ||
1099 | } | ||
1100 | } | 386 | } |
1101 | done: | ||
1102 | return rc; | 387 | return rc; |
1103 | } | 388 | } |
1104 | 389 | ||
@@ -1143,7 +428,7 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
1143 | } | 428 | } |
1144 | 429 | ||
1145 | if (nmi_watchdog == NMI_DEFAULT) { | 430 | if (nmi_watchdog == NMI_DEFAULT) { |
1146 | if (nmi_known_cpu() > 0) | 431 | if (lapic_watchdog_ok()) |
1147 | nmi_watchdog = NMI_LOCAL_APIC; | 432 | nmi_watchdog = NMI_LOCAL_APIC; |
1148 | else | 433 | else |
1149 | nmi_watchdog = NMI_IO_APIC; | 434 | nmi_watchdog = NMI_IO_APIC; |
@@ -1179,11 +464,3 @@ void __trigger_all_cpu_backtrace(void)
1179 | 464 | ||
1180 | EXPORT_SYMBOL(nmi_active); | 465 | EXPORT_SYMBOL(nmi_active); |
1181 | EXPORT_SYMBOL(nmi_watchdog); | 466 | EXPORT_SYMBOL(nmi_watchdog); |
1182 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi); | ||
1183 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); | ||
1184 | EXPORT_SYMBOL(reserve_perfctr_nmi); | ||
1185 | EXPORT_SYMBOL(release_perfctr_nmi); | ||
1186 | EXPORT_SYMBOL(reserve_evntsel_nmi); | ||
1187 | EXPORT_SYMBOL(release_evntsel_nmi); | ||
1188 | EXPORT_SYMBOL(disable_timer_nmi_watchdog); | ||
1189 | EXPORT_SYMBOL(enable_timer_nmi_watchdog); | ||
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index b04333ea6f31..fb1e133efd9f 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -50,4 +50,12 @@ void __trigger_all_cpu_backtrace(void);
50 | 50 | ||
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | void lapic_watchdog_stop(void); | ||
54 | int lapic_watchdog_init(unsigned nmi_hz); | ||
55 | int lapic_wd_event(unsigned nmi_hz); | ||
56 | unsigned lapic_adjust_nmi_hz(unsigned hz); | ||
57 | int lapic_watchdog_ok(void); | ||
58 | void disable_lapic_nmi_watchdog(void); | ||
59 | void enable_lapic_nmi_watchdog(void); | ||
60 | |||
53 | #endif /* ASM_NMI_H */ | 61 | #endif /* ASM_NMI_H */ |