author     Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:16:58 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:16:58 -0400
commit     f7627e2513987bb5d4e8cb13c4e0a478352141ac (patch)
tree       46ef70a107285c1dfe8161a57f433d30252d285a /arch/x86/kernel/cpu/perfctr-watchdog.c
parent     4ac24f63fd203bc12a841a88a2034dccd358d0d1 (diff)
i386: move kernel/cpu
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perfctr-watchdog.c')
 -rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 713
 1 file changed, 713 insertions, 0 deletions

diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
new file mode 100644
index 000000000000..93fecd4b03de
--- /dev/null
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -0,0 +1,713 @@
/* local apic based NMI watchdog for various CPUs.
   This file also handles reservation of performance counters for coordination
   with other users (like oprofile).

   Note that these events normally don't tick when the CPU idles. This means
   the frequency varies with CPU load.

   Original code for K7/P6 written by Keith Owens */

#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/apic.h>
#include <asm/intel_arch_perfmon.h>

struct nmi_watchdog_ctlblk {
        unsigned int cccr_msr;
        unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
        unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};

/* Interface defining a CPU specific perfctr watchdog */
struct wd_ops {
        int (*reserve)(void);
        void (*unreserve)(void);
        int (*setup)(unsigned nmi_hz);
        void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
        void (*stop)(void);
        unsigned perfctr;
        unsigned evntsel;
        u64 checkbit;
};

static struct wd_ops *wd_ops;

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66

/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/ event selection may be reserved for
 *   different subsystems; this reservation system just tries to coordinate
 *   things a little
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the performance counter register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return (msr - MSR_K7_PERFCTR0);
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return (msr - MSR_ARCH_PERFMON_PERFCTR0);

                switch (boot_cpu_data.x86) {
                case 6:
                        return (msr - MSR_P6_PERFCTR0);
                case 15:
                        return (msr - MSR_P4_BPU_PERFCTR0);
                }
        }
        return 0;
}
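
/* For example (AMD case above, illustrative only): MSR_K7_PERFCTR0 maps to
   reservation bit 0 and MSR_K7_PERFCTR1 to bit 1, since the perfctr MSRs are
   numbered consecutively from MSR_K7_PERFCTR0. */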
75 | |||
76 | /* converts an msr to an appropriate reservation bit */ | ||
77 | /* returns the bit offset of the event selection register */ | ||
78 | static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | ||
79 | { | ||
80 | /* returns the bit offset of the event selection register */ | ||
81 | switch (boot_cpu_data.x86_vendor) { | ||
82 | case X86_VENDOR_AMD: | ||
83 | return (msr - MSR_K7_EVNTSEL0); | ||
84 | case X86_VENDOR_INTEL: | ||
85 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | ||
86 | return (msr - MSR_ARCH_PERFMON_EVENTSEL0); | ||
87 | |||
88 | switch (boot_cpu_data.x86) { | ||
89 | case 6: | ||
90 | return (msr - MSR_P6_EVNTSEL0); | ||
91 | case 15: | ||
92 | return (msr - MSR_P4_BSU_ESCR0); | ||
93 | } | ||
94 | } | ||
95 | return 0; | ||
96 | |||
97 | } | ||
98 | |||
99 | /* checks for a bit availability (hack for oprofile) */ | ||
100 | int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) | ||
101 | { | ||
102 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
103 | |||
104 | return (!test_bit(counter, perfctr_nmi_owner)); | ||
105 | } | ||
106 | |||
107 | /* checks the an msr for availability */ | ||
108 | int avail_to_resrv_perfctr_nmi(unsigned int msr) | ||
109 | { | ||
110 | unsigned int counter; | ||
111 | |||
112 | counter = nmi_perfctr_msr_to_bit(msr); | ||
113 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
114 | |||
115 | return (!test_bit(counter, perfctr_nmi_owner)); | ||
116 | } | ||
117 | |||
118 | int reserve_perfctr_nmi(unsigned int msr) | ||
119 | { | ||
120 | unsigned int counter; | ||
121 | |||
122 | counter = nmi_perfctr_msr_to_bit(msr); | ||
123 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
124 | |||
125 | if (!test_and_set_bit(counter, perfctr_nmi_owner)) | ||
126 | return 1; | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | void release_perfctr_nmi(unsigned int msr) | ||
131 | { | ||
132 | unsigned int counter; | ||
133 | |||
134 | counter = nmi_perfctr_msr_to_bit(msr); | ||
135 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
136 | |||
137 | clear_bit(counter, perfctr_nmi_owner); | ||
138 | } | ||
139 | |||
140 | int reserve_evntsel_nmi(unsigned int msr) | ||
141 | { | ||
142 | unsigned int counter; | ||
143 | |||
144 | counter = nmi_evntsel_msr_to_bit(msr); | ||
145 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
146 | |||
147 | if (!test_and_set_bit(counter, evntsel_nmi_owner)) | ||
148 | return 1; | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | void release_evntsel_nmi(unsigned int msr) | ||
153 | { | ||
154 | unsigned int counter; | ||
155 | |||
156 | counter = nmi_evntsel_msr_to_bit(msr); | ||
157 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | ||
158 | |||
159 | clear_bit(counter, evntsel_nmi_owner); | ||
160 | } | ||
161 | |||
162 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi); | ||
163 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); | ||
164 | EXPORT_SYMBOL(reserve_perfctr_nmi); | ||
165 | EXPORT_SYMBOL(release_perfctr_nmi); | ||
166 | EXPORT_SYMBOL(reserve_evntsel_nmi); | ||
167 | EXPORT_SYMBOL(release_evntsel_nmi); | ||
168 | |||
void disable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        if (atomic_read(&nmi_active) <= 0)
                return;

        on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
        wd_ops->unreserve();

        BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        /* are we already enabled */
        if (atomic_read(&nmi_active) != 0)
                return;

        /* are we lapic aware */
        if (!wd_ops)
                return;
        if (!wd_ops->reserve()) {
                printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
                return;
        }

        on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
        touch_nmi_watchdog();
}

/*
 * Activate the NMI watchdog via the local APIC.
 */

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
        u64 counter_val;
        unsigned int retval = hz;

        /*
         * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
         * are writable, with higher bits sign extending from bit 31.
         * So we can only program the counter with 31 bit values; the
         * written (negative) value must have bit 31 set so that the
         * sign-extended bits 32 and up are 1 as well.
         * Find the appropriate nmi_hz.
         */
        counter_val = (u64)cpu_khz * 1000;
        do_div(counter_val, retval);
        if (counter_val > 0x7fffffffULL) {
                u64 count = (u64)cpu_khz * 1000;
                do_div(count, 0x7fffffffUL);
                retval = count + 1;
        }
        return retval;
}
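
/* Worked example (numbers are illustrative): on a 3 GHz CPU (cpu_khz =
   3000000) with nmi_hz = 1 the ideal period is 3,000,000,000 cycles, which
   exceeds 0x7fffffff (~2.15e9).  adjust_for_32bit_ctr() then returns
   3000000000 / 0x7fffffff + 1 = 2, i.e. the watchdog ticks at 2 Hz so that
   each period fits in 31 bits. */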
227 | |||
228 | static void | ||
229 | write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz) | ||
230 | { | ||
231 | u64 count = (u64)cpu_khz * 1000; | ||
232 | |||
233 | do_div(count, nmi_hz); | ||
234 | if(descr) | ||
235 | Dprintk("setting %s to -0x%08Lx\n", descr, count); | ||
236 | wrmsrl(perfctr_msr, 0 - count); | ||
237 | } | ||
238 | |||
239 | static void write_watchdog_counter32(unsigned int perfctr_msr, | ||
240 | const char *descr, unsigned nmi_hz) | ||
241 | { | ||
242 | u64 count = (u64)cpu_khz * 1000; | ||
243 | |||
244 | do_div(count, nmi_hz); | ||
245 | if(descr) | ||
246 | Dprintk("setting %s to -0x%08Lx\n", descr, count); | ||
247 | wrmsr(perfctr_msr, (u32)(-count), 0); | ||
248 | } | ||
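
/* The counters count up and raise an NMI on overflow, so both helpers above
   program -(cpu_khz * 1000 / nmi_hz): the counter overflows (and fires)
   nmi_hz times per second.  For example, with cpu_khz = 2000000 and
   nmi_hz = 2 the value written is -1,000,000,000, i.e. one NMI per ~0.5 s of
   unhalted cycles (numbers are illustrative only). */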
249 | |||
250 | /* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface | ||
251 | nicely stable so there is not much variety */ | ||
252 | |||
253 | #define K7_EVNTSEL_ENABLE (1 << 22) | ||
254 | #define K7_EVNTSEL_INT (1 << 20) | ||
255 | #define K7_EVNTSEL_OS (1 << 17) | ||
256 | #define K7_EVNTSEL_USR (1 << 16) | ||
257 | #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 | ||
258 | #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING | ||
259 | |||
260 | static int setup_k7_watchdog(unsigned nmi_hz) | ||
261 | { | ||
262 | unsigned int perfctr_msr, evntsel_msr; | ||
263 | unsigned int evntsel; | ||
264 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
265 | |||
266 | perfctr_msr = wd_ops->perfctr; | ||
267 | evntsel_msr = wd_ops->evntsel; | ||
268 | |||
269 | wrmsrl(perfctr_msr, 0UL); | ||
270 | |||
271 | evntsel = K7_EVNTSEL_INT | ||
272 | | K7_EVNTSEL_OS | ||
273 | | K7_EVNTSEL_USR | ||
274 | | K7_NMI_EVENT; | ||
275 | |||
276 | /* setup the timer */ | ||
277 | wrmsr(evntsel_msr, evntsel, 0); | ||
278 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); | ||
279 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
280 | evntsel |= K7_EVNTSEL_ENABLE; | ||
281 | wrmsr(evntsel_msr, evntsel, 0); | ||
282 | |||
283 | wd->perfctr_msr = perfctr_msr; | ||
284 | wd->evntsel_msr = evntsel_msr; | ||
285 | wd->cccr_msr = 0; //unused | ||
286 | return 1; | ||
287 | } | ||
288 | |||
289 | static void single_msr_stop_watchdog(void) | ||
290 | { | ||
291 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
292 | |||
293 | wrmsr(wd->evntsel_msr, 0, 0); | ||
294 | } | ||
295 | |||
296 | static int single_msr_reserve(void) | ||
297 | { | ||
298 | if (!reserve_perfctr_nmi(wd_ops->perfctr)) | ||
299 | return 0; | ||
300 | |||
301 | if (!reserve_evntsel_nmi(wd_ops->evntsel)) { | ||
302 | release_perfctr_nmi(wd_ops->perfctr); | ||
303 | return 0; | ||
304 | } | ||
305 | return 1; | ||
306 | } | ||
307 | |||
308 | static void single_msr_unreserve(void) | ||
309 | { | ||
310 | release_evntsel_nmi(wd_ops->evntsel); | ||
311 | release_perfctr_nmi(wd_ops->perfctr); | ||
312 | } | ||
313 | |||
314 | static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
315 | { | ||
316 | /* start the cycle over again */ | ||
317 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); | ||
318 | } | ||
319 | |||
320 | static struct wd_ops k7_wd_ops = { | ||
321 | .reserve = single_msr_reserve, | ||
322 | .unreserve = single_msr_unreserve, | ||
323 | .setup = setup_k7_watchdog, | ||
324 | .rearm = single_msr_rearm, | ||
325 | .stop = single_msr_stop_watchdog, | ||
326 | .perfctr = MSR_K7_PERFCTR0, | ||
327 | .evntsel = MSR_K7_EVNTSEL0, | ||
328 | .checkbit = 1ULL<<47, | ||
329 | }; | ||
330 | |||
331 | /* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */ | ||
332 | |||
333 | #define P6_EVNTSEL0_ENABLE (1 << 22) | ||
334 | #define P6_EVNTSEL_INT (1 << 20) | ||
335 | #define P6_EVNTSEL_OS (1 << 17) | ||
336 | #define P6_EVNTSEL_USR (1 << 16) | ||
337 | #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 | ||
338 | #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED | ||
339 | |||
340 | static int setup_p6_watchdog(unsigned nmi_hz) | ||
341 | { | ||
342 | unsigned int perfctr_msr, evntsel_msr; | ||
343 | unsigned int evntsel; | ||
344 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
345 | |||
346 | perfctr_msr = wd_ops->perfctr; | ||
347 | evntsel_msr = wd_ops->evntsel; | ||
348 | |||
349 | /* KVM doesn't implement this MSR */ | ||
350 | if (wrmsr_safe(perfctr_msr, 0, 0) < 0) | ||
351 | return 0; | ||
352 | |||
353 | evntsel = P6_EVNTSEL_INT | ||
354 | | P6_EVNTSEL_OS | ||
355 | | P6_EVNTSEL_USR | ||
356 | | P6_NMI_EVENT; | ||
357 | |||
358 | /* setup the timer */ | ||
359 | wrmsr(evntsel_msr, evntsel, 0); | ||
360 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
361 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); | ||
362 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
363 | evntsel |= P6_EVNTSEL0_ENABLE; | ||
364 | wrmsr(evntsel_msr, evntsel, 0); | ||
365 | |||
366 | wd->perfctr_msr = perfctr_msr; | ||
367 | wd->evntsel_msr = evntsel_msr; | ||
368 | wd->cccr_msr = 0; //unused | ||
369 | return 1; | ||
370 | } | ||
371 | |||
372 | static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
373 | { | ||
374 | /* P6 based Pentium M need to re-unmask | ||
375 | * the apic vector but it doesn't hurt | ||
376 | * other P6 variant. | ||
377 | * ArchPerfom/Core Duo also needs this */ | ||
378 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
379 | /* P6/ARCH_PERFMON has 32 bit counter write */ | ||
380 | write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); | ||
381 | } | ||
382 | |||
383 | static struct wd_ops p6_wd_ops = { | ||
384 | .reserve = single_msr_reserve, | ||
385 | .unreserve = single_msr_unreserve, | ||
386 | .setup = setup_p6_watchdog, | ||
387 | .rearm = p6_rearm, | ||
388 | .stop = single_msr_stop_watchdog, | ||
389 | .perfctr = MSR_P6_PERFCTR0, | ||
390 | .evntsel = MSR_P6_EVNTSEL0, | ||
391 | .checkbit = 1ULL<<39, | ||
392 | }; | ||
393 | |||
394 | /* Intel P4 performance counters. By far the most complicated of all. */ | ||
395 | |||
396 | #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) | ||
397 | #define P4_ESCR_EVENT_SELECT(N) ((N)<<25) | ||
398 | #define P4_ESCR_OS (1<<3) | ||
399 | #define P4_ESCR_USR (1<<2) | ||
400 | #define P4_CCCR_OVF_PMI0 (1<<26) | ||
401 | #define P4_CCCR_OVF_PMI1 (1<<27) | ||
402 | #define P4_CCCR_THRESHOLD(N) ((N)<<20) | ||
403 | #define P4_CCCR_COMPLEMENT (1<<19) | ||
404 | #define P4_CCCR_COMPARE (1<<18) | ||
405 | #define P4_CCCR_REQUIRED (3<<16) | ||
406 | #define P4_CCCR_ESCR_SELECT(N) ((N)<<13) | ||
407 | #define P4_CCCR_ENABLE (1<<12) | ||
408 | #define P4_CCCR_OVF (1<<31) | ||
409 | |||
410 | /* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter | ||
411 | CRU_ESCR0 (with any non-null event selector) through a complemented | ||
412 | max threshold. [IA32-Vol3, Section 14.9.9] */ | ||
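
/* How the "clock" trick works (paraphrasing the SDM section cited above):
   with COMPARE and COMPLEMENT set and THRESHOLD at its maximum of 15, the
   per-cycle event count is always <= the threshold, so IQ_COUNTER0
   increments once every cycle regardless of which CRU_ESCR0 event is
   selected, and overflows after the programmed period. */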
413 | |||
414 | static int setup_p4_watchdog(unsigned nmi_hz) | ||
415 | { | ||
416 | unsigned int perfctr_msr, evntsel_msr, cccr_msr; | ||
417 | unsigned int evntsel, cccr_val; | ||
418 | unsigned int misc_enable, dummy; | ||
419 | unsigned int ht_num; | ||
420 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
421 | |||
422 | rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy); | ||
423 | if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL)) | ||
424 | return 0; | ||
425 | |||
426 | #ifdef CONFIG_SMP | ||
427 | /* detect which hyperthread we are on */ | ||
428 | if (smp_num_siblings == 2) { | ||
429 | unsigned int ebx, apicid; | ||
430 | |||
431 | ebx = cpuid_ebx(1); | ||
432 | apicid = (ebx >> 24) & 0xff; | ||
433 | ht_num = apicid & 1; | ||
434 | } else | ||
435 | #endif | ||
436 | ht_num = 0; | ||
437 | |||
438 | /* performance counters are shared resources | ||
439 | * assign each hyperthread its own set | ||
440 | * (re-use the ESCR0 register, seems safe | ||
441 | * and keeps the cccr_val the same) | ||
442 | */ | ||
443 | if (!ht_num) { | ||
444 | /* logical cpu 0 */ | ||
445 | perfctr_msr = MSR_P4_IQ_PERFCTR0; | ||
446 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
447 | cccr_msr = MSR_P4_IQ_CCCR0; | ||
448 | cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); | ||
449 | } else { | ||
450 | /* logical cpu 1 */ | ||
451 | perfctr_msr = MSR_P4_IQ_PERFCTR1; | ||
452 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
453 | cccr_msr = MSR_P4_IQ_CCCR1; | ||
454 | cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4); | ||
455 | } | ||
456 | |||
457 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | ||
458 | | P4_ESCR_OS | ||
459 | | P4_ESCR_USR; | ||
460 | |||
461 | cccr_val |= P4_CCCR_THRESHOLD(15) | ||
462 | | P4_CCCR_COMPLEMENT | ||
463 | | P4_CCCR_COMPARE | ||
464 | | P4_CCCR_REQUIRED; | ||
465 | |||
466 | wrmsr(evntsel_msr, evntsel, 0); | ||
467 | wrmsr(cccr_msr, cccr_val, 0); | ||
468 | write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); | ||
469 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
470 | cccr_val |= P4_CCCR_ENABLE; | ||
471 | wrmsr(cccr_msr, cccr_val, 0); | ||
472 | wd->perfctr_msr = perfctr_msr; | ||
473 | wd->evntsel_msr = evntsel_msr; | ||
474 | wd->cccr_msr = cccr_msr; | ||
475 | return 1; | ||
476 | } | ||
477 | |||
478 | static void stop_p4_watchdog(void) | ||
479 | { | ||
480 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
481 | wrmsr(wd->cccr_msr, 0, 0); | ||
482 | wrmsr(wd->evntsel_msr, 0, 0); | ||
483 | } | ||
484 | |||
485 | static int p4_reserve(void) | ||
486 | { | ||
487 | if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0)) | ||
488 | return 0; | ||
489 | #ifdef CONFIG_SMP | ||
490 | if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1)) | ||
491 | goto fail1; | ||
492 | #endif | ||
493 | if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0)) | ||
494 | goto fail2; | ||
495 | /* RED-PEN why is ESCR1 not reserved here? */ | ||
496 | return 1; | ||
497 | fail2: | ||
498 | #ifdef CONFIG_SMP | ||
499 | if (smp_num_siblings > 1) | ||
500 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR1); | ||
501 | fail1: | ||
502 | #endif | ||
503 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); | ||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | static void p4_unreserve(void) | ||
508 | { | ||
509 | #ifdef CONFIG_SMP | ||
510 | if (smp_num_siblings > 1) | ||
511 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR1); | ||
512 | #endif | ||
513 | release_evntsel_nmi(MSR_P4_CRU_ESCR0); | ||
514 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); | ||
515 | } | ||
516 | |||
517 | static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
518 | { | ||
519 | unsigned dummy; | ||
520 | /* | ||
521 | * P4 quirks: | ||
522 | * - An overflown perfctr will assert its interrupt | ||
523 | * until the OVF flag in its CCCR is cleared. | ||
524 | * - LVTPC is masked on interrupt and must be | ||
525 | * unmasked by the LVTPC handler. | ||
526 | */ | ||
527 | rdmsrl(wd->cccr_msr, dummy); | ||
528 | dummy &= ~P4_CCCR_OVF; | ||
529 | wrmsrl(wd->cccr_msr, dummy); | ||
530 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
531 | /* start the cycle over again */ | ||
532 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); | ||
533 | } | ||
534 | |||
535 | static struct wd_ops p4_wd_ops = { | ||
536 | .reserve = p4_reserve, | ||
537 | .unreserve = p4_unreserve, | ||
538 | .setup = setup_p4_watchdog, | ||
539 | .rearm = p4_rearm, | ||
540 | .stop = stop_p4_watchdog, | ||
541 | /* RED-PEN this is wrong for the other sibling */ | ||
542 | .perfctr = MSR_P4_BPU_PERFCTR0, | ||
543 | .evntsel = MSR_P4_BSU_ESCR0, | ||
544 | .checkbit = 1ULL<<39, | ||
545 | }; | ||
546 | |||
547 | /* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully | ||
548 | all future Intel CPUs. */ | ||
549 | |||
550 | #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL | ||
551 | #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK | ||
552 | |||
553 | static int setup_intel_arch_watchdog(unsigned nmi_hz) | ||
554 | { | ||
555 | unsigned int ebx; | ||
556 | union cpuid10_eax eax; | ||
557 | unsigned int unused; | ||
558 | unsigned int perfctr_msr, evntsel_msr; | ||
559 | unsigned int evntsel; | ||
560 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
561 | |||
562 | /* | ||
563 | * Check whether the Architectural PerfMon supports | ||
564 | * Unhalted Core Cycles Event or not. | ||
565 | * NOTE: Corresponding bit = 0 in ebx indicates event present. | ||
566 | */ | ||
567 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | ||
568 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
569 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | ||
570 | return 0; | ||
571 | |||
572 | perfctr_msr = wd_ops->perfctr; | ||
573 | evntsel_msr = wd_ops->evntsel; | ||
574 | |||
575 | wrmsrl(perfctr_msr, 0UL); | ||
576 | |||
577 | evntsel = ARCH_PERFMON_EVENTSEL_INT | ||
578 | | ARCH_PERFMON_EVENTSEL_OS | ||
579 | | ARCH_PERFMON_EVENTSEL_USR | ||
580 | | ARCH_PERFMON_NMI_EVENT_SEL | ||
581 | | ARCH_PERFMON_NMI_EVENT_UMASK; | ||
582 | |||
583 | /* setup the timer */ | ||
584 | wrmsr(evntsel_msr, evntsel, 0); | ||
585 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
586 | write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); | ||
587 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
588 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
589 | wrmsr(evntsel_msr, evntsel, 0); | ||
590 | |||
591 | wd->perfctr_msr = perfctr_msr; | ||
592 | wd->evntsel_msr = evntsel_msr; | ||
593 | wd->cccr_msr = 0; //unused | ||
594 | wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1); | ||
595 | return 1; | ||
596 | } | ||
597 | |||
598 | static struct wd_ops intel_arch_wd_ops = { | ||
599 | .reserve = single_msr_reserve, | ||
600 | .unreserve = single_msr_unreserve, | ||
601 | .setup = setup_intel_arch_watchdog, | ||
602 | .rearm = p6_rearm, | ||
603 | .stop = single_msr_stop_watchdog, | ||
604 | .perfctr = MSR_ARCH_PERFMON_PERFCTR1, | ||
605 | .evntsel = MSR_ARCH_PERFMON_EVENTSEL1, | ||
606 | }; | ||
607 | |||
608 | static struct wd_ops coreduo_wd_ops = { | ||
609 | .reserve = single_msr_reserve, | ||
610 | .unreserve = single_msr_unreserve, | ||
611 | .setup = setup_intel_arch_watchdog, | ||
612 | .rearm = p6_rearm, | ||
613 | .stop = single_msr_stop_watchdog, | ||
614 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | ||
615 | .evntsel = MSR_ARCH_PERFMON_EVENTSEL0, | ||
616 | }; | ||
617 | |||
618 | static void probe_nmi_watchdog(void) | ||
619 | { | ||
620 | switch (boot_cpu_data.x86_vendor) { | ||
621 | case X86_VENDOR_AMD: | ||
622 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && | ||
623 | boot_cpu_data.x86 != 16) | ||
624 | return; | ||
625 | wd_ops = &k7_wd_ops; | ||
626 | break; | ||
627 | case X86_VENDOR_INTEL: | ||
628 | /* Work around Core Duo (Yonah) errata AE49 where perfctr1 | ||
629 | doesn't have a working enable bit. */ | ||
630 | if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) { | ||
631 | wd_ops = &coreduo_wd_ops; | ||
632 | break; | ||
633 | } | ||
634 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | ||
635 | wd_ops = &intel_arch_wd_ops; | ||
636 | break; | ||
637 | } | ||
638 | switch (boot_cpu_data.x86) { | ||
639 | case 6: | ||
640 | if (boot_cpu_data.x86_model > 0xd) | ||
641 | return; | ||
642 | |||
643 | wd_ops = &p6_wd_ops; | ||
644 | break; | ||
645 | case 15: | ||
646 | if (boot_cpu_data.x86_model > 0x4) | ||
647 | return; | ||
648 | |||
649 | wd_ops = &p4_wd_ops; | ||
650 | break; | ||
651 | default: | ||
652 | return; | ||
653 | } | ||
654 | break; | ||
655 | } | ||
656 | } | ||
657 | |||
658 | /* Interface to nmi.c */ | ||
659 | |||
660 | int lapic_watchdog_init(unsigned nmi_hz) | ||
661 | { | ||
662 | if (!wd_ops) { | ||
663 | probe_nmi_watchdog(); | ||
664 | if (!wd_ops) | ||
665 | return -1; | ||
666 | |||
667 | if (!wd_ops->reserve()) { | ||
668 | printk(KERN_ERR | ||
669 | "NMI watchdog: cannot reserve perfctrs\n"); | ||
670 | return -1; | ||
671 | } | ||
672 | } | ||
673 | |||
674 | if (!(wd_ops->setup(nmi_hz))) { | ||
675 | printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n", | ||
676 | raw_smp_processor_id()); | ||
677 | return -1; | ||
678 | } | ||
679 | |||
680 | return 0; | ||
681 | } | ||
682 | |||
683 | void lapic_watchdog_stop(void) | ||
684 | { | ||
685 | if (wd_ops) | ||
686 | wd_ops->stop(); | ||
687 | } | ||
688 | |||
689 | unsigned lapic_adjust_nmi_hz(unsigned hz) | ||
690 | { | ||
691 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
692 | if (wd->perfctr_msr == MSR_P6_PERFCTR0 || | ||
693 | wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1) | ||
694 | hz = adjust_for_32bit_ctr(hz); | ||
695 | return hz; | ||
696 | } | ||
697 | |||
698 | int lapic_wd_event(unsigned nmi_hz) | ||
699 | { | ||
700 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
701 | u64 ctr; | ||
702 | rdmsrl(wd->perfctr_msr, ctr); | ||
703 | if (ctr & wd_ops->checkbit) { /* perfctr still running? */ | ||
704 | return 0; | ||
705 | } | ||
706 | wd_ops->rearm(wd, nmi_hz); | ||
707 | return 1; | ||
708 | } | ||
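
/* Note on the checkbit test above: the counter was programmed with a
   negative value, so its top implemented bit (bit 47 on K7, bit 39 on P6/P4,
   bit_width-1 for architectural perfmon) stays set while it is still
   counting up towards overflow.  If that bit is still set, this NMI was not
   generated by the watchdog and is left for other handlers; once the counter
   has overflowed, the bit is clear and the watchdog is rearmed. */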
709 | |||
710 | int lapic_watchdog_ok(void) | ||
711 | { | ||
712 | return wd_ops != NULL; | ||
713 | } | ||