author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-04-02 14:41:55 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-04-02 14:41:55 -0400
commit		9a5ee4cc9ef8de5185114237a81f5f395e21d8fd (patch)
tree		378d367ba1d97304051d3be0cec901fe8ab38d60 /arch/i386
parent		b6a8b316c667f914c198a2de62e6729f359b7931 (diff)
parent		a369a7100da3b4f5c2269be25160653d2c7013fc (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6:
[PATCH] x86: Don't probe for DDC on VBE1.2
[PATCH] x86-64: Increase NMI watchdog probing timeout
[PATCH] x86-64: Let oprofile reserve MSR on all CPUs
[PATCH] x86-64: Disable local APIC timer use on AMD systems with C1E
Diffstat (limited to 'arch/i386')
-rw-r--r--	arch/i386/boot/video.S		14
-rw-r--r--	arch/i386/kernel/apic.c		32
-rw-r--r--	arch/i386/kernel/cpu/amd.c	34
-rw-r--r--	arch/i386/kernel/nmi.c		129
4 files changed, 148 insertions, 61 deletions
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index 2c5b5cc55f79..8143c9516cb4 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -571,6 +571,16 @@ setr1: lodsw
 	jmp	_m_s

 check_vesa:
+#ifdef CONFIG_FIRMWARE_EDID
+	leaw	modelist+1024, %di
+	movw	$0x4f00, %ax
+	int	$0x10
+	cmpw	$0x004f, %ax
+	jnz	setbad
+
+	movw	4(%di), %ax
+	movw	%ax, vbe_version
+#endif
 	leaw	modelist+1024, %di
 	subb	$VIDEO_FIRST_VESA>>8, %bh
 	movw	%bx, %cx		# Get mode information structure
@@ -1945,6 +1955,9 @@ store_edid:
 	rep
 	stosl

+	cmpw	$0x0200, vbe_version	# only do EDID on >= VBE2.0
+	jl	no_edid
+
 	pushw	%es			# save ES
 	xorw	%di, %di		# Report Capability
 	pushw	%di
@@ -1987,6 +2000,7 @@ do_restore:	.byte	0	# Screen contents altered during mode change
 svga_prefix:	.byte	VIDEO_FIRST_BIOS>>8	# Default prefix for BIOS modes
 graphic_mode:	.byte	0	# Graphic mode with a linear frame buffer
 dac_size:	.byte	6	# DAC bit depth
+vbe_version:	.word	0	# VBE bios version

 # Status messages
 keymsg:		.ascii	"Press <RETURN> to see video modes available, "
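
The added assembly asks the BIOS for the VBE controller information block (INT 0x10, AX=0x4F00), records the reported VBE version, and later skips the EDID probe unless the BIOS reports VBE 2.0 or newer. The same gate expressed as a standalone C sketch; the struct layout and names here are illustrative, not the actual boot-code data structures:

/* Standalone sketch of the VBE-version gate added above (illustrative). */
#include <stdio.h>

struct vbe_controller_info {
	char		signature[4];	/* "VESA" */
	unsigned short	version;	/* BCD: 0x0102 = VBE 1.2, 0x0200 = VBE 2.0 */
	/* ...rest of the block returned by INT 0x10, AX=0x4F00... */
};

static int may_probe_edid(const struct vbe_controller_info *info)
{
	/* Pre-2.0 VBE BIOSes are skipped: the boot code only attempts the
	 * DDC/EDID read when the controller reports VBE 2.0 or newer. */
	return info->version >= 0x0200;
}

int main(void)
{
	struct vbe_controller_info vbe12 = { .version = 0x0102 };
	struct vbe_controller_info vbe30 = { .version = 0x0300 };

	printf("VBE 1.2: probe EDID? %d\n", may_probe_edid(&vbe12));
	printf("VBE 3.0: probe EDID? %d\n", may_probe_edid(&vbe30));
	return 0;
}
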
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index e88415282a6f..93aa911646ad 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -272,32 +272,6 @@ static void __devinit setup_APIC_timer(void)
 }

 /*
- * Detect systems with known broken BIOS implementations
- */
-static int __init lapic_check_broken_bios(struct dmi_system_id *d)
-{
-	printk(KERN_NOTICE "%s detected: disabling lapic timer.\n",
-		d->ident);
-	local_apic_timer_disabled = 1;
-	return 0;
-}
-
-static struct dmi_system_id __initdata broken_bios_dmi_table[] = {
-	{
-		/*
-		 * BIOS exports only C1 state, but uses deeper power
-		 * modes behind the kernels back.
-		 */
-		.callback = lapic_check_broken_bios,
-		.ident = "HP nx6325",
-		.matches = {
-			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
-		},
-	},
-	{}
-};
-
-/*
  * In this functions we calibrate APIC bus clocks to the external timer.
  *
  * We want to do the calibration only once since we want to have local timer
@@ -372,12 +346,12 @@ void __init setup_boot_APIC_clock(void)
 	long delta, deltapm;
 	int pm_referenced = 0;

-	/* Detect know broken systems */
-	dmi_check_system(broken_bios_dmi_table);
+	if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
+		local_apic_timer_disabled = 1;

 	/*
 	 * The local apic timer can be disabled via the kernel
-	 * commandline or from the dmi quirk above. Register the lapic
+	 * commandline or from the test above. Register the lapic
 	 * timer as a dummy clock event source on SMP systems, so the
 	 * broadcast mechanism is used. On UP systems simply ignore it.
 	 */
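
Taken together with the amd.c change below, the quirk becomes data-driven: CPU identification records the problem once as a synthetic capability bit instead of matching specific DMI product names, and the APIC clock setup only tests that bit. A condensed userspace model of that hand-off follows; the real code uses set_bit()/boot_cpu_has() on c->x86_capability, and the bit index here is illustrative:

/* Userspace model of the capability-bit hand-off (illustrative, not kernel code). */
#include <stdio.h>

#define LAPIC_TIMER_BROKEN_BIT	0

static unsigned long x86_capability;
static int local_apic_timer_disabled;

int main(void)
{
	int c1e_detected = 1;	/* what amd_apic_timer_broken() would report */

	/* producer: init_amd() in the amd.c hunk below */
	if (c1e_detected)
		x86_capability |= 1UL << LAPIC_TIMER_BROKEN_BIT;

	/* consumer: setup_boot_APIC_clock() in the apic.c hunk above */
	if (x86_capability & (1UL << LAPIC_TIMER_BROKEN_BIT))
		local_apic_timer_disabled = 1;

	printf("local_apic_timer_disabled = %d\n", local_apic_timer_disabled);
	return 0;
}
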
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 41cfea57232b..2d47db482972 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -22,6 +22,37 @@
 extern void vide(void);
 __asm__(".align 4\nvide: ret");

+#define ENABLE_C1E_MASK		0x18000000
+#define CPUID_PROCESSOR_SIGNATURE	1
+#define CPUID_XFAM		0x0ff00000
+#define CPUID_XFAM_K8		0x00000000
+#define CPUID_XFAM_10H		0x00100000
+#define CPUID_XFAM_11H		0x00200000
+#define CPUID_XMOD		0x000f0000
+#define CPUID_XMOD_REV_F	0x00040000
+
+/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
+static __cpuinit int amd_apic_timer_broken(void)
+{
+	u32 lo, hi;
+	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+	switch (eax & CPUID_XFAM) {
+	case CPUID_XFAM_K8:
+		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
+			break;
+	case CPUID_XFAM_10H:
+	case CPUID_XFAM_11H:
+		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
+		if (lo & ENABLE_C1E_MASK)
+			return 1;
+		break;
+	default:
+		/* err on the side of caution */
+		return 1;
+	}
+	return 0;
+}
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -241,6 +272,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)

 	if (cpuid_eax(0x80000000) >= 0x80000006)
 		num_cache_leaves = 3;
+
+	if (amd_apic_timer_broken())
+		set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
 }

 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
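
For reference, CPUID_XFAM and CPUID_XMOD select the extended-family (EAX[27:20]) and extended-model (EAX[19:16]) fields of the CPUID leaf 1 signature: extended family 0 is K8 (family 0Fh), 1 is family 10h, 2 is family 11h, and on K8 an extended model of 4 or higher corresponds to revision F, the first K8 revision with C1E. Pre-rev-F K8s therefore break out early, rev-F+ K8s and families 10h/11h fall through to the MSR_K8_ENABLE_C1E read, and unknown families are conservatively reported as broken. A small standalone decoder illustrating the field extraction; the sample signature value is illustrative:

/* Standalone decoder for the CPUID leaf 1 EAX fields the masks above select. */
#include <stdio.h>

#define CPUID_XFAM	0x0ff00000	/* extended family, EAX[27:20] */
#define CPUID_XMOD	0x000f0000	/* extended model,  EAX[19:16] */

int main(void)
{
	unsigned int eax = 0x00040f12;	/* e.g. a K8 revision F signature */

	printf("extended family field: 0x%08x\n", eax & CPUID_XFAM);	/* 0x0 -> K8 */
	printf("extended model  field: 0x%08x\n", eax & CPUID_XMOD);	/* 0x40000 -> rev F or later */
	return 0;
}
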
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 14702427b104..a98ba88a8c0c 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -122,64 +122,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 /* checks for a bit availability (hack for oprofile) */
 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
+	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }

 /* checks the an msr for availability */
 int avail_to_resrv_perfctr_nmi(unsigned int msr)
 {
 	unsigned int counter;
+	int cpu;

 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }

-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();

 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
 		return 1;
 	return 0;
 }

-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();

 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
 }

-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_perfctr_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_perfctr_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_perfctr_nmi(cpu, msr);
+	}
+}
+
+int __reserve_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();

 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }

-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();

 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_evntsel_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_evntsel_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_evntsel_nmi(cpu, msr);
+	}
 }

 static __cpuinit inline int nmi_known_cpu(void)
@@ -263,7 +328,7 @@ static int __init check_nmi_watchdog(void)
 	for_each_possible_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
-	mdelay((10*1000)/nmi_hz); // wait 10 ticks
+	mdelay((20*1000)/nmi_hz); // wait 20 ticks

 	for_each_possible_cpu(cpu) {
 #ifdef CONFIG_SMP
@@ -507,10 +572,10 @@ static int setup_k7_watchdog(void)

 	perfctr_msr = MSR_K7_PERFCTR0;
 	evntsel_msr = MSR_K7_EVNTSEL0;
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;

-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;

 	wrmsrl(perfctr_msr, 0UL);
@@ -533,7 +598,7 @@ static int setup_k7_watchdog(void)
 	wd->check_bit = 1ULL<<63;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -544,8 +609,8 @@ static void stop_k7_watchdog(void)

 	wrmsr(wd->evntsel_msr, 0, 0);

-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }

 #define P6_EVNTSEL0_ENABLE	(1 << 22)
@@ -563,10 +628,10 @@ static int setup_p6_watchdog(void)

 	perfctr_msr = MSR_P6_PERFCTR0;
 	evntsel_msr = MSR_P6_EVNTSEL0;
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;

-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;

 	wrmsrl(perfctr_msr, 0UL);
@@ -590,7 +655,7 @@ static int setup_p6_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -601,8 +666,8 @@ static void stop_p6_watchdog(void)

 	wrmsr(wd->evntsel_msr, 0, 0);

-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }

 /* Note that these events don't tick when the CPU idles. This means
@@ -668,10 +733,10 @@ static int setup_p4_watchdog(void)
 		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
 	}

-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;

-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;

 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -695,7 +760,7 @@ static int setup_p4_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -707,8 +772,8 @@ static void stop_p4_watchdog(void)
 	wrmsr(wd->cccr_msr, 0, 0);
 	wrmsr(wd->evntsel_msr, 0, 0);

-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }

 #define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -736,10 +801,10 @@ static int setup_intel_arch_watchdog(void)
 	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
 	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;

-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;

-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;

 	wrmsrl(perfctr_msr, 0UL);
@@ -764,7 +829,7 @@ static int setup_intel_arch_watchdog(void)
 	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -787,8 +852,8 @@ static void stop_intel_arch_watchdog(void)
 		return;

 	wrmsr(wd->evntsel_msr, 0, 0);
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }

 void setup_apic_nmi_watchdog (void *unused)
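
The nmi.c rework splits MSR reservation into per-CPU helpers (__reserve_*/__release_*, where a cpu argument of -1 means the current CPU, as used by the watchdog setup paths above) and all-CPU wrappers (reserve_perfctr_nmi()/reserve_evntsel_nmi()), which is what lets oprofile claim a counter on every CPU and roll back cleanly if any CPU already owns it. A minimal standalone model of that reserve-with-rollback pattern, with plain arrays standing in for the per-CPU owner bitmaps (illustrative only, not the kernel API):

/* Userspace model of reserving a counter bit on all CPUs with rollback. */
#include <stdio.h>

#define NR_CPUS	4

static unsigned long owner[NR_CPUS];		/* stands in for per_cpu(perfctr_nmi_owner) */

static int reserve_on(int cpu, int bit)		/* roughly __reserve_perfctr_nmi() */
{
	if (owner[cpu] & (1UL << bit))
		return 0;			/* already owned on this CPU */
	owner[cpu] |= 1UL << bit;
	return 1;
}

static void release_on(int cpu, int bit)	/* roughly __release_perfctr_nmi() */
{
	owner[cpu] &= ~(1UL << bit);
}

/* Like reserve_perfctr_nmi() after the patch: claim the bit on every CPU,
 * undoing the partial claim if any CPU already owns it. */
static int reserve_all(int bit)
{
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!reserve_on(cpu, bit)) {
			for (i = 0; i < cpu; i++)
				release_on(i, bit);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	reserve_on(2, 0);	/* pretend CPU 2's watchdog already uses counter 0 */

	printf("reserve counter 0 on all CPUs: %d\n", reserve_all(0));	/* fails, rolls back */
	printf("reserve counter 1 on all CPUs: %d\n", reserve_all(1));	/* succeeds */
	return 0;
}
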