-rw-r--r--  arch/i386/kernel/nmi.c   | 127
-rw-r--r--  arch/x86_64/kernel/nmi.c | 118
2 files changed, 187 insertions(+), 58 deletions(-)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 14702427b104..851f3b1c5b1c 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -122,64 +122,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 /* checks for a bit availability (hack for oprofile) */
 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
+	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
 /* checks the an msr for availability */
 int avail_to_resrv_perfctr_nmi(unsigned int msr)
 {
 	unsigned int counter;
+	int cpu;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
 		return 1;
 	return 0;
 }
 
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
 }
 
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_perfctr_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_perfctr_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_perfctr_nmi(cpu, msr);
+	}
+}
+
+int __reserve_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
 
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_evntsel_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_evntsel_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_evntsel_nmi(cpu, msr);
+	}
 }
 
 static __cpuinit inline int nmi_known_cpu(void)
@@ -507,10 +572,10 @@ static int setup_k7_watchdog(void)
 
 	perfctr_msr = MSR_K7_PERFCTR0;
 	evntsel_msr = MSR_K7_EVNTSEL0;
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -533,7 +598,7 @@ static int setup_k7_watchdog(void)
 	wd->check_bit = 1ULL<<63;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -544,8 +609,8 @@ static void stop_k7_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define P6_EVNTSEL0_ENABLE (1 << 22)
@@ -563,10 +628,10 @@ static int setup_p6_watchdog(void)
 
 	perfctr_msr = MSR_P6_PERFCTR0;
 	evntsel_msr = MSR_P6_EVNTSEL0;
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -590,7 +655,7 @@ static int setup_p6_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -601,8 +666,8 @@ static void stop_p6_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 /* Note that these events don't tick when the CPU idles. This means
@@ -668,10 +733,10 @@ static int setup_p4_watchdog(void)
 		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
 	}
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -695,7 +760,7 @@ static int setup_p4_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -707,8 +772,8 @@ static void stop_p4_watchdog(void)
 	wrmsr(wd->cccr_msr, 0, 0);
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -736,10 +801,10 @@ static int setup_intel_arch_watchdog(void)
 	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
 	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -764,7 +829,7 @@ static int setup_intel_arch_watchdog(void)
 	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -787,8 +852,8 @@ static void stop_intel_arch_watchdog(void)
 		return;
 
 	wrmsr(wd->evntsel_msr, 0, 0);
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 void setup_apic_nmi_watchdog (void *unused)
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 82d9d85d5270..17cf2d6b5e99 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -108,64 +108,128 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 /* checks for a bit availability (hack for oprofile) */
 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
+	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
 /* checks the an msr for availability */
 int avail_to_resrv_perfctr_nmi(unsigned int msr)
 {
 	unsigned int counter;
+	int cpu;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
 		return 1;
 	return 0;
 }
 
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
 }
 
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_perfctr_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_perfctr_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu)
+		__release_perfctr_nmi(cpu, msr);
+}
+
+int __reserve_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
 
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
+	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_evntsel_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_evntsel_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_evntsel_nmi(cpu, msr);
+	}
 }
 
 static __cpuinit inline int nmi_known_cpu(void)
@@ -472,10 +536,10 @@ static int setup_k7_watchdog(void)
 
 	perfctr_msr = MSR_K7_PERFCTR0;
 	evntsel_msr = MSR_K7_EVNTSEL0;
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	/* Simulator may not support it */
@@ -501,9 +565,9 @@ static int setup_k7_watchdog(void)
 	wd->check_bit = 1ULL<<63;
 	return 1;
 fail2:
-	release_evntsel_nmi(evntsel_msr);
+	__release_evntsel_nmi(-1, evntsel_msr);
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -514,8 +578,8 @@ static void stop_k7_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 /* Note that these events don't tick when the CPU idles. This means
@@ -581,10 +645,10 @@ static int setup_p4_watchdog(void)
 		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
 	}
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -609,7 +673,7 @@ static int setup_p4_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -621,8 +685,8 @@ static void stop_p4_watchdog(void)
 	wrmsr(wd->cccr_msr, 0, 0);
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -650,10 +714,10 @@ static int setup_intel_arch_watchdog(void)
 	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
 	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -680,7 +744,7 @@ static int setup_intel_arch_watchdog(void)
 	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -704,8 +768,8 @@ static void stop_intel_arch_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 void setup_apic_nmi_watchdog(void *unused)
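Both files make the same change: the perfctr/evntsel ownership bitmaps stay per-CPU, but the exported reserve_*_nmi()/release_*_nmi() helpers now walk every possible CPU and undo the reservations already made if any single CPU fails, while the new __reserve_*/__release_* variants act on one explicit CPU (-1 meaning the current CPU via smp_processor_id()). As a rough illustration of that reserve-or-roll-back pattern, here is a minimal user-space sketch; the names, the fixed CPU count and the plain array are stand-ins for the kernel's per-CPU data and for_each_possible_cpu(), not part of the patch:

/* reserve-everywhere-or-roll-back, sketched with an ordinary array */
#include <stdio.h>

#define NR_CPUS 4

static unsigned long perfctr_owner[NR_CPUS];	/* one ownership bitmap per CPU */

/* claim one counter bit on a single CPU; 1 on success, 0 if already owned */
static int reserve_on_cpu(int cpu, unsigned int counter)
{
	if (perfctr_owner[cpu] & (1UL << counter))
		return 0;
	perfctr_owner[cpu] |= 1UL << counter;
	return 1;
}

static void release_on_cpu(int cpu, unsigned int counter)
{
	perfctr_owner[cpu] &= ~(1UL << counter);
}

/*
 * Claim the bit on every CPU. If some CPU already owns it, release the
 * CPUs claimed so far (indices below the failing one) and report failure,
 * mirroring what reserve_perfctr_nmi() above does with for_each_possible_cpu().
 */
static int reserve_everywhere(unsigned int counter)
{
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!reserve_on_cpu(cpu, counter)) {
			for (i = 0; i < cpu; i++)
				release_on_cpu(i, counter);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	perfctr_owner[2] |= 1UL << 0;	/* pretend CPU 2 already owns bit 0 */

	printf("reserve bit 0: %d\n", reserve_everywhere(0));	/* 0: fails, rolled back */
	printf("reserve bit 1: %d\n", reserve_everywhere(1));	/* 1: claimed on all CPUs */
	printf("CPU 0 bitmap: %#lx\n", perfctr_owner[0]);	/* only bit 1 is set */
	return 0;
}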