author     Andi Kleen <ak@suse.de>              2007-04-02 06:14:12 -0400
committer  Andi Kleen <andi@basil.nowhere.org>  2007-04-02 06:14:12 -0400
commit     89e07569e4e4e935b2cec18e9d94f131aecb2e40
tree       93c7c3855c82e9ce47505e3d1d6a144100052748  arch/i386/kernel/nmi.c
parent     3556ddfa9284a86a59a9b78fe5894430f6ab4eef
[PATCH] x86-64: Let oprofile reserve MSR on all CPUs
The MSR reservation is per CPU and oprofile would only allocate them
on the CPU it was initialized on. Change this to handle all CPUs.
This also fixes a warning about unprotected use of smp_processor_id()
in preemptible kernels.
Signed-off-by: Andi Kleen <ak@suse.de>
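
The heart of the change is the reservation pattern now used by reserve_perfctr_nmi() and reserve_evntsel_nmi(): try to claim the counter bit on every possible CPU, and if any CPU already owns it, release whatever was claimed so far and fail. Below is a minimal user-space sketch of that claim-or-roll-back loop, not kernel code: owner[], NR_CPUS_DEMO and the demo_* helpers are stand-ins for the per-CPU perfctr_nmi_owner bitmaps, for_each_possible_cpu() and the __reserve_*/__release_* helpers added by the patch.

```c
/* Simplified illustration of the claim-or-roll-back pattern; owner[]
 * stands in for the per-CPU perfctr_nmi_owner bitmaps and NR_CPUS_DEMO
 * for the set of possible CPUs. */
#include <stdio.h>

#define NR_CPUS_DEMO 4

static unsigned long owner[NR_CPUS_DEMO];	/* bit N set => counter N taken on that CPU */

static int demo_reserve_one(int cpu, unsigned int counter)
{
	if (owner[cpu] & (1UL << counter))
		return 0;			/* already owned on this CPU */
	owner[cpu] |= 1UL << counter;
	return 1;
}

static void demo_release_one(int cpu, unsigned int counter)
{
	owner[cpu] &= ~(1UL << counter);
}

/* Reserve the counter on every CPU, or on none of them. */
static int demo_reserve_all(unsigned int counter)
{
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
		if (!demo_reserve_one(cpu, counter)) {
			for (i = 0; i < cpu; i++)	/* roll back the CPUs already claimed */
				demo_release_one(i, counter);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	owner[2] = 1UL << 0;	/* pretend CPU 2 already uses counter 0 */
	printf("reserve counter 0: %d\n", demo_reserve_all(0));	/* prints 0: fails, rolls back */
	printf("reserve counter 1: %d\n", demo_reserve_all(1));	/* prints 1: succeeds on all CPUs */
	return 0;
}
```

The per-CPU watchdog setup paths do not need the loop at all: they call the new __reserve_*/__release_* helpers with cpu == -1, which the helpers translate into smp_processor_id(), i.e. the CPU the code is currently running on.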
Diffstat (limited to 'arch/i386/kernel/nmi.c')
-rw-r--r--  arch/i386/kernel/nmi.c  127
1 file changed, 96 insertions, 31 deletions
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 14702427b104..851f3b1c5b1c 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -122,64 +122,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 /* checks for a bit availability (hack for oprofile) */
 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
+	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
 /* checks the an msr for availability */
 int avail_to_resrv_perfctr_nmi(unsigned int msr)
 {
 	unsigned int counter;
+	int cpu;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
 		return 1;
 	return 0;
 }
 
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
 }
 
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_perfctr_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_perfctr_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_perfctr_nmi(cpu, msr);
+	}
+}
+
+int __reserve_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
 
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_evntsel_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_evntsel_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_evntsel_nmi(cpu, msr);
+	}
 }
 
 static __cpuinit inline int nmi_known_cpu(void)
@@ -507,10 +572,10 @@ static int setup_k7_watchdog(void)
 
 	perfctr_msr = MSR_K7_PERFCTR0;
 	evntsel_msr = MSR_K7_EVNTSEL0;
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -533,7 +598,7 @@ static int setup_k7_watchdog(void)
 	wd->check_bit = 1ULL<<63;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -544,8 +609,8 @@ static void stop_k7_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define P6_EVNTSEL0_ENABLE (1 << 22)
@@ -563,10 +628,10 @@ static int setup_p6_watchdog(void)
 
 	perfctr_msr = MSR_P6_PERFCTR0;
 	evntsel_msr = MSR_P6_EVNTSEL0;
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -590,7 +655,7 @@ static int setup_p6_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -601,8 +666,8 @@ static void stop_p6_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 /* Note that these events don't tick when the CPU idles. This means
@@ -668,10 +733,10 @@ static int setup_p4_watchdog(void)
 		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
 	}
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -695,7 +760,7 @@ static int setup_p4_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -707,8 +772,8 @@ static void stop_p4_watchdog(void)
 	wrmsr(wd->cccr_msr, 0, 0);
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -736,10 +801,10 @@ static int setup_intel_arch_watchdog(void)
 	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
 	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -764,7 +829,7 @@ static int setup_intel_arch_watchdog(void)
 	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -787,8 +852,8 @@ static void stop_intel_arch_watchdog(void)
 		return;
 
 	wrmsr(wd->evntsel_msr, 0, 0);
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 void setup_apic_nmi_watchdog (void *unused)