author    Andi Kleen <ak@suse.de>                        2007-04-02 06:14:12 -0400
committer Andi Kleen <andi@basil.nowhere.org>            2007-04-02 06:14:12 -0400
commit    89e07569e4e4e935b2cec18e9d94f131aecb2e40
tree      93c7c3855c82e9ce47505e3d1d6a144100052748 /arch/x86_64/kernel/nmi.c
parent    3556ddfa9284a86a59a9b78fe5894430f6ab4eef
[PATCH] x86-64: Let oprofile reserve MSR on all CPUs
MSR reservations are per CPU, but oprofile would only allocate them
on the CPU it was initialized on. Change this to handle all CPUs.
This also fixes a warning about unprotected use of smp_processor_id()
in preemptible kernels.
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/x86_64/kernel/nmi.c')
 arch/x86_64/kernel/nmi.c | 118
 1 file changed, 91 insertions(+), 27 deletions(-)
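Before the patch itself, here is a minimal standalone sketch of the reservation pattern it introduces (illustrative only, not the kernel code: NCPUS and the plain array below stand in for the kernel's per-CPU perfctr_nmi_owner variable). Every CPU keeps its own bitmask of reserved counters; a global reservation must claim the bit on each CPU in turn, and on failure it releases the bit on the CPUs already claimed.

#include <stdio.h>

#define NCPUS 4

/* Stand-in for per_cpu(perfctr_nmi_owner, cpu): one owner bitmask per CPU. */
static unsigned long perfctr_nmi_owner[NCPUS];

static int reserve_on_cpu(int cpu, unsigned int counter)
{
	if (perfctr_nmi_owner[cpu] & (1UL << counter))
		return 0;			/* already owned on this CPU */
	perfctr_nmi_owner[cpu] |= 1UL << counter;
	return 1;
}

static void release_on_cpu(int cpu, unsigned int counter)
{
	perfctr_nmi_owner[cpu] &= ~(1UL << counter);
}

/* Claim the counter on all CPUs; on failure, roll back the CPUs already done. */
static int reserve_on_all_cpus(unsigned int counter)
{
	int cpu, i;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (!reserve_on_cpu(cpu, counter)) {
			for (i = 0; i < cpu; i++)
				release_on_cpu(i, counter);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	perfctr_nmi_owner[2] = 1UL << 0;	/* pretend CPU 2 already owns counter 0 */
	printf("reserve counter 0: %d\n", reserve_on_all_cpus(0));	/* fails, rolled back */
	printf("reserve counter 1: %d\n", reserve_on_all_cpus(1));	/* succeeds on every CPU */
	return 0;
}

The patch does the equivalent with for_each_possible_cpu() and per_cpu(), and keeps per-CPU helpers (__reserve_perfctr_nmi()/__release_perfctr_nmi(), where cpu == -1 means the current CPU) for the watchdog setup and teardown paths. This is also how the unprotected smp_processor_id() warning goes away: the oprofile-facing entry points now walk all CPUs instead of touching only the current CPU's per-CPU variable.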
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 82d9d85d5270..17cf2d6b5e99 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -108,64 +108,128 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 /* checks for a bit availability (hack for oprofile) */
 int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
 {
+	int cpu;
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
 /* checks the an msr for availability */
 int avail_to_resrv_perfctr_nmi(unsigned int msr)
 {
 	unsigned int counter;
+	int cpu;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+	for_each_possible_cpu (cpu) {
+		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+			return 0;
+	}
+	return 1;
 }
 
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
 		return 1;
 	return 0;
 }
 
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_perfctr_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
 }
 
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_perfctr_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_perfctr_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu)
+		__release_perfctr_nmi(cpu, msr);
+}
+
+int __reserve_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
+	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
 		return 1;
 	return 0;
 }
 
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
 {
 	unsigned int counter;
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
 	counter = nmi_evntsel_msr_to_bit(msr);
 	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
 
-	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
+	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	int cpu, i;
+	for_each_possible_cpu (cpu) {
+		if (!__reserve_evntsel_nmi(cpu, msr)) {
+			for_each_possible_cpu (i) {
+				if (i >= cpu)
+					break;
+				__release_evntsel_nmi(i, msr);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	int cpu;
+	for_each_possible_cpu (cpu) {
+		__release_evntsel_nmi(cpu, msr);
+	}
 }
 
 static __cpuinit inline int nmi_known_cpu(void)
@@ -472,10 +536,10 @@ static int setup_k7_watchdog(void)
 
 	perfctr_msr = MSR_K7_PERFCTR0;
 	evntsel_msr = MSR_K7_EVNTSEL0;
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	/* Simulator may not support it */
@@ -501,9 +565,9 @@ static int setup_k7_watchdog(void)
 	wd->check_bit = 1ULL<<63;
 	return 1;
 fail2:
-	release_evntsel_nmi(evntsel_msr);
+	__release_evntsel_nmi(-1, evntsel_msr);
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -514,8 +578,8 @@ static void stop_k7_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 /* Note that these events don't tick when the CPU idles. This means
@@ -581,10 +645,10 @@ static int setup_p4_watchdog(void)
 		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
 	}
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
@@ -609,7 +673,7 @@ static int setup_p4_watchdog(void)
 	wd->check_bit = 1ULL<<39;
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -621,8 +685,8 @@ static void stop_p4_watchdog(void)
 	wrmsr(wd->cccr_msr, 0, 0);
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
@@ -650,10 +714,10 @@ static int setup_intel_arch_watchdog(void)
 	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
 	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
 
-	if (!reserve_perfctr_nmi(perfctr_msr))
+	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
 
-	if (!reserve_evntsel_nmi(evntsel_msr))
+	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
 		goto fail1;
 
 	wrmsrl(perfctr_msr, 0UL);
@@ -680,7 +744,7 @@ static int setup_intel_arch_watchdog(void)
 	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 fail1:
-	release_perfctr_nmi(perfctr_msr);
+	__release_perfctr_nmi(-1, perfctr_msr);
 fail:
 	return 0;
 }
@@ -704,8 +768,8 @@ static void stop_intel_arch_watchdog(void)
 
 	wrmsr(wd->evntsel_msr, 0, 0);
 
-	release_evntsel_nmi(wd->evntsel_msr);
-	release_perfctr_nmi(wd->perfctr_msr);
+	__release_evntsel_nmi(-1, wd->evntsel_msr);
+	__release_perfctr_nmi(-1, wd->perfctr_msr);
 }
 
 void setup_apic_nmi_watchdog(void *unused)