author      Robert Richter <robert.richter@amd.com>    2009-04-29 06:47:11 -0400
committer   Ingo Molnar <mingo@elte.hu>                2009-04-29 08:51:07 -0400
commit      4a06bd8508f65ad1dd5cd2046b85694813fa36a2 (patch)
tree        1a41073d4763cf4e7a7e80400bc5c4a453387b04 /arch/x86/kernel/cpu/perf_counter.c
parent      72eae04d3a3075c26d39e1e685acfc8e8c29db64 (diff)
perf_counter, x86: make x86_pmu data a static struct
Instead of using a pointer to reference the x86 pmu, we now have a
single static data structure that is initialized once at the beginning.
This saves a pointer dereference on every access.
[ Impact: micro-optimization ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-15-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
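
To illustrate the pattern outside the kernel context, here is a minimal, self-contained C sketch (struct pmu, intel_pmu_demo and intel_enable are hypothetical stand-ins, not the kernel's actual types): one static instance is filled in by struct assignment at init time, and every later access addresses its members directly instead of first loading a pointer.

    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's struct x86_pmu. */
    struct pmu {
            void (*enable)(int idx, unsigned long long config);
    };

    static void intel_enable(int idx, unsigned long long config)
    {
            printf("enable counter %d, config %llx\n", idx, config);
    }

    static const struct pmu intel_pmu_demo = { .enable = intel_enable };

    /* One static instance; its address is a link-time constant. */
    static struct pmu pmu;

    int main(void)
    {
            pmu = intel_pmu_demo;   /* struct assignment copies all members,
                                       analogous to: x86_pmu = intel_pmu; */
            pmu.enable(0, 0x3c);    /* was: pmu_ptr->enable(0, 0x3c); */
            return 0;
    }

Note that the indirect calls still go through the function pointers stored in the struct; what the change removes is the extra load of the x86_pmu pointer itself, since member offsets now resolve against a link-time-constant address.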
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--    arch/x86/kernel/cpu/perf_counter.c    50
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 7c72a9423636..68597d763389 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -60,7 +60,7 @@ struct x86_pmu {
         int max_events;
 };
 
-static struct x86_pmu *x86_pmu __read_mostly;
+static struct x86_pmu x86_pmu __read_mostly;
 
 static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
         .enabled = 1,
@@ -184,12 +184,12 @@ static bool reserve_pmc_hardware(void)
         disable_lapic_nmi_watchdog();
 
         for (i = 0; i < nr_counters_generic; i++) {
-                if (!reserve_perfctr_nmi(x86_pmu->perfctr + i))
+                if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
                         goto perfctr_fail;
         }
 
         for (i = 0; i < nr_counters_generic; i++) {
-                if (!reserve_evntsel_nmi(x86_pmu->eventsel + i))
+                if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
                         goto eventsel_fail;
         }
 
@@ -197,13 +197,13 @@ static bool reserve_pmc_hardware(void)
 
 eventsel_fail:
         for (i--; i >= 0; i--)
-                release_evntsel_nmi(x86_pmu->eventsel + i);
+                release_evntsel_nmi(x86_pmu.eventsel + i);
 
         i = nr_counters_generic;
 
 perfctr_fail:
         for (i--; i >= 0; i--)
-                release_perfctr_nmi(x86_pmu->perfctr + i);
+                release_perfctr_nmi(x86_pmu.perfctr + i);
 
         if (nmi_watchdog == NMI_LOCAL_APIC)
                 enable_lapic_nmi_watchdog();
@@ -216,8 +216,8 @@ static void release_pmc_hardware(void)
         int i;
 
         for (i = 0; i < nr_counters_generic; i++) {
-                release_perfctr_nmi(x86_pmu->perfctr + i);
-                release_evntsel_nmi(x86_pmu->eventsel + i);
+                release_perfctr_nmi(x86_pmu.perfctr + i);
+                release_evntsel_nmi(x86_pmu.eventsel + i);
         }
 
         if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -297,14 +297,14 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
          * Raw event type provide the config in the event structure
          */
         if (perf_event_raw(hw_event)) {
-                hwc->config |= x86_pmu->raw_event(perf_event_config(hw_event));
+                hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
         } else {
-                if (perf_event_id(hw_event) >= x86_pmu->max_events)
+                if (perf_event_id(hw_event) >= x86_pmu.max_events)
                         return -EINVAL;
                 /*
                  * The generic map:
                  */
-                hwc->config |= x86_pmu->event_map(perf_event_id(hw_event));
+                hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
         }
 
         counter->destroy = hw_perf_counter_destroy;
@@ -356,7 +356,7 @@ u64 hw_perf_save_disable(void)
         if (unlikely(!perf_counters_initialized))
                 return 0;
 
-        return x86_pmu->save_disable_all();
+        return x86_pmu.save_disable_all();
 }
 /*
  * Exported because of ACPI idle
@@ -396,7 +396,7 @@ void hw_perf_restore(u64 ctrl)
         if (unlikely(!perf_counters_initialized))
                 return;
 
-        x86_pmu->restore_all(ctrl);
+        x86_pmu.restore_all(ctrl);
 }
 /*
  * Exported because of ACPI idle
@@ -441,7 +441,7 @@ static void hw_perf_enable(int idx, u64 config)
         if (unlikely(!perf_counters_initialized))
                 return;
 
-        x86_pmu->enable(idx, config);
+        x86_pmu.enable(idx, config);
 }
 
 static void intel_pmu_disable_counter(int idx, u64 config)
@@ -463,7 +463,7 @@ static void hw_perf_disable(int idx, u64 config)
         if (unlikely(!perf_counters_initialized))
                 return;
 
-        x86_pmu->disable(idx, config);
+        x86_pmu.disable(idx, config);
 }
 
 static inline void
@@ -580,11 +580,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 
         event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
-        if (unlikely(event == x86_pmu->event_map(PERF_COUNT_INSTRUCTIONS)))
+        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
                 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
-        if (unlikely(event == x86_pmu->event_map(PERF_COUNT_CPU_CYCLES)))
+        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
                 return X86_PMC_IDX_FIXED_CPU_CYCLES;
-        if (unlikely(event == x86_pmu->event_map(PERF_COUNT_BUS_CYCLES)))
+        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
                 return X86_PMC_IDX_FIXED_BUS_CYCLES;
 
         return -1;
@@ -628,8 +628,8 @@ try_generic:
                         set_bit(idx, cpuc->used);
                         hwc->idx = idx;
                 }
-                hwc->config_base = x86_pmu->eventsel;
-                hwc->counter_base = x86_pmu->perfctr;
+                hwc->config_base = x86_pmu.eventsel;
+                hwc->counter_base = x86_pmu.perfctr;
         }
 
         perf_counters_lapic_init(hwc->nmi);
@@ -677,8 +677,8 @@ void perf_counter_print_debug(void)
         pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
 
         for (idx = 0; idx < nr_counters_generic; idx++) {
-                rdmsrl(x86_pmu->eventsel + idx, pmc_ctrl);
-                rdmsrl(x86_pmu->perfctr + idx, pmc_count);
+                rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
+                rdmsrl(x86_pmu.perfctr + idx, pmc_count);
 
                 prev_left = per_cpu(prev_left[idx], cpu);
 
@@ -819,7 +819,7 @@ void smp_perf_counter_interrupt(struct pt_regs *regs)
         irq_enter();
         apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
         ack_APIC_irq();
-        x86_pmu->handle_irq(regs, 0);
+        x86_pmu.handle_irq(regs, 0);
         irq_exit();
 }
 
@@ -876,7 +876,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
         regs = args->regs;
 
         apic_write(APIC_LVTPC, APIC_DM_NMI);
-        ret = x86_pmu->handle_irq(regs, 1);
+        ret = x86_pmu.handle_irq(regs, 1);
 
         return ret ? NOTIFY_STOP : NOTIFY_OK;
 }
@@ -940,7 +940,7 @@ static int intel_pmu_init(void)
         pr_info("... bit width: %d\n", eax.split.bit_width);
         pr_info("... mask length: %d\n", eax.split.mask_length);
 
-        x86_pmu = &intel_pmu;
+        x86_pmu = intel_pmu;
 
         nr_counters_generic = eax.split.num_counters;
         nr_counters_fixed = edx.split.num_counters_fixed;
@@ -951,7 +951,7 @@ static int intel_pmu_init(void)
 
 static int amd_pmu_init(void)
 {
-        x86_pmu = &amd_pmu;
+        x86_pmu = amd_pmu;
 
         nr_counters_generic = 4;
         nr_counters_fixed = 0;