| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-21 12:15:07 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-21 12:15:07 -0400 |
| commit | 43c1266ce4dc06bfd236cec31e11e9ecd69c0bef (patch) | |
| tree | 40a86739ca4c36200f447f655b01c57cfe646e26 /arch/x86/kernel/cpu | |
| parent | b8c7f1dc5ca4e0d10709182233cdab932cef593d (diff) | |
| parent | 57c0c15b5244320065374ad2c54f4fbec77a6428 (diff) | |
Merge branch 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf: Tidy up after the big rename
perf: Do the big rename: Performance Counters -> Performance Events
perf_counter: Rename 'event' to event_id/hw_event
perf_counter: Rename list_entry -> group_entry, counter_list -> group_list
Manually resolved some fairly trivial conflicts with the tracing tree in
include/trace/ftrace.h and kernel/trace/trace_syscalls.c.
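The rename is mechanical; for orientation, here is a minimal sketch of the identifiers that change in this directory. Every name is taken from the diff below (old names in comments), and the declarations are illustrative rather than a standalone translation unit:

```c
/*
 * Build and header renames visible in arch/x86/kernel/cpu:
 *
 *   CONFIG_PERF_COUNTERS      ->  CONFIG_PERF_EVENTS
 *   perf_counter.o            ->  perf_event.o
 *   <linux/perf_counter.h>    ->  <linux/perf_event.h>
 *   <asm/perf_counter.h>      ->  <asm/perf_event.h>
 *   init_hw_perf_counters()   ->  init_hw_perf_events()
 */

/* Core types used throughout arch/x86/kernel/cpu/perf_event.c: */
struct perf_event;       /* was: struct perf_counter    */
struct hw_perf_event;    /* was: struct hw_perf_counter */
struct cpu_hw_events;    /* was: struct cpu_hw_counters */

/* Example of a renamed helper; its body is unchanged apart from the names: */
static u64 x86_perf_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc, int idx);
```

As the diffstat below suggests, the changes to these files are essentially this substitution applied throughout.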
Diffstat (limited to 'arch/x86/kernel/cpu')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/kernel/cpu/Makefile | 2 |
| -rw-r--r-- | arch/x86/kernel/cpu/common.c | 4 |
| -rw-r--r-- | arch/x86/kernel/cpu/perf_event.c (renamed from arch/x86/kernel/cpu/perf_counter.c) | 556 |
| -rw-r--r-- | arch/x86/kernel/cpu/perfctr-watchdog.c | 2 |
4 files changed, 282 insertions, 282 deletions
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 8dd30638fe44..68537e957a9b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
| @@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o | |||
| 27 | obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o | 27 | obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o |
| 28 | obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o | 28 | obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o |
| 29 | 29 | ||
| 30 | obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o | 30 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o |
| 31 | 31 | ||
| 32 | obj-$(CONFIG_X86_MCE) += mcheck/ | 32 | obj-$(CONFIG_X86_MCE) += mcheck/ |
| 33 | obj-$(CONFIG_MTRR) += mtrr/ | 33 | obj-$(CONFIG_MTRR) += mtrr/ |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2fea97eccf77..cc25c2b4a567 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
| @@ -13,7 +13,7 @@ | |||
| 13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
| 14 | 14 | ||
| 15 | #include <asm/stackprotector.h> | 15 | #include <asm/stackprotector.h> |
| 16 | #include <asm/perf_counter.h> | 16 | #include <asm/perf_event.h> |
| 17 | #include <asm/mmu_context.h> | 17 | #include <asm/mmu_context.h> |
| 18 | #include <asm/hypervisor.h> | 18 | #include <asm/hypervisor.h> |
| 19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
| @@ -869,7 +869,7 @@ void __init identify_boot_cpu(void) | |||
| 869 | #else | 869 | #else |
| 870 | vgetcpu_set_mode(); | 870 | vgetcpu_set_mode(); |
| 871 | #endif | 871 | #endif |
| 872 | init_hw_perf_counters(); | 872 | init_hw_perf_events(); |
| 873 | } | 873 | } |
| 874 | 874 | ||
| 875 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | 875 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_event.c
index a6c8b27553cd..a3c7adb06b78 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_event.c
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Performance counter x86 architecture code | 2 | * Performance events x86 architecture code |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | 4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> |
| 5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | 5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar |
| @@ -11,7 +11,7 @@ | |||
| 11 | * For licencing details see kernel-base/COPYING | 11 | * For licencing details see kernel-base/COPYING |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #include <linux/perf_counter.h> | 14 | #include <linux/perf_event.h> |
| 15 | #include <linux/capability.h> | 15 | #include <linux/capability.h> |
| 16 | #include <linux/notifier.h> | 16 | #include <linux/notifier.h> |
| 17 | #include <linux/hardirq.h> | 17 | #include <linux/hardirq.h> |
| @@ -27,10 +27,10 @@ | |||
| 27 | #include <asm/stacktrace.h> | 27 | #include <asm/stacktrace.h> |
| 28 | #include <asm/nmi.h> | 28 | #include <asm/nmi.h> |
| 29 | 29 | ||
| 30 | static u64 perf_counter_mask __read_mostly; | 30 | static u64 perf_event_mask __read_mostly; |
| 31 | 31 | ||
| 32 | /* The maximal number of PEBS counters: */ | 32 | /* The maximal number of PEBS events: */ |
| 33 | #define MAX_PEBS_COUNTERS 4 | 33 | #define MAX_PEBS_EVENTS 4 |
| 34 | 34 | ||
| 35 | /* The size of a BTS record in bytes: */ | 35 | /* The size of a BTS record in bytes: */ |
| 36 | #define BTS_RECORD_SIZE 24 | 36 | #define BTS_RECORD_SIZE 24 |
| @@ -65,11 +65,11 @@ struct debug_store { | |||
| 65 | u64 pebs_index; | 65 | u64 pebs_index; |
| 66 | u64 pebs_absolute_maximum; | 66 | u64 pebs_absolute_maximum; |
| 67 | u64 pebs_interrupt_threshold; | 67 | u64 pebs_interrupt_threshold; |
| 68 | u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; | 68 | u64 pebs_event_reset[MAX_PEBS_EVENTS]; |
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | struct cpu_hw_counters { | 71 | struct cpu_hw_events { |
| 72 | struct perf_counter *counters[X86_PMC_IDX_MAX]; | 72 | struct perf_event *events[X86_PMC_IDX_MAX]; |
| 73 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 73 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
| 74 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 74 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
| 75 | unsigned long interrupts; | 75 | unsigned long interrupts; |
| @@ -86,17 +86,17 @@ struct x86_pmu { | |||
| 86 | int (*handle_irq)(struct pt_regs *); | 86 | int (*handle_irq)(struct pt_regs *); |
| 87 | void (*disable_all)(void); | 87 | void (*disable_all)(void); |
| 88 | void (*enable_all)(void); | 88 | void (*enable_all)(void); |
| 89 | void (*enable)(struct hw_perf_counter *, int); | 89 | void (*enable)(struct hw_perf_event *, int); |
| 90 | void (*disable)(struct hw_perf_counter *, int); | 90 | void (*disable)(struct hw_perf_event *, int); |
| 91 | unsigned eventsel; | 91 | unsigned eventsel; |
| 92 | unsigned perfctr; | 92 | unsigned perfctr; |
| 93 | u64 (*event_map)(int); | 93 | u64 (*event_map)(int); |
| 94 | u64 (*raw_event)(u64); | 94 | u64 (*raw_event)(u64); |
| 95 | int max_events; | 95 | int max_events; |
| 96 | int num_counters; | 96 | int num_events; |
| 97 | int num_counters_fixed; | 97 | int num_events_fixed; |
| 98 | int counter_bits; | 98 | int event_bits; |
| 99 | u64 counter_mask; | 99 | u64 event_mask; |
| 100 | int apic; | 100 | int apic; |
| 101 | u64 max_period; | 101 | u64 max_period; |
| 102 | u64 intel_ctrl; | 102 | u64 intel_ctrl; |
| @@ -106,7 +106,7 @@ struct x86_pmu { | |||
| 106 | 106 | ||
| 107 | static struct x86_pmu x86_pmu __read_mostly; | 107 | static struct x86_pmu x86_pmu __read_mostly; |
| 108 | 108 | ||
| 109 | static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { | 109 | static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { |
| 110 | .enabled = 1, | 110 | .enabled = 1, |
| 111 | }; | 111 | }; |
| 112 | 112 | ||
| @@ -124,35 +124,35 @@ static const u64 p6_perfmon_event_map[] = | |||
| 124 | [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, | 124 | [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, |
| 125 | }; | 125 | }; |
| 126 | 126 | ||
| 127 | static u64 p6_pmu_event_map(int event) | 127 | static u64 p6_pmu_event_map(int hw_event) |
| 128 | { | 128 | { |
| 129 | return p6_perfmon_event_map[event]; | 129 | return p6_perfmon_event_map[hw_event]; |
| 130 | } | 130 | } |
| 131 | 131 | ||
| 132 | /* | 132 | /* |
| 133 | * Counter setting that is specified not to count anything. | 133 | * Event setting that is specified not to count anything. |
| 134 | * We use this to effectively disable a counter. | 134 | * We use this to effectively disable a counter. |
| 135 | * | 135 | * |
| 136 | * L2_RQSTS with 0 MESI unit mask. | 136 | * L2_RQSTS with 0 MESI unit mask. |
| 137 | */ | 137 | */ |
| 138 | #define P6_NOP_COUNTER 0x0000002EULL | 138 | #define P6_NOP_EVENT 0x0000002EULL |
| 139 | 139 | ||
| 140 | static u64 p6_pmu_raw_event(u64 event) | 140 | static u64 p6_pmu_raw_event(u64 hw_event) |
| 141 | { | 141 | { |
| 142 | #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL | 142 | #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL |
| 143 | #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL | 143 | #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL |
| 144 | #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL | 144 | #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL |
| 145 | #define P6_EVNTSEL_INV_MASK 0x00800000ULL | 145 | #define P6_EVNTSEL_INV_MASK 0x00800000ULL |
| 146 | #define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL | 146 | #define P6_EVNTSEL_REG_MASK 0xFF000000ULL |
| 147 | 147 | ||
| 148 | #define P6_EVNTSEL_MASK \ | 148 | #define P6_EVNTSEL_MASK \ |
| 149 | (P6_EVNTSEL_EVENT_MASK | \ | 149 | (P6_EVNTSEL_EVENT_MASK | \ |
| 150 | P6_EVNTSEL_UNIT_MASK | \ | 150 | P6_EVNTSEL_UNIT_MASK | \ |
| 151 | P6_EVNTSEL_EDGE_MASK | \ | 151 | P6_EVNTSEL_EDGE_MASK | \ |
| 152 | P6_EVNTSEL_INV_MASK | \ | 152 | P6_EVNTSEL_INV_MASK | \ |
| 153 | P6_EVNTSEL_COUNTER_MASK) | 153 | P6_EVNTSEL_REG_MASK) |
| 154 | 154 | ||
| 155 | return event & P6_EVNTSEL_MASK; | 155 | return hw_event & P6_EVNTSEL_MASK; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | 158 | ||
| @@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] = | |||
| 170 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, | 170 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, |
| 171 | }; | 171 | }; |
| 172 | 172 | ||
| 173 | static u64 intel_pmu_event_map(int event) | 173 | static u64 intel_pmu_event_map(int hw_event) |
| 174 | { | 174 | { |
| 175 | return intel_perfmon_event_map[event]; | 175 | return intel_perfmon_event_map[hw_event]; |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | /* | 178 | /* |
| 179 | * Generalized hw caching related event table, filled | 179 | * Generalized hw caching related hw_event table, filled |
| 180 | * in on a per model basis. A value of 0 means | 180 | * in on a per model basis. A value of 0 means |
| 181 | * 'not supported', -1 means 'event makes no sense on | 181 | * 'not supported', -1 means 'hw_event makes no sense on |
| 182 | * this CPU', any other value means the raw event | 182 | * this CPU', any other value means the raw hw_event |
| 183 | * ID. | 183 | * ID. |
| 184 | */ | 184 | */ |
| 185 | 185 | ||
| @@ -463,22 +463,22 @@ static const u64 atom_hw_cache_event_ids | |||
| 463 | }, | 463 | }, |
| 464 | }; | 464 | }; |
| 465 | 465 | ||
| 466 | static u64 intel_pmu_raw_event(u64 event) | 466 | static u64 intel_pmu_raw_event(u64 hw_event) |
| 467 | { | 467 | { |
| 468 | #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL | 468 | #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL |
| 469 | #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL | 469 | #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL |
| 470 | #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL | 470 | #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL |
| 471 | #define CORE_EVNTSEL_INV_MASK 0x00800000ULL | 471 | #define CORE_EVNTSEL_INV_MASK 0x00800000ULL |
| 472 | #define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL | 472 | #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL |
| 473 | 473 | ||
| 474 | #define CORE_EVNTSEL_MASK \ | 474 | #define CORE_EVNTSEL_MASK \ |
| 475 | (CORE_EVNTSEL_EVENT_MASK | \ | 475 | (CORE_EVNTSEL_EVENT_MASK | \ |
| 476 | CORE_EVNTSEL_UNIT_MASK | \ | 476 | CORE_EVNTSEL_UNIT_MASK | \ |
| 477 | CORE_EVNTSEL_EDGE_MASK | \ | 477 | CORE_EVNTSEL_EDGE_MASK | \ |
| 478 | CORE_EVNTSEL_INV_MASK | \ | 478 | CORE_EVNTSEL_INV_MASK | \ |
| 479 | CORE_EVNTSEL_COUNTER_MASK) | 479 | CORE_EVNTSEL_REG_MASK) |
| 480 | 480 | ||
| 481 | return event & CORE_EVNTSEL_MASK; | 481 | return hw_event & CORE_EVNTSEL_MASK; |
| 482 | } | 482 | } |
| 483 | 483 | ||
| 484 | static const u64 amd_hw_cache_event_ids | 484 | static const u64 amd_hw_cache_event_ids |
| @@ -585,39 +585,39 @@ static const u64 amd_perfmon_event_map[] = | |||
| 585 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | 585 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, |
| 586 | }; | 586 | }; |
| 587 | 587 | ||
| 588 | static u64 amd_pmu_event_map(int event) | 588 | static u64 amd_pmu_event_map(int hw_event) |
| 589 | { | 589 | { |
| 590 | return amd_perfmon_event_map[event]; | 590 | return amd_perfmon_event_map[hw_event]; |
| 591 | } | 591 | } |
| 592 | 592 | ||
| 593 | static u64 amd_pmu_raw_event(u64 event) | 593 | static u64 amd_pmu_raw_event(u64 hw_event) |
| 594 | { | 594 | { |
| 595 | #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL | 595 | #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL |
| 596 | #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL | 596 | #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL |
| 597 | #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL | 597 | #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL |
| 598 | #define K7_EVNTSEL_INV_MASK 0x000800000ULL | 598 | #define K7_EVNTSEL_INV_MASK 0x000800000ULL |
| 599 | #define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL | 599 | #define K7_EVNTSEL_REG_MASK 0x0FF000000ULL |
| 600 | 600 | ||
| 601 | #define K7_EVNTSEL_MASK \ | 601 | #define K7_EVNTSEL_MASK \ |
| 602 | (K7_EVNTSEL_EVENT_MASK | \ | 602 | (K7_EVNTSEL_EVENT_MASK | \ |
| 603 | K7_EVNTSEL_UNIT_MASK | \ | 603 | K7_EVNTSEL_UNIT_MASK | \ |
| 604 | K7_EVNTSEL_EDGE_MASK | \ | 604 | K7_EVNTSEL_EDGE_MASK | \ |
| 605 | K7_EVNTSEL_INV_MASK | \ | 605 | K7_EVNTSEL_INV_MASK | \ |
| 606 | K7_EVNTSEL_COUNTER_MASK) | 606 | K7_EVNTSEL_REG_MASK) |
| 607 | 607 | ||
| 608 | return event & K7_EVNTSEL_MASK; | 608 | return hw_event & K7_EVNTSEL_MASK; |
| 609 | } | 609 | } |
| 610 | 610 | ||
| 611 | /* | 611 | /* |
| 612 | * Propagate counter elapsed time into the generic counter. | 612 | * Propagate event elapsed time into the generic event. |
| 613 | * Can only be executed on the CPU where the counter is active. | 613 | * Can only be executed on the CPU where the event is active. |
| 614 | * Returns the delta events processed. | 614 | * Returns the delta events processed. |
| 615 | */ | 615 | */ |
| 616 | static u64 | 616 | static u64 |
| 617 | x86_perf_counter_update(struct perf_counter *counter, | 617 | x86_perf_event_update(struct perf_event *event, |
| 618 | struct hw_perf_counter *hwc, int idx) | 618 | struct hw_perf_event *hwc, int idx) |
| 619 | { | 619 | { |
| 620 | int shift = 64 - x86_pmu.counter_bits; | 620 | int shift = 64 - x86_pmu.event_bits; |
| 621 | u64 prev_raw_count, new_raw_count; | 621 | u64 prev_raw_count, new_raw_count; |
| 622 | s64 delta; | 622 | s64 delta; |
| 623 | 623 | ||
| @@ -625,15 +625,15 @@ x86_perf_counter_update(struct perf_counter *counter, | |||
| 625 | return 0; | 625 | return 0; |
| 626 | 626 | ||
| 627 | /* | 627 | /* |
| 628 | * Careful: an NMI might modify the previous counter value. | 628 | * Careful: an NMI might modify the previous event value. |
| 629 | * | 629 | * |
| 630 | * Our tactic to handle this is to first atomically read and | 630 | * Our tactic to handle this is to first atomically read and |
| 631 | * exchange a new raw count - then add that new-prev delta | 631 | * exchange a new raw count - then add that new-prev delta |
| 632 | * count to the generic counter atomically: | 632 | * count to the generic event atomically: |
| 633 | */ | 633 | */ |
| 634 | again: | 634 | again: |
| 635 | prev_raw_count = atomic64_read(&hwc->prev_count); | 635 | prev_raw_count = atomic64_read(&hwc->prev_count); |
| 636 | rdmsrl(hwc->counter_base + idx, new_raw_count); | 636 | rdmsrl(hwc->event_base + idx, new_raw_count); |
| 637 | 637 | ||
| 638 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | 638 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, |
| 639 | new_raw_count) != prev_raw_count) | 639 | new_raw_count) != prev_raw_count) |
| @@ -642,7 +642,7 @@ again: | |||
| 642 | /* | 642 | /* |
| 643 | * Now we have the new raw value and have updated the prev | 643 | * Now we have the new raw value and have updated the prev |
| 644 | * timestamp already. We can now calculate the elapsed delta | 644 | * timestamp already. We can now calculate the elapsed delta |
| 645 | * (counter-)time and add that to the generic counter. | 645 | * (event-)time and add that to the generic event. |
| 646 | * | 646 | * |
| 647 | * Careful, not all hw sign-extends above the physical width | 647 | * Careful, not all hw sign-extends above the physical width |
| 648 | * of the count. | 648 | * of the count. |
| @@ -650,13 +650,13 @@ again: | |||
| 650 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | 650 | delta = (new_raw_count << shift) - (prev_raw_count << shift); |
| 651 | delta >>= shift; | 651 | delta >>= shift; |
| 652 | 652 | ||
| 653 | atomic64_add(delta, &counter->count); | 653 | atomic64_add(delta, &event->count); |
| 654 | atomic64_sub(delta, &hwc->period_left); | 654 | atomic64_sub(delta, &hwc->period_left); |
| 655 | 655 | ||
| 656 | return new_raw_count; | 656 | return new_raw_count; |
| 657 | } | 657 | } |
| 658 | 658 | ||
| 659 | static atomic_t active_counters; | 659 | static atomic_t active_events; |
| 660 | static DEFINE_MUTEX(pmc_reserve_mutex); | 660 | static DEFINE_MUTEX(pmc_reserve_mutex); |
| 661 | 661 | ||
| 662 | static bool reserve_pmc_hardware(void) | 662 | static bool reserve_pmc_hardware(void) |
| @@ -667,12 +667,12 @@ static bool reserve_pmc_hardware(void) | |||
| 667 | if (nmi_watchdog == NMI_LOCAL_APIC) | 667 | if (nmi_watchdog == NMI_LOCAL_APIC) |
| 668 | disable_lapic_nmi_watchdog(); | 668 | disable_lapic_nmi_watchdog(); |
| 669 | 669 | ||
| 670 | for (i = 0; i < x86_pmu.num_counters; i++) { | 670 | for (i = 0; i < x86_pmu.num_events; i++) { |
| 671 | if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) | 671 | if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) |
| 672 | goto perfctr_fail; | 672 | goto perfctr_fail; |
| 673 | } | 673 | } |
| 674 | 674 | ||
| 675 | for (i = 0; i < x86_pmu.num_counters; i++) { | 675 | for (i = 0; i < x86_pmu.num_events; i++) { |
| 676 | if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) | 676 | if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) |
| 677 | goto eventsel_fail; | 677 | goto eventsel_fail; |
| 678 | } | 678 | } |
| @@ -685,7 +685,7 @@ eventsel_fail: | |||
| 685 | for (i--; i >= 0; i--) | 685 | for (i--; i >= 0; i--) |
| 686 | release_evntsel_nmi(x86_pmu.eventsel + i); | 686 | release_evntsel_nmi(x86_pmu.eventsel + i); |
| 687 | 687 | ||
| 688 | i = x86_pmu.num_counters; | 688 | i = x86_pmu.num_events; |
| 689 | 689 | ||
| 690 | perfctr_fail: | 690 | perfctr_fail: |
| 691 | for (i--; i >= 0; i--) | 691 | for (i--; i >= 0; i--) |
| @@ -703,7 +703,7 @@ static void release_pmc_hardware(void) | |||
| 703 | #ifdef CONFIG_X86_LOCAL_APIC | 703 | #ifdef CONFIG_X86_LOCAL_APIC |
| 704 | int i; | 704 | int i; |
| 705 | 705 | ||
| 706 | for (i = 0; i < x86_pmu.num_counters; i++) { | 706 | for (i = 0; i < x86_pmu.num_events; i++) { |
| 707 | release_perfctr_nmi(x86_pmu.perfctr + i); | 707 | release_perfctr_nmi(x86_pmu.perfctr + i); |
| 708 | release_evntsel_nmi(x86_pmu.eventsel + i); | 708 | release_evntsel_nmi(x86_pmu.eventsel + i); |
| 709 | } | 709 | } |
| @@ -720,7 +720,7 @@ static inline bool bts_available(void) | |||
| 720 | 720 | ||
| 721 | static inline void init_debug_store_on_cpu(int cpu) | 721 | static inline void init_debug_store_on_cpu(int cpu) |
| 722 | { | 722 | { |
| 723 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | 723 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; |
| 724 | 724 | ||
| 725 | if (!ds) | 725 | if (!ds) |
| 726 | return; | 726 | return; |
| @@ -732,7 +732,7 @@ static inline void init_debug_store_on_cpu(int cpu) | |||
| 732 | 732 | ||
| 733 | static inline void fini_debug_store_on_cpu(int cpu) | 733 | static inline void fini_debug_store_on_cpu(int cpu) |
| 734 | { | 734 | { |
| 735 | if (!per_cpu(cpu_hw_counters, cpu).ds) | 735 | if (!per_cpu(cpu_hw_events, cpu).ds) |
| 736 | return; | 736 | return; |
| 737 | 737 | ||
| 738 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); | 738 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); |
| @@ -751,12 +751,12 @@ static void release_bts_hardware(void) | |||
| 751 | fini_debug_store_on_cpu(cpu); | 751 | fini_debug_store_on_cpu(cpu); |
| 752 | 752 | ||
| 753 | for_each_possible_cpu(cpu) { | 753 | for_each_possible_cpu(cpu) { |
| 754 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | 754 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; |
| 755 | 755 | ||
| 756 | if (!ds) | 756 | if (!ds) |
| 757 | continue; | 757 | continue; |
| 758 | 758 | ||
| 759 | per_cpu(cpu_hw_counters, cpu).ds = NULL; | 759 | per_cpu(cpu_hw_events, cpu).ds = NULL; |
| 760 | 760 | ||
| 761 | kfree((void *)(unsigned long)ds->bts_buffer_base); | 761 | kfree((void *)(unsigned long)ds->bts_buffer_base); |
| 762 | kfree(ds); | 762 | kfree(ds); |
| @@ -796,7 +796,7 @@ static int reserve_bts_hardware(void) | |||
| 796 | ds->bts_interrupt_threshold = | 796 | ds->bts_interrupt_threshold = |
| 797 | ds->bts_absolute_maximum - BTS_OVFL_TH; | 797 | ds->bts_absolute_maximum - BTS_OVFL_TH; |
| 798 | 798 | ||
| 799 | per_cpu(cpu_hw_counters, cpu).ds = ds; | 799 | per_cpu(cpu_hw_events, cpu).ds = ds; |
| 800 | err = 0; | 800 | err = 0; |
| 801 | } | 801 | } |
| 802 | 802 | ||
| @@ -812,9 +812,9 @@ static int reserve_bts_hardware(void) | |||
| 812 | return err; | 812 | return err; |
| 813 | } | 813 | } |
| 814 | 814 | ||
| 815 | static void hw_perf_counter_destroy(struct perf_counter *counter) | 815 | static void hw_perf_event_destroy(struct perf_event *event) |
| 816 | { | 816 | { |
| 817 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { | 817 | if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { |
| 818 | release_pmc_hardware(); | 818 | release_pmc_hardware(); |
| 819 | release_bts_hardware(); | 819 | release_bts_hardware(); |
| 820 | mutex_unlock(&pmc_reserve_mutex); | 820 | mutex_unlock(&pmc_reserve_mutex); |
| @@ -827,7 +827,7 @@ static inline int x86_pmu_initialized(void) | |||
| 827 | } | 827 | } |
| 828 | 828 | ||
| 829 | static inline int | 829 | static inline int |
| 830 | set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) | 830 | set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) |
| 831 | { | 831 | { |
| 832 | unsigned int cache_type, cache_op, cache_result; | 832 | unsigned int cache_type, cache_op, cache_result; |
| 833 | u64 config, val; | 833 | u64 config, val; |
| @@ -880,7 +880,7 @@ static void intel_pmu_enable_bts(u64 config) | |||
| 880 | 880 | ||
| 881 | static void intel_pmu_disable_bts(void) | 881 | static void intel_pmu_disable_bts(void) |
| 882 | { | 882 | { |
| 883 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 883 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 884 | unsigned long debugctlmsr; | 884 | unsigned long debugctlmsr; |
| 885 | 885 | ||
| 886 | if (!cpuc->ds) | 886 | if (!cpuc->ds) |
| @@ -898,10 +898,10 @@ static void intel_pmu_disable_bts(void) | |||
| 898 | /* | 898 | /* |
| 899 | * Setup the hardware configuration for a given attr_type | 899 | * Setup the hardware configuration for a given attr_type |
| 900 | */ | 900 | */ |
| 901 | static int __hw_perf_counter_init(struct perf_counter *counter) | 901 | static int __hw_perf_event_init(struct perf_event *event) |
| 902 | { | 902 | { |
| 903 | struct perf_counter_attr *attr = &counter->attr; | 903 | struct perf_event_attr *attr = &event->attr; |
| 904 | struct hw_perf_counter *hwc = &counter->hw; | 904 | struct hw_perf_event *hwc = &event->hw; |
| 905 | u64 config; | 905 | u64 config; |
| 906 | int err; | 906 | int err; |
| 907 | 907 | ||
| @@ -909,22 +909,22 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
| 909 | return -ENODEV; | 909 | return -ENODEV; |
| 910 | 910 | ||
| 911 | err = 0; | 911 | err = 0; |
| 912 | if (!atomic_inc_not_zero(&active_counters)) { | 912 | if (!atomic_inc_not_zero(&active_events)) { |
| 913 | mutex_lock(&pmc_reserve_mutex); | 913 | mutex_lock(&pmc_reserve_mutex); |
| 914 | if (atomic_read(&active_counters) == 0) { | 914 | if (atomic_read(&active_events) == 0) { |
| 915 | if (!reserve_pmc_hardware()) | 915 | if (!reserve_pmc_hardware()) |
| 916 | err = -EBUSY; | 916 | err = -EBUSY; |
| 917 | else | 917 | else |
| 918 | err = reserve_bts_hardware(); | 918 | err = reserve_bts_hardware(); |
| 919 | } | 919 | } |
| 920 | if (!err) | 920 | if (!err) |
| 921 | atomic_inc(&active_counters); | 921 | atomic_inc(&active_events); |
| 922 | mutex_unlock(&pmc_reserve_mutex); | 922 | mutex_unlock(&pmc_reserve_mutex); |
| 923 | } | 923 | } |
| 924 | if (err) | 924 | if (err) |
| 925 | return err; | 925 | return err; |
| 926 | 926 | ||
| 927 | counter->destroy = hw_perf_counter_destroy; | 927 | event->destroy = hw_perf_event_destroy; |
| 928 | 928 | ||
| 929 | /* | 929 | /* |
| 930 | * Generate PMC IRQs: | 930 | * Generate PMC IRQs: |
| @@ -948,15 +948,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
| 948 | /* | 948 | /* |
| 949 | * If we have a PMU initialized but no APIC | 949 | * If we have a PMU initialized but no APIC |
| 950 | * interrupts, we cannot sample hardware | 950 | * interrupts, we cannot sample hardware |
| 951 | * counters (user-space has to fall back and | 951 | * events (user-space has to fall back and |
| 952 | * sample via a hrtimer based software counter): | 952 | * sample via a hrtimer based software event): |
| 953 | */ | 953 | */ |
| 954 | if (!x86_pmu.apic) | 954 | if (!x86_pmu.apic) |
| 955 | return -EOPNOTSUPP; | 955 | return -EOPNOTSUPP; |
| 956 | } | 956 | } |
| 957 | 957 | ||
| 958 | /* | 958 | /* |
| 959 | * Raw event type provide the config in the event structure | 959 | * Raw hw_event type provide the config in the hw_event structure |
| 960 | */ | 960 | */ |
| 961 | if (attr->type == PERF_TYPE_RAW) { | 961 | if (attr->type == PERF_TYPE_RAW) { |
| 962 | hwc->config |= x86_pmu.raw_event(attr->config); | 962 | hwc->config |= x86_pmu.raw_event(attr->config); |
| @@ -1001,7 +1001,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
| 1001 | 1001 | ||
| 1002 | static void p6_pmu_disable_all(void) | 1002 | static void p6_pmu_disable_all(void) |
| 1003 | { | 1003 | { |
| 1004 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1004 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1005 | u64 val; | 1005 | u64 val; |
| 1006 | 1006 | ||
| 1007 | if (!cpuc->enabled) | 1007 | if (!cpuc->enabled) |
| @@ -1018,7 +1018,7 @@ static void p6_pmu_disable_all(void) | |||
| 1018 | 1018 | ||
| 1019 | static void intel_pmu_disable_all(void) | 1019 | static void intel_pmu_disable_all(void) |
| 1020 | { | 1020 | { |
| 1021 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1021 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1022 | 1022 | ||
| 1023 | if (!cpuc->enabled) | 1023 | if (!cpuc->enabled) |
| 1024 | return; | 1024 | return; |
| @@ -1034,7 +1034,7 @@ static void intel_pmu_disable_all(void) | |||
| 1034 | 1034 | ||
| 1035 | static void amd_pmu_disable_all(void) | 1035 | static void amd_pmu_disable_all(void) |
| 1036 | { | 1036 | { |
| 1037 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1037 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1038 | int idx; | 1038 | int idx; |
| 1039 | 1039 | ||
| 1040 | if (!cpuc->enabled) | 1040 | if (!cpuc->enabled) |
| @@ -1043,12 +1043,12 @@ static void amd_pmu_disable_all(void) | |||
| 1043 | cpuc->enabled = 0; | 1043 | cpuc->enabled = 0; |
| 1044 | /* | 1044 | /* |
| 1045 | * ensure we write the disable before we start disabling the | 1045 | * ensure we write the disable before we start disabling the |
| 1046 | * counters proper, so that amd_pmu_enable_counter() does the | 1046 | * events proper, so that amd_pmu_enable_event() does the |
| 1047 | * right thing. | 1047 | * right thing. |
| 1048 | */ | 1048 | */ |
| 1049 | barrier(); | 1049 | barrier(); |
| 1050 | 1050 | ||
| 1051 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1051 | for (idx = 0; idx < x86_pmu.num_events; idx++) { |
| 1052 | u64 val; | 1052 | u64 val; |
| 1053 | 1053 | ||
| 1054 | if (!test_bit(idx, cpuc->active_mask)) | 1054 | if (!test_bit(idx, cpuc->active_mask)) |
| @@ -1070,7 +1070,7 @@ void hw_perf_disable(void) | |||
| 1070 | 1070 | ||
| 1071 | static void p6_pmu_enable_all(void) | 1071 | static void p6_pmu_enable_all(void) |
| 1072 | { | 1072 | { |
| 1073 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1073 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1074 | unsigned long val; | 1074 | unsigned long val; |
| 1075 | 1075 | ||
| 1076 | if (cpuc->enabled) | 1076 | if (cpuc->enabled) |
| @@ -1087,7 +1087,7 @@ static void p6_pmu_enable_all(void) | |||
| 1087 | 1087 | ||
| 1088 | static void intel_pmu_enable_all(void) | 1088 | static void intel_pmu_enable_all(void) |
| 1089 | { | 1089 | { |
| 1090 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1090 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1091 | 1091 | ||
| 1092 | if (cpuc->enabled) | 1092 | if (cpuc->enabled) |
| 1093 | return; | 1093 | return; |
| @@ -1098,19 +1098,19 @@ static void intel_pmu_enable_all(void) | |||
| 1098 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | 1098 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); |
| 1099 | 1099 | ||
| 1100 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | 1100 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { |
| 1101 | struct perf_counter *counter = | 1101 | struct perf_event *event = |
| 1102 | cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | 1102 | cpuc->events[X86_PMC_IDX_FIXED_BTS]; |
| 1103 | 1103 | ||
| 1104 | if (WARN_ON_ONCE(!counter)) | 1104 | if (WARN_ON_ONCE(!event)) |
| 1105 | return; | 1105 | return; |
| 1106 | 1106 | ||
| 1107 | intel_pmu_enable_bts(counter->hw.config); | 1107 | intel_pmu_enable_bts(event->hw.config); |
| 1108 | } | 1108 | } |
| 1109 | } | 1109 | } |
| 1110 | 1110 | ||
| 1111 | static void amd_pmu_enable_all(void) | 1111 | static void amd_pmu_enable_all(void) |
| 1112 | { | 1112 | { |
| 1113 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1113 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1114 | int idx; | 1114 | int idx; |
| 1115 | 1115 | ||
| 1116 | if (cpuc->enabled) | 1116 | if (cpuc->enabled) |
| @@ -1119,14 +1119,14 @@ static void amd_pmu_enable_all(void) | |||
| 1119 | cpuc->enabled = 1; | 1119 | cpuc->enabled = 1; |
| 1120 | barrier(); | 1120 | barrier(); |
| 1121 | 1121 | ||
| 1122 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1122 | for (idx = 0; idx < x86_pmu.num_events; idx++) { |
| 1123 | struct perf_counter *counter = cpuc->counters[idx]; | 1123 | struct perf_event *event = cpuc->events[idx]; |
| 1124 | u64 val; | 1124 | u64 val; |
| 1125 | 1125 | ||
| 1126 | if (!test_bit(idx, cpuc->active_mask)) | 1126 | if (!test_bit(idx, cpuc->active_mask)) |
| 1127 | continue; | 1127 | continue; |
| 1128 | 1128 | ||
| 1129 | val = counter->hw.config; | 1129 | val = event->hw.config; |
| 1130 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 1130 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
| 1131 | wrmsrl(MSR_K7_EVNTSEL0 + idx, val); | 1131 | wrmsrl(MSR_K7_EVNTSEL0 + idx, val); |
| 1132 | } | 1132 | } |
| @@ -1153,19 +1153,19 @@ static inline void intel_pmu_ack_status(u64 ack) | |||
| 1153 | wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); | 1153 | wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); |
| 1154 | } | 1154 | } |
| 1155 | 1155 | ||
| 1156 | static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | 1156 | static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) |
| 1157 | { | 1157 | { |
| 1158 | (void)checking_wrmsrl(hwc->config_base + idx, | 1158 | (void)checking_wrmsrl(hwc->config_base + idx, |
| 1159 | hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); | 1159 | hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); |
| 1160 | } | 1160 | } |
| 1161 | 1161 | ||
| 1162 | static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | 1162 | static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) |
| 1163 | { | 1163 | { |
| 1164 | (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); | 1164 | (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); |
| 1165 | } | 1165 | } |
| 1166 | 1166 | ||
| 1167 | static inline void | 1167 | static inline void |
| 1168 | intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) | 1168 | intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) |
| 1169 | { | 1169 | { |
| 1170 | int idx = __idx - X86_PMC_IDX_FIXED; | 1170 | int idx = __idx - X86_PMC_IDX_FIXED; |
| 1171 | u64 ctrl_val, mask; | 1171 | u64 ctrl_val, mask; |
| @@ -1178,10 +1178,10 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) | |||
| 1178 | } | 1178 | } |
| 1179 | 1179 | ||
| 1180 | static inline void | 1180 | static inline void |
| 1181 | p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | 1181 | p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) |
| 1182 | { | 1182 | { |
| 1183 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1183 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1184 | u64 val = P6_NOP_COUNTER; | 1184 | u64 val = P6_NOP_EVENT; |
| 1185 | 1185 | ||
| 1186 | if (cpuc->enabled) | 1186 | if (cpuc->enabled) |
| 1187 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 1187 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; |
| @@ -1190,7 +1190,7 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | |||
| 1190 | } | 1190 | } |
| 1191 | 1191 | ||
| 1192 | static inline void | 1192 | static inline void |
| 1193 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | 1193 | intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) |
| 1194 | { | 1194 | { |
| 1195 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | 1195 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { |
| 1196 | intel_pmu_disable_bts(); | 1196 | intel_pmu_disable_bts(); |
| @@ -1202,24 +1202,24 @@ intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | |||
| 1202 | return; | 1202 | return; |
| 1203 | } | 1203 | } |
| 1204 | 1204 | ||
| 1205 | x86_pmu_disable_counter(hwc, idx); | 1205 | x86_pmu_disable_event(hwc, idx); |
| 1206 | } | 1206 | } |
| 1207 | 1207 | ||
| 1208 | static inline void | 1208 | static inline void |
| 1209 | amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | 1209 | amd_pmu_disable_event(struct hw_perf_event *hwc, int idx) |
| 1210 | { | 1210 | { |
| 1211 | x86_pmu_disable_counter(hwc, idx); | 1211 | x86_pmu_disable_event(hwc, idx); |
| 1212 | } | 1212 | } |
| 1213 | 1213 | ||
| 1214 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); | 1214 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); |
| 1215 | 1215 | ||
| 1216 | /* | 1216 | /* |
| 1217 | * Set the next IRQ period, based on the hwc->period_left value. | 1217 | * Set the next IRQ period, based on the hwc->period_left value. |
| 1218 | * To be called with the counter disabled in hw: | 1218 | * To be called with the event disabled in hw: |
| 1219 | */ | 1219 | */ |
| 1220 | static int | 1220 | static int |
| 1221 | x86_perf_counter_set_period(struct perf_counter *counter, | 1221 | x86_perf_event_set_period(struct perf_event *event, |
| 1222 | struct hw_perf_counter *hwc, int idx) | 1222 | struct hw_perf_event *hwc, int idx) |
| 1223 | { | 1223 | { |
| 1224 | s64 left = atomic64_read(&hwc->period_left); | 1224 | s64 left = atomic64_read(&hwc->period_left); |
| 1225 | s64 period = hwc->sample_period; | 1225 | s64 period = hwc->sample_period; |
| @@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter, | |||
| 1245 | ret = 1; | 1245 | ret = 1; |
| 1246 | } | 1246 | } |
| 1247 | /* | 1247 | /* |
| 1248 | * Quirk: certain CPUs dont like it if just 1 event is left: | 1248 | * Quirk: certain CPUs dont like it if just 1 hw_event is left: |
| 1249 | */ | 1249 | */ |
| 1250 | if (unlikely(left < 2)) | 1250 | if (unlikely(left < 2)) |
| 1251 | left = 2; | 1251 | left = 2; |
| @@ -1256,21 +1256,21 @@ x86_perf_counter_set_period(struct perf_counter *counter, | |||
| 1256 | per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; | 1256 | per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; |
| 1257 | 1257 | ||
| 1258 | /* | 1258 | /* |
| 1259 | * The hw counter starts counting from this counter offset, | 1259 | * The hw event starts counting from this event offset, |
| 1260 | * mark it to be able to extra future deltas: | 1260 | * mark it to be able to extra future deltas: |
| 1261 | */ | 1261 | */ |
| 1262 | atomic64_set(&hwc->prev_count, (u64)-left); | 1262 | atomic64_set(&hwc->prev_count, (u64)-left); |
| 1263 | 1263 | ||
| 1264 | err = checking_wrmsrl(hwc->counter_base + idx, | 1264 | err = checking_wrmsrl(hwc->event_base + idx, |
| 1265 | (u64)(-left) & x86_pmu.counter_mask); | 1265 | (u64)(-left) & x86_pmu.event_mask); |
| 1266 | 1266 | ||
| 1267 | perf_counter_update_userpage(counter); | 1267 | perf_event_update_userpage(event); |
| 1268 | 1268 | ||
| 1269 | return ret; | 1269 | return ret; |
| 1270 | } | 1270 | } |
| 1271 | 1271 | ||
| 1272 | static inline void | 1272 | static inline void |
| 1273 | intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) | 1273 | intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) |
| 1274 | { | 1274 | { |
| 1275 | int idx = __idx - X86_PMC_IDX_FIXED; | 1275 | int idx = __idx - X86_PMC_IDX_FIXED; |
| 1276 | u64 ctrl_val, bits, mask; | 1276 | u64 ctrl_val, bits, mask; |
| @@ -1295,9 +1295,9 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) | |||
| 1295 | err = checking_wrmsrl(hwc->config_base, ctrl_val); | 1295 | err = checking_wrmsrl(hwc->config_base, ctrl_val); |
| 1296 | } | 1296 | } |
| 1297 | 1297 | ||
| 1298 | static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | 1298 | static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) |
| 1299 | { | 1299 | { |
| 1300 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1300 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1301 | u64 val; | 1301 | u64 val; |
| 1302 | 1302 | ||
| 1303 | val = hwc->config; | 1303 | val = hwc->config; |
| @@ -1308,10 +1308,10 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | |||
| 1308 | } | 1308 | } |
| 1309 | 1309 | ||
| 1310 | 1310 | ||
| 1311 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | 1311 | static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) |
| 1312 | { | 1312 | { |
| 1313 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | 1313 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { |
| 1314 | if (!__get_cpu_var(cpu_hw_counters).enabled) | 1314 | if (!__get_cpu_var(cpu_hw_events).enabled) |
| 1315 | return; | 1315 | return; |
| 1316 | 1316 | ||
| 1317 | intel_pmu_enable_bts(hwc->config); | 1317 | intel_pmu_enable_bts(hwc->config); |
| @@ -1323,134 +1323,134 @@ static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | |||
| 1323 | return; | 1323 | return; |
| 1324 | } | 1324 | } |
| 1325 | 1325 | ||
| 1326 | x86_pmu_enable_counter(hwc, idx); | 1326 | x86_pmu_enable_event(hwc, idx); |
| 1327 | } | 1327 | } |
| 1328 | 1328 | ||
| 1329 | static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | 1329 | static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) |
| 1330 | { | 1330 | { |
| 1331 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1331 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1332 | 1332 | ||
| 1333 | if (cpuc->enabled) | 1333 | if (cpuc->enabled) |
| 1334 | x86_pmu_enable_counter(hwc, idx); | 1334 | x86_pmu_enable_event(hwc, idx); |
| 1335 | } | 1335 | } |
| 1336 | 1336 | ||
| 1337 | static int | 1337 | static int |
| 1338 | fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) | 1338 | fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) |
| 1339 | { | 1339 | { |
| 1340 | unsigned int event; | 1340 | unsigned int hw_event; |
| 1341 | 1341 | ||
| 1342 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | 1342 | hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK; |
| 1343 | 1343 | ||
| 1344 | if (unlikely((event == | 1344 | if (unlikely((hw_event == |
| 1345 | x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && | 1345 | x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && |
| 1346 | (hwc->sample_period == 1))) | 1346 | (hwc->sample_period == 1))) |
| 1347 | return X86_PMC_IDX_FIXED_BTS; | 1347 | return X86_PMC_IDX_FIXED_BTS; |
| 1348 | 1348 | ||
| 1349 | if (!x86_pmu.num_counters_fixed) | 1349 | if (!x86_pmu.num_events_fixed) |
| 1350 | return -1; | 1350 | return -1; |
| 1351 | 1351 | ||
| 1352 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) | 1352 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) |
| 1353 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; | 1353 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; |
| 1354 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) | 1354 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) |
| 1355 | return X86_PMC_IDX_FIXED_CPU_CYCLES; | 1355 | return X86_PMC_IDX_FIXED_CPU_CYCLES; |
| 1356 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) | 1356 | if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) |
| 1357 | return X86_PMC_IDX_FIXED_BUS_CYCLES; | 1357 | return X86_PMC_IDX_FIXED_BUS_CYCLES; |
| 1358 | 1358 | ||
| 1359 | return -1; | 1359 | return -1; |
| 1360 | } | 1360 | } |
| 1361 | 1361 | ||
| 1362 | /* | 1362 | /* |
| 1363 | * Find a PMC slot for the freshly enabled / scheduled in counter: | 1363 | * Find a PMC slot for the freshly enabled / scheduled in event: |
| 1364 | */ | 1364 | */ |
| 1365 | static int x86_pmu_enable(struct perf_counter *counter) | 1365 | static int x86_pmu_enable(struct perf_event *event) |
| 1366 | { | 1366 | { |
| 1367 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1367 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1368 | struct hw_perf_counter *hwc = &counter->hw; | 1368 | struct hw_perf_event *hwc = &event->hw; |
| 1369 | int idx; | 1369 | int idx; |
| 1370 | 1370 | ||
| 1371 | idx = fixed_mode_idx(counter, hwc); | 1371 | idx = fixed_mode_idx(event, hwc); |
| 1372 | if (idx == X86_PMC_IDX_FIXED_BTS) { | 1372 | if (idx == X86_PMC_IDX_FIXED_BTS) { |
| 1373 | /* BTS is already occupied. */ | 1373 | /* BTS is already occupied. */ |
| 1374 | if (test_and_set_bit(idx, cpuc->used_mask)) | 1374 | if (test_and_set_bit(idx, cpuc->used_mask)) |
| 1375 | return -EAGAIN; | 1375 | return -EAGAIN; |
| 1376 | 1376 | ||
| 1377 | hwc->config_base = 0; | 1377 | hwc->config_base = 0; |
| 1378 | hwc->counter_base = 0; | 1378 | hwc->event_base = 0; |
| 1379 | hwc->idx = idx; | 1379 | hwc->idx = idx; |
| 1380 | } else if (idx >= 0) { | 1380 | } else if (idx >= 0) { |
| 1381 | /* | 1381 | /* |
| 1382 | * Try to get the fixed counter, if that is already taken | 1382 | * Try to get the fixed event, if that is already taken |
| 1383 | * then try to get a generic counter: | 1383 | * then try to get a generic event: |
| 1384 | */ | 1384 | */ |
| 1385 | if (test_and_set_bit(idx, cpuc->used_mask)) | 1385 | if (test_and_set_bit(idx, cpuc->used_mask)) |
| 1386 | goto try_generic; | 1386 | goto try_generic; |
| 1387 | 1387 | ||
| 1388 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; | 1388 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; |
| 1389 | /* | 1389 | /* |
| 1390 | * We set it so that counter_base + idx in wrmsr/rdmsr maps to | 1390 | * We set it so that event_base + idx in wrmsr/rdmsr maps to |
| 1391 | * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: | 1391 | * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: |
| 1392 | */ | 1392 | */ |
| 1393 | hwc->counter_base = | 1393 | hwc->event_base = |
| 1394 | MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; | 1394 | MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; |
| 1395 | hwc->idx = idx; | 1395 | hwc->idx = idx; |
| 1396 | } else { | 1396 | } else { |
| 1397 | idx = hwc->idx; | 1397 | idx = hwc->idx; |
| 1398 | /* Try to get the previous generic counter again */ | 1398 | /* Try to get the previous generic event again */ |
| 1399 | if (test_and_set_bit(idx, cpuc->used_mask)) { | 1399 | if (test_and_set_bit(idx, cpuc->used_mask)) { |
| 1400 | try_generic: | 1400 | try_generic: |
| 1401 | idx = find_first_zero_bit(cpuc->used_mask, | 1401 | idx = find_first_zero_bit(cpuc->used_mask, |
| 1402 | x86_pmu.num_counters); | 1402 | x86_pmu.num_events); |
| 1403 | if (idx == x86_pmu.num_counters) | 1403 | if (idx == x86_pmu.num_events) |
| 1404 | return -EAGAIN; | 1404 | return -EAGAIN; |
| 1405 | 1405 | ||
| 1406 | set_bit(idx, cpuc->used_mask); | 1406 | set_bit(idx, cpuc->used_mask); |
| 1407 | hwc->idx = idx; | 1407 | hwc->idx = idx; |
| 1408 | } | 1408 | } |
| 1409 | hwc->config_base = x86_pmu.eventsel; | 1409 | hwc->config_base = x86_pmu.eventsel; |
| 1410 | hwc->counter_base = x86_pmu.perfctr; | 1410 | hwc->event_base = x86_pmu.perfctr; |
| 1411 | } | 1411 | } |
| 1412 | 1412 | ||
| 1413 | perf_counters_lapic_init(); | 1413 | perf_events_lapic_init(); |
| 1414 | 1414 | ||
| 1415 | x86_pmu.disable(hwc, idx); | 1415 | x86_pmu.disable(hwc, idx); |
| 1416 | 1416 | ||
| 1417 | cpuc->counters[idx] = counter; | 1417 | cpuc->events[idx] = event; |
| 1418 | set_bit(idx, cpuc->active_mask); | 1418 | set_bit(idx, cpuc->active_mask); |
| 1419 | 1419 | ||
| 1420 | x86_perf_counter_set_period(counter, hwc, idx); | 1420 | x86_perf_event_set_period(event, hwc, idx); |
| 1421 | x86_pmu.enable(hwc, idx); | 1421 | x86_pmu.enable(hwc, idx); |
| 1422 | 1422 | ||
| 1423 | perf_counter_update_userpage(counter); | 1423 | perf_event_update_userpage(event); |
| 1424 | 1424 | ||
| 1425 | return 0; | 1425 | return 0; |
| 1426 | } | 1426 | } |
| 1427 | 1427 | ||
| 1428 | static void x86_pmu_unthrottle(struct perf_counter *counter) | 1428 | static void x86_pmu_unthrottle(struct perf_event *event) |
| 1429 | { | 1429 | { |
| 1430 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1430 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1431 | struct hw_perf_counter *hwc = &counter->hw; | 1431 | struct hw_perf_event *hwc = &event->hw; |
| 1432 | 1432 | ||
| 1433 | if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || | 1433 | if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || |
| 1434 | cpuc->counters[hwc->idx] != counter)) | 1434 | cpuc->events[hwc->idx] != event)) |
| 1435 | return; | 1435 | return; |
| 1436 | 1436 | ||
| 1437 | x86_pmu.enable(hwc, hwc->idx); | 1437 | x86_pmu.enable(hwc, hwc->idx); |
| 1438 | } | 1438 | } |
| 1439 | 1439 | ||
| 1440 | void perf_counter_print_debug(void) | 1440 | void perf_event_print_debug(void) |
| 1441 | { | 1441 | { |
| 1442 | u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; | 1442 | u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; |
| 1443 | struct cpu_hw_counters *cpuc; | 1443 | struct cpu_hw_events *cpuc; |
| 1444 | unsigned long flags; | 1444 | unsigned long flags; |
| 1445 | int cpu, idx; | 1445 | int cpu, idx; |
| 1446 | 1446 | ||
| 1447 | if (!x86_pmu.num_counters) | 1447 | if (!x86_pmu.num_events) |
| 1448 | return; | 1448 | return; |
| 1449 | 1449 | ||
| 1450 | local_irq_save(flags); | 1450 | local_irq_save(flags); |
| 1451 | 1451 | ||
| 1452 | cpu = smp_processor_id(); | 1452 | cpu = smp_processor_id(); |
| 1453 | cpuc = &per_cpu(cpu_hw_counters, cpu); | 1453 | cpuc = &per_cpu(cpu_hw_events, cpu); |
| 1454 | 1454 | ||
| 1455 | if (x86_pmu.version >= 2) { | 1455 | if (x86_pmu.version >= 2) { |
| 1456 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); | 1456 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); |
| @@ -1466,7 +1466,7 @@ void perf_counter_print_debug(void) | |||
| 1466 | } | 1466 | } |
| 1467 | pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); | 1467 | pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); |
| 1468 | 1468 | ||
| 1469 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1469 | for (idx = 0; idx < x86_pmu.num_events; idx++) { |
| 1470 | rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); | 1470 | rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); |
| 1471 | rdmsrl(x86_pmu.perfctr + idx, pmc_count); | 1471 | rdmsrl(x86_pmu.perfctr + idx, pmc_count); |
| 1472 | 1472 | ||
| @@ -1479,7 +1479,7 @@ void perf_counter_print_debug(void) | |||
| 1479 | pr_info("CPU#%d: gen-PMC%d left: %016llx\n", | 1479 | pr_info("CPU#%d: gen-PMC%d left: %016llx\n", |
| 1480 | cpu, idx, prev_left); | 1480 | cpu, idx, prev_left); |
| 1481 | } | 1481 | } |
| 1482 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { | 1482 | for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { |
| 1483 | rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); | 1483 | rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); |
| 1484 | 1484 | ||
| 1485 | pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", | 1485 | pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", |
| @@ -1488,7 +1488,7 @@ void perf_counter_print_debug(void) | |||
| 1488 | local_irq_restore(flags); | 1488 | local_irq_restore(flags); |
| 1489 | } | 1489 | } |
| 1490 | 1490 | ||
| 1491 | static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) | 1491 | static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc) |
| 1492 | { | 1492 | { |
| 1493 | struct debug_store *ds = cpuc->ds; | 1493 | struct debug_store *ds = cpuc->ds; |
| 1494 | struct bts_record { | 1494 | struct bts_record { |
| @@ -1496,14 +1496,14 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) | |||
| 1496 | u64 to; | 1496 | u64 to; |
| 1497 | u64 flags; | 1497 | u64 flags; |
| 1498 | }; | 1498 | }; |
| 1499 | struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | 1499 | struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; |
| 1500 | struct bts_record *at, *top; | 1500 | struct bts_record *at, *top; |
| 1501 | struct perf_output_handle handle; | 1501 | struct perf_output_handle handle; |
| 1502 | struct perf_event_header header; | 1502 | struct perf_event_header header; |
| 1503 | struct perf_sample_data data; | 1503 | struct perf_sample_data data; |
| 1504 | struct pt_regs regs; | 1504 | struct pt_regs regs; |
| 1505 | 1505 | ||
| 1506 | if (!counter) | 1506 | if (!event) |
| 1507 | return; | 1507 | return; |
| 1508 | 1508 | ||
| 1509 | if (!ds) | 1509 | if (!ds) |
| @@ -1518,7 +1518,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) | |||
| 1518 | ds->bts_index = ds->bts_buffer_base; | 1518 | ds->bts_index = ds->bts_buffer_base; |
| 1519 | 1519 | ||
| 1520 | 1520 | ||
| 1521 | data.period = counter->hw.last_period; | 1521 | data.period = event->hw.last_period; |
| 1522 | data.addr = 0; | 1522 | data.addr = 0; |
| 1523 | regs.ip = 0; | 1523 | regs.ip = 0; |
| 1524 | 1524 | ||
| @@ -1527,9 +1527,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) | |||
| 1527 | * We will overwrite the from and to address before we output | 1527 | * We will overwrite the from and to address before we output |
| 1528 | * the sample. | 1528 | * the sample. |
| 1529 | */ | 1529 | */ |
| 1530 | perf_prepare_sample(&header, &data, counter, ®s); | 1530 | perf_prepare_sample(&header, &data, event, ®s); |
| 1531 | 1531 | ||
| 1532 | if (perf_output_begin(&handle, counter, | 1532 | if (perf_output_begin(&handle, event, |
| 1533 | header.size * (top - at), 1, 1)) | 1533 | header.size * (top - at), 1, 1)) |
| 1534 | return; | 1534 | return; |
| 1535 | 1535 | ||
| @@ -1537,20 +1537,20 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) | |||
| 1537 | data.ip = at->from; | 1537 | data.ip = at->from; |
| 1538 | data.addr = at->to; | 1538 | data.addr = at->to; |
| 1539 | 1539 | ||
| 1540 | perf_output_sample(&handle, &header, &data, counter); | 1540 | perf_output_sample(&handle, &header, &data, event); |
| 1541 | } | 1541 | } |
| 1542 | 1542 | ||
| 1543 | perf_output_end(&handle); | 1543 | perf_output_end(&handle); |
| 1544 | 1544 | ||
| 1545 | /* There's new data available. */ | 1545 | /* There's new data available. */ |
| 1546 | counter->hw.interrupts++; | 1546 | event->hw.interrupts++; |
| 1547 | counter->pending_kill = POLL_IN; | 1547 | event->pending_kill = POLL_IN; |
| 1548 | } | 1548 | } |
| 1549 | 1549 | ||
| 1550 | static void x86_pmu_disable(struct perf_counter *counter) | 1550 | static void x86_pmu_disable(struct perf_event *event) |
| 1551 | { | 1551 | { |
| 1552 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1552 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
| 1553 | struct hw_perf_counter *hwc = &counter->hw; | 1553 | struct hw_perf_event *hwc = &event->hw; |
| 1554 | int idx = hwc->idx; | 1554 | int idx = hwc->idx; |
| 1555 | 1555 | ||
| 1556 | /* | 1556 | /* |
| @@ -1562,63 +1562,63 @@ static void x86_pmu_disable(struct perf_counter *counter) | |||
| 1562 | 1562 | ||
| 1563 | /* | 1563 | /* |
| 1564 | * Make sure the cleared pointer becomes visible before we | 1564 | * Make sure the cleared pointer becomes visible before we |
| 1565 | * (potentially) free the counter: | 1565 | * (potentially) free the event: |
| 1566 | */ | 1566 | */ |
| 1567 | barrier(); | 1567 | barrier(); |
| 1568 | 1568 | ||
| 1569 | /* | 1569 | /* |
| 1570 | * Drain the remaining delta count out of a counter | 1570 | * Drain the remaining delta count out of a event |
| 1571 | * that we are disabling: | 1571 | * that we are disabling: |
| 1572 | */ | 1572 | */ |
| 1573 | x86_perf_counter_update(counter, hwc, idx); | 1573 | x86_perf_event_update(event, hwc, idx); |
| 1574 | 1574 | ||
| 1575 | /* Drain the remaining BTS records. */ | 1575 | /* Drain the remaining BTS records. */ |
| 1576 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) | 1576 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) |
| 1577 | intel_pmu_drain_bts_buffer(cpuc); | 1577 | intel_pmu_drain_bts_buffer(cpuc); |
| 1578 | 1578 | ||
| 1579 | cpuc->counters[idx] = NULL; | 1579 | cpuc->events[idx] = NULL; |
| 1580 | clear_bit(idx, cpuc->used_mask); | 1580 | clear_bit(idx, cpuc->used_mask); |
| 1581 | 1581 | ||
| 1582 | perf_counter_update_userpage(counter); | 1582 | perf_event_update_userpage(event); |
| 1583 | } | 1583 | } |
| 1584 | 1584 | ||
| 1585 | /* | 1585 | /* |
| 1586 | * Save and restart an expired counter. Called by NMI contexts, | 1586 | * Save and restart an expired event. Called by NMI contexts, |
| 1587 | * so it has to be careful about preempting normal counter ops: | 1587 | * so it has to be careful about preempting normal event ops: |
| 1588 | */ | 1588 | */ |
| 1589 | static int intel_pmu_save_and_restart(struct perf_counter *counter) | 1589 | static int intel_pmu_save_and_restart(struct perf_event *event) |
| 1590 | { | 1590 | { |
| 1591 | struct hw_perf_counter *hwc = &counter->hw; | 1591 | struct hw_perf_event *hwc = &event->hw; |
| 1592 | int idx = hwc->idx; | 1592 | int idx = hwc->idx; |
| 1593 | int ret; | 1593 | int ret; |
| 1594 | 1594 | ||
| 1595 | x86_perf_counter_update(counter, hwc, idx); | 1595 | x86_perf_event_update(event, hwc, idx); |
| 1596 | ret = x86_perf_counter_set_period(counter, hwc, idx); | 1596 | ret = x86_perf_event_set_period(event, hwc, idx); |
| 1597 | 1597 | ||
| 1598 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | 1598 | if (event->state == PERF_EVENT_STATE_ACTIVE) |
| 1599 | intel_pmu_enable_counter(hwc, idx); | 1599 | intel_pmu_enable_event(hwc, idx); |
| 1600 | 1600 | ||
| 1601 | return ret; | 1601 | return ret; |
| 1602 | } | 1602 | } |
| 1603 | 1603 | ||
| 1604 | static void intel_pmu_reset(void) | 1604 | static void intel_pmu_reset(void) |
| 1605 | { | 1605 | { |
| 1606 | struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; | 1606 | struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds; |
| 1607 | unsigned long flags; | 1607 | unsigned long flags; |
| 1608 | int idx; | 1608 | int idx; |
| 1609 | 1609 | ||
| 1610 | if (!x86_pmu.num_counters) | 1610 | if (!x86_pmu.num_events) |
| 1611 | return; | 1611 | return; |
| 1612 | 1612 | ||
| 1613 | local_irq_save(flags); | 1613 | local_irq_save(flags); |
| 1614 | 1614 | ||
| 1615 | printk("clearing PMU state on CPU#%d\n", smp_processor_id()); | 1615 | printk("clearing PMU state on CPU#%d\n", smp_processor_id()); |
| 1616 | 1616 | ||
| 1617 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1617 | for (idx = 0; idx < x86_pmu.num_events; idx++) { |
| 1618 | checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); | 1618 | checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); |
| 1619 | checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); | 1619 | checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); |
| 1620 | } | 1620 | } |
| 1621 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { | 1621 | for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { |
| 1622 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | 1622 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); |
| 1623 | } | 1623 | } |
| 1624 | if (ds) | 1624 | if (ds) |
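
The reset loop above works because each x86_pmu descriptor encodes its registers as a base MSR plus the counter index: `eventsel + idx` addresses the control register and `perfctr + idx` the count, so clearing the machine is two writes per generic counter and one per fixed counter. A small user-space sketch of that addressing; the base numbers are the publicly documented ones (Intel SDM, AMD manuals) and are an assumption here, not something visible in this hunk:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch of the base-plus-index MSR addressing the x86_pmu descriptors use.
 * The base numbers are the publicly documented ones (Intel's architectural
 * perfmon shares the P6 bases); treat them as illustrative values only. */
struct pmu_layout {
	const char *name;
	uint32_t eventsel;	/* first event-select (control) MSR */
	uint32_t perfctr;	/* first counter MSR */
	int num_events;
};

int main(void)
{
	struct pmu_layout pmus[] = {
		{ "p6",  0x00000186, 0x000000C1, 2 },	/* MSR_P6_EVNTSEL0 / PERFCTR0 */
		{ "amd", 0xC0010000, 0xC0010004, 4 },	/* MSR_K7_EVNTSEL0 / PERFCTR0 */
	};

	for (unsigned int p = 0; p < sizeof(pmus) / sizeof(pmus[0]); p++)
		for (int idx = 0; idx < pmus[p].num_events; idx++)
			printf("%-4s counter %d: evtsel=0x%08x perfctr=0x%08x\n",
			       pmus[p].name, idx,
			       pmus[p].eventsel + idx, pmus[p].perfctr + idx);
	return 0;
}
```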
| @@ -1630,38 +1630,38 @@ static void intel_pmu_reset(void) | |||
| 1630 | static int p6_pmu_handle_irq(struct pt_regs *regs) | 1630 | static int p6_pmu_handle_irq(struct pt_regs *regs) |
| 1631 | { | 1631 | { |
| 1632 | struct perf_sample_data data; | 1632 | struct perf_sample_data data; |
| 1633 | struct cpu_hw_counters *cpuc; | 1633 | struct cpu_hw_events *cpuc; |
| 1634 | struct perf_counter *counter; | 1634 | struct perf_event *event; |
| 1635 | struct hw_perf_counter *hwc; | 1635 | struct hw_perf_event *hwc; |
| 1636 | int idx, handled = 0; | 1636 | int idx, handled = 0; |
| 1637 | u64 val; | 1637 | u64 val; |
| 1638 | 1638 | ||
| 1639 | data.addr = 0; | 1639 | data.addr = 0; |
| 1640 | 1640 | ||
| 1641 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1641 | cpuc = &__get_cpu_var(cpu_hw_events); |
| 1642 | 1642 | ||
| 1643 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1643 | for (idx = 0; idx < x86_pmu.num_events; idx++) { |
| 1644 | if (!test_bit(idx, cpuc->active_mask)) | 1644 | if (!test_bit(idx, cpuc->active_mask)) |
| 1645 | continue; | 1645 | continue; |
| 1646 | 1646 | ||
| 1647 | counter = cpuc->counters[idx]; | 1647 | event = cpuc->events[idx]; |
| 1648 | hwc = &counter->hw; | 1648 | hwc = &event->hw; |
| 1649 | 1649 | ||
| 1650 | val = x86_perf_counter_update(counter, hwc, idx); | 1650 | val = x86_perf_event_update(event, hwc, idx); |
| 1651 | if (val & (1ULL << (x86_pmu.counter_bits - 1))) | 1651 | if (val & (1ULL << (x86_pmu.event_bits - 1))) |
| 1652 | continue; | 1652 | continue; |
| 1653 | 1653 | ||
| 1654 | /* | 1654 | /* |
| 1655 | * counter overflow | 1655 | * event overflow |
| 1656 | */ | 1656 | */ |
| 1657 | handled = 1; | 1657 | handled = 1; |
| 1658 | data.period = counter->hw.last_period; | 1658 | data.period = event->hw.last_period; |
| 1659 | 1659 | ||
| 1660 | if (!x86_perf_counter_set_period(counter, hwc, idx)) | 1660 | if (!x86_perf_event_set_period(event, hwc, idx)) |
| 1661 | continue; | 1661 | continue; |
| 1662 | 1662 | ||
| 1663 | if (perf_counter_overflow(counter, 1, &data, regs)) | 1663 | if (perf_event_overflow(event, 1, &data, regs)) |
| 1664 | p6_pmu_disable_counter(hwc, idx); | 1664 | p6_pmu_disable_event(hwc, idx); |
| 1665 | } | 1665 | } |
| 1666 | 1666 | ||
| 1667 | if (handled) | 1667 | if (handled) |
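
The P6 and AMD PMUs have no global overflow-status register, so the handler above polls every active counter and infers overflow from the value itself: the counter is armed with the negated period, its top bit stays set while it is still counting up toward zero, and a clear top bit (the `event_bits - 1` test) means it wrapped. A self-contained model of that convention, assuming the arm-with-negative-period scheme; the 48-bit width matches what the AMD descriptor later in this file advertises:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch only: mimics the top-bit overflow test used on PMUs without a
 * global overflow status MSR. `width` stands in for x86_pmu.event_bits
 * (32 effective bits on P6, 48 on AMD). */
static int counter_overflowed(uint64_t raw, unsigned int width)
{
	/* Counters are armed with (-period) & mask, so the top bit stays set
	 * until the count wraps past zero; a clear top bit means overflow. */
	return !(raw & (1ULL << (width - 1)));
}

int main(void)
{
	unsigned int width = 48;			/* AMD-style counter width */
	uint64_t mask = (1ULL << width) - 1;
	uint64_t period = 100000;
	uint64_t armed = (0 - period) & mask;		/* value written to the PMC */

	printf("armed:        overflowed=%d\n", counter_overflowed(armed, width));
	printf("after period: overflowed=%d\n",
	       counter_overflowed((armed + period) & mask, width));
	return 0;
}
```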
| @@ -1677,13 +1677,13 @@ static int p6_pmu_handle_irq(struct pt_regs *regs) | |||
| 1677 | static int intel_pmu_handle_irq(struct pt_regs *regs) | 1677 | static int intel_pmu_handle_irq(struct pt_regs *regs) |
| 1678 | { | 1678 | { |
| 1679 | struct perf_sample_data data; | 1679 | struct perf_sample_data data; |
| 1680 | struct cpu_hw_counters *cpuc; | 1680 | struct cpu_hw_events *cpuc; |
| 1681 | int bit, loops; | 1681 | int bit, loops; |
| 1682 | u64 ack, status; | 1682 | u64 ack, status; |
| 1683 | 1683 | ||
| 1684 | data.addr = 0; | 1684 | data.addr = 0; |
| 1685 | 1685 | ||
| 1686 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1686 | cpuc = &__get_cpu_var(cpu_hw_events); |
| 1687 | 1687 | ||
| 1688 | perf_disable(); | 1688 | perf_disable(); |
| 1689 | intel_pmu_drain_bts_buffer(cpuc); | 1689 | intel_pmu_drain_bts_buffer(cpuc); |
| @@ -1696,8 +1696,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
| 1696 | loops = 0; | 1696 | loops = 0; |
| 1697 | again: | 1697 | again: |
| 1698 | if (++loops > 100) { | 1698 | if (++loops > 100) { |
| 1699 | WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); | 1699 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); |
| 1700 | perf_counter_print_debug(); | 1700 | perf_event_print_debug(); |
| 1701 | intel_pmu_reset(); | 1701 | intel_pmu_reset(); |
| 1702 | perf_enable(); | 1702 | perf_enable(); |
| 1703 | return 1; | 1703 | return 1; |
| @@ -1706,19 +1706,19 @@ again: | |||
| 1706 | inc_irq_stat(apic_perf_irqs); | 1706 | inc_irq_stat(apic_perf_irqs); |
| 1707 | ack = status; | 1707 | ack = status; |
| 1708 | for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { | 1708 | for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { |
| 1709 | struct perf_counter *counter = cpuc->counters[bit]; | 1709 | struct perf_event *event = cpuc->events[bit]; |
| 1710 | 1710 | ||
| 1711 | clear_bit(bit, (unsigned long *) &status); | 1711 | clear_bit(bit, (unsigned long *) &status); |
| 1712 | if (!test_bit(bit, cpuc->active_mask)) | 1712 | if (!test_bit(bit, cpuc->active_mask)) |
| 1713 | continue; | 1713 | continue; |
| 1714 | 1714 | ||
| 1715 | if (!intel_pmu_save_and_restart(counter)) | 1715 | if (!intel_pmu_save_and_restart(event)) |
| 1716 | continue; | 1716 | continue; |
| 1717 | 1717 | ||
| 1718 | data.period = counter->hw.last_period; | 1718 | data.period = event->hw.last_period; |
| 1719 | 1719 | ||
| 1720 | if (perf_counter_overflow(counter, 1, &data, regs)) | 1720 | if (perf_event_overflow(event, 1, &data, regs)) |
| 1721 | intel_pmu_disable_counter(&counter->hw, bit); | 1721 | intel_pmu_disable_event(&event->hw, bit); |
| 1722 | } | 1722 | } |
| 1723 | 1723 | ||
| 1724 | intel_pmu_ack_status(ack); | 1724 | intel_pmu_ack_status(ack); |
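
The Intel handler avoids the per-counter polling used by the P6 and AMD paths: the hardware reports every pending overflow at once as a status bitmask (read and acknowledged by helpers outside this hunk), and `for_each_bit` visits only the set bits, with fixed counters indexed from bit 32 upward. A standalone model of that walk; the fixed-counter base of 32 mirrors `X86_PMC_IDX_FIXED` and is an assumption rather than something shown in this hunk:

```c
#include <stdint.h>
#include <stdio.h>

/* Sketch: how a global-status bitmask maps to overflowed counters.
 * Bit N names generic counter N; fixed counters start at FIXED_BASE,
 * which is assumed here to mirror X86_PMC_IDX_FIXED in the kernel. */
#define FIXED_BASE 32

int main(void)
{
	uint64_t status = (1ULL << 1) | (1ULL << (FIXED_BASE + 0));
	int bit;

	for (bit = 0; bit < 64; bit++) {
		if (!(status & (1ULL << bit)))
			continue;
		if (bit < FIXED_BASE)
			printf("generic counter %d overflowed\n", bit);
		else
			printf("fixed counter %d overflowed\n", bit - FIXED_BASE);
		status &= ~(1ULL << bit);	/* 'ack' the bit we handled */
	}
	return 0;
}
```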
| @@ -1738,38 +1738,38 @@ again: | |||
| 1738 | static int amd_pmu_handle_irq(struct pt_regs *regs) | 1738 | static int amd_pmu_handle_irq(struct pt_regs *regs) |
| 1739 | { | 1739 | { |
| 1740 | struct perf_sample_data data; | 1740 | struct perf_sample_data data; |
| 1741 | struct cpu_hw_counters *cpuc; | 1741 | struct cpu_hw_events *cpuc; |
| 1742 | struct perf_counter *counter; | 1742 | struct perf_event *event; |
| 1743 | struct hw_perf_counter *hwc; | 1743 | struct hw_perf_event *hwc; |
| 1744 | int idx, handled = 0; | 1744 | int idx, handled = 0; |
| 1745 | u64 val; | 1745 | u64 val; |
| 1746 | 1746 | ||
| 1747 | data.addr = 0; | 1747 | data.addr = 0; |
| 1748 | 1748 | ||
| 1749 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1749 | cpuc = &__get_cpu_var(cpu_hw_events); |
| 1750 | 1750 | ||
| 1751 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1751 | for (idx = 0; idx < x86_pmu.num_events; idx++) { |
| 1752 | if (!test_bit(idx, cpuc->active_mask)) | 1752 | if (!test_bit(idx, cpuc->active_mask)) |
| 1753 | continue; | 1753 | continue; |
| 1754 | 1754 | ||
| 1755 | counter = cpuc->counters[idx]; | 1755 | event = cpuc->events[idx]; |
| 1756 | hwc = &counter->hw; | 1756 | hwc = &event->hw; |
| 1757 | 1757 | ||
| 1758 | val = x86_perf_counter_update(counter, hwc, idx); | 1758 | val = x86_perf_event_update(event, hwc, idx); |
| 1759 | if (val & (1ULL << (x86_pmu.counter_bits - 1))) | 1759 | if (val & (1ULL << (x86_pmu.event_bits - 1))) |
| 1760 | continue; | 1760 | continue; |
| 1761 | 1761 | ||
| 1762 | /* | 1762 | /* |
| 1763 | * counter overflow | 1763 | * event overflow |
| 1764 | */ | 1764 | */ |
| 1765 | handled = 1; | 1765 | handled = 1; |
| 1766 | data.period = counter->hw.last_period; | 1766 | data.period = event->hw.last_period; |
| 1767 | 1767 | ||
| 1768 | if (!x86_perf_counter_set_period(counter, hwc, idx)) | 1768 | if (!x86_perf_event_set_period(event, hwc, idx)) |
| 1769 | continue; | 1769 | continue; |
| 1770 | 1770 | ||
| 1771 | if (perf_counter_overflow(counter, 1, &data, regs)) | 1771 | if (perf_event_overflow(event, 1, &data, regs)) |
| 1772 | amd_pmu_disable_counter(hwc, idx); | 1772 | amd_pmu_disable_event(hwc, idx); |
| 1773 | } | 1773 | } |
| 1774 | 1774 | ||
| 1775 | if (handled) | 1775 | if (handled) |
| @@ -1783,18 +1783,18 @@ void smp_perf_pending_interrupt(struct pt_regs *regs) | |||
| 1783 | irq_enter(); | 1783 | irq_enter(); |
| 1784 | ack_APIC_irq(); | 1784 | ack_APIC_irq(); |
| 1785 | inc_irq_stat(apic_pending_irqs); | 1785 | inc_irq_stat(apic_pending_irqs); |
| 1786 | perf_counter_do_pending(); | 1786 | perf_event_do_pending(); |
| 1787 | irq_exit(); | 1787 | irq_exit(); |
| 1788 | } | 1788 | } |
| 1789 | 1789 | ||
| 1790 | void set_perf_counter_pending(void) | 1790 | void set_perf_event_pending(void) |
| 1791 | { | 1791 | { |
| 1792 | #ifdef CONFIG_X86_LOCAL_APIC | 1792 | #ifdef CONFIG_X86_LOCAL_APIC |
| 1793 | apic->send_IPI_self(LOCAL_PENDING_VECTOR); | 1793 | apic->send_IPI_self(LOCAL_PENDING_VECTOR); |
| 1794 | #endif | 1794 | #endif |
| 1795 | } | 1795 | } |
| 1796 | 1796 | ||
| 1797 | void perf_counters_lapic_init(void) | 1797 | void perf_events_lapic_init(void) |
| 1798 | { | 1798 | { |
| 1799 | #ifdef CONFIG_X86_LOCAL_APIC | 1799 | #ifdef CONFIG_X86_LOCAL_APIC |
| 1800 | if (!x86_pmu.apic || !x86_pmu_initialized()) | 1800 | if (!x86_pmu.apic || !x86_pmu_initialized()) |
| @@ -1808,13 +1808,13 @@ void perf_counters_lapic_init(void) | |||
| 1808 | } | 1808 | } |
| 1809 | 1809 | ||
| 1810 | static int __kprobes | 1810 | static int __kprobes |
| 1811 | perf_counter_nmi_handler(struct notifier_block *self, | 1811 | perf_event_nmi_handler(struct notifier_block *self, |
| 1812 | unsigned long cmd, void *__args) | 1812 | unsigned long cmd, void *__args) |
| 1813 | { | 1813 | { |
| 1814 | struct die_args *args = __args; | 1814 | struct die_args *args = __args; |
| 1815 | struct pt_regs *regs; | 1815 | struct pt_regs *regs; |
| 1816 | 1816 | ||
| 1817 | if (!atomic_read(&active_counters)) | 1817 | if (!atomic_read(&active_events)) |
| 1818 | return NOTIFY_DONE; | 1818 | return NOTIFY_DONE; |
| 1819 | 1819 | ||
| 1820 | switch (cmd) { | 1820 | switch (cmd) { |
| @@ -1833,7 +1833,7 @@ perf_counter_nmi_handler(struct notifier_block *self, | |||
| 1833 | #endif | 1833 | #endif |
| 1834 | /* | 1834 | /* |
| 1835 | * Can't rely on the handled return value to say it was our NMI, two | 1835 | * Can't rely on the handled return value to say it was our NMI, two |
| 1836 | * counters could trigger 'simultaneously' raising two back-to-back NMIs. | 1836 | * events could trigger 'simultaneously' raising two back-to-back NMIs. |
| 1837 | * | 1837 | * |
| 1838 | * If the first NMI handles both, the latter will be empty and daze | 1838 | * If the first NMI handles both, the latter will be empty and daze |
| 1839 | * the CPU. | 1839 | * the CPU. |
| @@ -1843,8 +1843,8 @@ perf_counter_nmi_handler(struct notifier_block *self, | |||
| 1843 | return NOTIFY_STOP; | 1843 | return NOTIFY_STOP; |
| 1844 | } | 1844 | } |
| 1845 | 1845 | ||
| 1846 | static __read_mostly struct notifier_block perf_counter_nmi_notifier = { | 1846 | static __read_mostly struct notifier_block perf_event_nmi_notifier = { |
| 1847 | .notifier_call = perf_counter_nmi_handler, | 1847 | .notifier_call = perf_event_nmi_handler, |
| 1848 | .next = NULL, | 1848 | .next = NULL, |
| 1849 | .priority = 1 | 1849 | .priority = 1 |
| 1850 | }; | 1850 | }; |
| @@ -1854,8 +1854,8 @@ static struct x86_pmu p6_pmu = { | |||
| 1854 | .handle_irq = p6_pmu_handle_irq, | 1854 | .handle_irq = p6_pmu_handle_irq, |
| 1855 | .disable_all = p6_pmu_disable_all, | 1855 | .disable_all = p6_pmu_disable_all, |
| 1856 | .enable_all = p6_pmu_enable_all, | 1856 | .enable_all = p6_pmu_enable_all, |
| 1857 | .enable = p6_pmu_enable_counter, | 1857 | .enable = p6_pmu_enable_event, |
| 1858 | .disable = p6_pmu_disable_counter, | 1858 | .disable = p6_pmu_disable_event, |
| 1859 | .eventsel = MSR_P6_EVNTSEL0, | 1859 | .eventsel = MSR_P6_EVNTSEL0, |
| 1860 | .perfctr = MSR_P6_PERFCTR0, | 1860 | .perfctr = MSR_P6_PERFCTR0, |
| 1861 | .event_map = p6_pmu_event_map, | 1861 | .event_map = p6_pmu_event_map, |
| @@ -1864,16 +1864,16 @@ static struct x86_pmu p6_pmu = { | |||
| 1864 | .apic = 1, | 1864 | .apic = 1, |
| 1865 | .max_period = (1ULL << 31) - 1, | 1865 | .max_period = (1ULL << 31) - 1, |
| 1866 | .version = 0, | 1866 | .version = 0, |
| 1867 | .num_counters = 2, | 1867 | .num_events = 2, |
| 1868 | /* | 1868 | /* |
| 1869 | * Counters have 40 bits implemented. However they are designed such | 1869 | * Events have 40 bits implemented. However they are designed such |
| 1870 | * that bits [32-39] are sign extensions of bit 31. As such the | 1870 | * that bits [32-39] are sign extensions of bit 31. As such the |
| 1871 | * effective width of a counter for P6-like PMU is 32 bits only. | 1871 | * effective width of an event for P6-like PMU is 32 bits only. |
| 1872 | * | 1872 | * |
| 1873 | * See IA-32 Intel Architecture Software developer manual Vol 3B | 1873 | * See IA-32 Intel Architecture Software developer manual Vol 3B |
| 1874 | */ | 1874 | */ |
| 1875 | .counter_bits = 32, | 1875 | .event_bits = 32, |
| 1876 | .counter_mask = (1ULL << 32) - 1, | 1876 | .event_mask = (1ULL << 32) - 1, |
| 1877 | }; | 1877 | }; |
| 1878 | 1878 | ||
| 1879 | static struct x86_pmu intel_pmu = { | 1879 | static struct x86_pmu intel_pmu = { |
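
That 32-bit effective width is why the bookkeeping cannot simply subtract raw reads: whatever sits in bits 32-39 is sign-extension noise. Shifting both samples to the top of a 64-bit word and shifting the difference back down discards the noise and makes the subtraction wrap at the counter's real width. The sketch below models that trick in isolation; the in-file update helper is not part of this hunk, so treat the exact code as an illustration rather than a copy:

```c
#include <stdint.h>
#include <stdio.h>

/* Delta between two raw reads of a counter that is only `width` bits wide.
 * Shifting both values to the top of a 64-bit word discards whatever the
 * hardware keeps in the upper bits (P6 sign-extends bits 32-39 from bit 31)
 * and makes the subtraction wrap correctly at the effective width. */
static int64_t counter_delta(uint64_t prev, uint64_t now, unsigned int width)
{
	int shift = 64 - width;

	return ((int64_t)(now << shift) - (int64_t)(prev << shift)) >> shift;
}

int main(void)
{
	/* A read pair that wrapped past the 32-bit effective width: the upper
	 * byte of `prev` is sign-extension noise, the low 32 bits wrapped. */
	uint64_t prev = 0xFFFFFFFFF0ULL;	/* bit 31 set -> bits 32-39 are 1s */
	uint64_t now  = 0x0000000010ULL;	/* wrapped back past zero */

	printf("delta = %lld\n", (long long)counter_delta(prev, now, 32));
	return 0;
}
```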
| @@ -1881,8 +1881,8 @@ static struct x86_pmu intel_pmu = { | |||
| 1881 | .handle_irq = intel_pmu_handle_irq, | 1881 | .handle_irq = intel_pmu_handle_irq, |
| 1882 | .disable_all = intel_pmu_disable_all, | 1882 | .disable_all = intel_pmu_disable_all, |
| 1883 | .enable_all = intel_pmu_enable_all, | 1883 | .enable_all = intel_pmu_enable_all, |
| 1884 | .enable = intel_pmu_enable_counter, | 1884 | .enable = intel_pmu_enable_event, |
| 1885 | .disable = intel_pmu_disable_counter, | 1885 | .disable = intel_pmu_disable_event, |
| 1886 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, | 1886 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, |
| 1887 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | 1887 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, |
| 1888 | .event_map = intel_pmu_event_map, | 1888 | .event_map = intel_pmu_event_map, |
| @@ -1892,7 +1892,7 @@ static struct x86_pmu intel_pmu = { | |||
| 1892 | /* | 1892 | /* |
| 1893 | * Intel PMCs cannot be accessed sanely above 32 bit width, | 1893 | * Intel PMCs cannot be accessed sanely above 32 bit width, |
| 1894 | * so we install an artificial 1<<31 period regardless of | 1894 | * so we install an artificial 1<<31 period regardless of |
| 1895 | * the generic counter period: | 1895 | * the generic event period: |
| 1896 | */ | 1896 | */ |
| 1897 | .max_period = (1ULL << 31) - 1, | 1897 | .max_period = (1ULL << 31) - 1, |
| 1898 | .enable_bts = intel_pmu_enable_bts, | 1898 | .enable_bts = intel_pmu_enable_bts, |
| @@ -1904,16 +1904,16 @@ static struct x86_pmu amd_pmu = { | |||
| 1904 | .handle_irq = amd_pmu_handle_irq, | 1904 | .handle_irq = amd_pmu_handle_irq, |
| 1905 | .disable_all = amd_pmu_disable_all, | 1905 | .disable_all = amd_pmu_disable_all, |
| 1906 | .enable_all = amd_pmu_enable_all, | 1906 | .enable_all = amd_pmu_enable_all, |
| 1907 | .enable = amd_pmu_enable_counter, | 1907 | .enable = amd_pmu_enable_event, |
| 1908 | .disable = amd_pmu_disable_counter, | 1908 | .disable = amd_pmu_disable_event, |
| 1909 | .eventsel = MSR_K7_EVNTSEL0, | 1909 | .eventsel = MSR_K7_EVNTSEL0, |
| 1910 | .perfctr = MSR_K7_PERFCTR0, | 1910 | .perfctr = MSR_K7_PERFCTR0, |
| 1911 | .event_map = amd_pmu_event_map, | 1911 | .event_map = amd_pmu_event_map, |
| 1912 | .raw_event = amd_pmu_raw_event, | 1912 | .raw_event = amd_pmu_raw_event, |
| 1913 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | 1913 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), |
| 1914 | .num_counters = 4, | 1914 | .num_events = 4, |
| 1915 | .counter_bits = 48, | 1915 | .event_bits = 48, |
| 1916 | .counter_mask = (1ULL << 48) - 1, | 1916 | .event_mask = (1ULL << 48) - 1, |
| 1917 | .apic = 1, | 1917 | .apic = 1, |
| 1918 | /* use highest bit to detect overflow */ | 1918 | /* use highest bit to detect overflow */ |
| 1919 | .max_period = (1ULL << 47) - 1, | 1919 | .max_period = (1ULL << 47) - 1, |
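
Each `max_period` is one bit short of the counter width ((1<<31)-1 for the 32-bit effective width above, (1<<47)-1 for AMD's 48-bit counters), which follows from the arming convention used by the overflow tests earlier: the counter starts at minus the period and its top bit must stay set until the period elapses, so at most 2^(width-1) - 1 counts can be outstanding. A quick check of that arithmetic, under that assumption:

```c
#include <stdint.h>
#include <stdio.h>

/* Largest period that can be armed while keeping the counter's top bit set,
 * assuming the "arm with -period, watch the top bit" convention. */
static uint64_t max_period(unsigned int width)
{
	return (1ULL << (width - 1)) - 1;
}

int main(void)
{
	printf("p6/intel (32 effective bits): %llu\n",
	       (unsigned long long)max_period(32));	/* (1ULL << 31) - 1 */
	printf("amd (48 bits):                %llu\n",
	       (unsigned long long)max_period(48));	/* (1ULL << 47) - 1 */
	return 0;
}
```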
| @@ -1970,7 +1970,7 @@ static int intel_pmu_init(void) | |||
| 1970 | 1970 | ||
| 1971 | /* | 1971 | /* |
| 1972 | * Check whether the Architectural PerfMon supports | 1972 | * Check whether the Architectural PerfMon supports |
| 1973 | * Branch Misses Retired Event or not. | 1973 | * Branch Misses Retired hw_event or not. |
| 1974 | */ | 1974 | */ |
| 1975 | cpuid(10, &eax.full, &ebx, &unused, &edx.full); | 1975 | cpuid(10, &eax.full, &ebx, &unused, &edx.full); |
| 1976 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) | 1976 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) |
| @@ -1982,15 +1982,15 @@ static int intel_pmu_init(void) | |||
| 1982 | 1982 | ||
| 1983 | x86_pmu = intel_pmu; | 1983 | x86_pmu = intel_pmu; |
| 1984 | x86_pmu.version = version; | 1984 | x86_pmu.version = version; |
| 1985 | x86_pmu.num_counters = eax.split.num_counters; | 1985 | x86_pmu.num_events = eax.split.num_events; |
| 1986 | x86_pmu.counter_bits = eax.split.bit_width; | 1986 | x86_pmu.event_bits = eax.split.bit_width; |
| 1987 | x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; | 1987 | x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1; |
| 1988 | 1988 | ||
| 1989 | /* | 1989 | /* |
| 1990 | * Quirk: v2 perfmon does not report fixed-purpose counters, so | 1990 | * Quirk: v2 perfmon does not report fixed-purpose events, so |
| 1991 | * assume at least 3 counters: | 1991 | * assume at least 3 events: |
| 1992 | */ | 1992 | */ |
| 1993 | x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); | 1993 | x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3); |
| 1994 | 1994 | ||
| 1995 | /* | 1995 | /* |
| 1996 | * Install the hw-cache-events table: | 1996 | * Install the hw-cache-events table: |
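
The hunk above sizes the PMU from CPUID leaf 0xA: EAX carries the architectural perfmon version, the number of general-purpose counters and their bit width, and EDX carries the fixed-counter information (hence the quirk when a v2 implementation reports zero there). A user-space sketch that decodes the same leaf; the field positions follow the Intel SDM and `__get_cpuid` is the GCC/clang helper, both assumptions rather than anything taken from this file:

```c
#include <cpuid.h>
#include <stdio.h>

/* Decode CPUID leaf 0xA the way the init path does. Field positions follow
 * the Intel SDM's architectural-perfmon description; this is a user-space
 * illustration, not the kernel's union-based decoding. */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x0A, &eax, &ebx, &ecx, &edx) || (eax & 0xFF) == 0) {
		puts("no architectural perfmon");
		return 0;
	}

	printf("version:          %u\n", eax & 0xFF);
	printf("generic counters: %u\n", (eax >> 8) & 0xFF);
	printf("counter width:    %u bits\n", (eax >> 16) & 0xFF);
	printf("ebx mask length:  %u\n", (eax >> 24) & 0xFF);
	printf("fixed counters:   %u\n", edx & 0x1F);
	printf("fixed width:      %u bits\n", (edx >> 5) & 0xFF);
	return 0;
}
```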
| @@ -2037,11 +2037,11 @@ static int amd_pmu_init(void) | |||
| 2037 | return 0; | 2037 | return 0; |
| 2038 | } | 2038 | } |
| 2039 | 2039 | ||
| 2040 | void __init init_hw_perf_counters(void) | 2040 | void __init init_hw_perf_events(void) |
| 2041 | { | 2041 | { |
| 2042 | int err; | 2042 | int err; |
| 2043 | 2043 | ||
| 2044 | pr_info("Performance Counters: "); | 2044 | pr_info("Performance Events: "); |
| 2045 | 2045 | ||
| 2046 | switch (boot_cpu_data.x86_vendor) { | 2046 | switch (boot_cpu_data.x86_vendor) { |
| 2047 | case X86_VENDOR_INTEL: | 2047 | case X86_VENDOR_INTEL: |
| @@ -2054,45 +2054,45 @@ void __init init_hw_perf_counters(void) | |||
| 2054 | return; | 2054 | return; |
| 2055 | } | 2055 | } |
| 2056 | if (err != 0) { | 2056 | if (err != 0) { |
| 2057 | pr_cont("no PMU driver, software counters only.\n"); | 2057 | pr_cont("no PMU driver, software events only.\n"); |
| 2058 | return; | 2058 | return; |
| 2059 | } | 2059 | } |
| 2060 | 2060 | ||
| 2061 | pr_cont("%s PMU driver.\n", x86_pmu.name); | 2061 | pr_cont("%s PMU driver.\n", x86_pmu.name); |
| 2062 | 2062 | ||
| 2063 | if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { | 2063 | if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { |
| 2064 | WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", | 2064 | WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", |
| 2065 | x86_pmu.num_counters, X86_PMC_MAX_GENERIC); | 2065 | x86_pmu.num_events, X86_PMC_MAX_GENERIC); |
| 2066 | x86_pmu.num_counters = X86_PMC_MAX_GENERIC; | 2066 | x86_pmu.num_events = X86_PMC_MAX_GENERIC; |
| 2067 | } | 2067 | } |
| 2068 | perf_counter_mask = (1 << x86_pmu.num_counters) - 1; | 2068 | perf_event_mask = (1 << x86_pmu.num_events) - 1; |
| 2069 | perf_max_counters = x86_pmu.num_counters; | 2069 | perf_max_events = x86_pmu.num_events; |
| 2070 | 2070 | ||
| 2071 | if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { | 2071 | if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) { |
| 2072 | WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", | 2072 | WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", |
| 2073 | x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); | 2073 | x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED); |
| 2074 | x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; | 2074 | x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED; |
| 2075 | } | 2075 | } |
| 2076 | 2076 | ||
| 2077 | perf_counter_mask |= | 2077 | perf_event_mask |= |
| 2078 | ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; | 2078 | ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED; |
| 2079 | x86_pmu.intel_ctrl = perf_counter_mask; | 2079 | x86_pmu.intel_ctrl = perf_event_mask; |
| 2080 | 2080 | ||
| 2081 | perf_counters_lapic_init(); | 2081 | perf_events_lapic_init(); |
| 2082 | register_die_notifier(&perf_counter_nmi_notifier); | 2082 | register_die_notifier(&perf_event_nmi_notifier); |
| 2083 | 2083 | ||
| 2084 | pr_info("... version: %d\n", x86_pmu.version); | 2084 | pr_info("... version: %d\n", x86_pmu.version); |
| 2085 | pr_info("... bit width: %d\n", x86_pmu.counter_bits); | 2085 | pr_info("... bit width: %d\n", x86_pmu.event_bits); |
| 2086 | pr_info("... generic counters: %d\n", x86_pmu.num_counters); | 2086 | pr_info("... generic registers: %d\n", x86_pmu.num_events); |
| 2087 | pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); | 2087 | pr_info("... value mask: %016Lx\n", x86_pmu.event_mask); |
| 2088 | pr_info("... max period: %016Lx\n", x86_pmu.max_period); | 2088 | pr_info("... max period: %016Lx\n", x86_pmu.max_period); |
| 2089 | pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed); | 2089 | pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); |
| 2090 | pr_info("... counter mask: %016Lx\n", perf_counter_mask); | 2090 | pr_info("... event mask: %016Lx\n", perf_event_mask); |
| 2091 | } | 2091 | } |
| 2092 | 2092 | ||
| 2093 | static inline void x86_pmu_read(struct perf_counter *counter) | 2093 | static inline void x86_pmu_read(struct perf_event *event) |
| 2094 | { | 2094 | { |
| 2095 | x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); | 2095 | x86_perf_event_update(event, &event->hw, event->hw.idx); |
| 2096 | } | 2096 | } |
| 2097 | 2097 | ||
| 2098 | static const struct pmu pmu = { | 2098 | static const struct pmu pmu = { |
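
The mask assembled at the end of `init_hw_perf_events` is one bit per usable counter: the low bits cover the generic counters, the fixed counters sit at bit 32 and up, and the same value is stashed in `x86_pmu.intel_ctrl`. A worked example for a hypothetical PMU with 4 generic and 3 fixed counters; the fixed-index base of 32 mirrors `X86_PMC_IDX_FIXED` and is assumed here:

```c
#include <stdint.h>
#include <stdio.h>

/* Recompute perf_event_mask for a hypothetical PMU with 4 generic and
 * 3 fixed counters. 32 stands in for X86_PMC_IDX_FIXED. */
#define PMC_IDX_FIXED 32

int main(void)
{
	int num_events = 4, num_events_fixed = 3;
	uint64_t mask;

	mask  = (1ULL << num_events) - 1;			   /* bits 0-3   */
	mask |= ((1ULL << num_events_fixed) - 1) << PMC_IDX_FIXED; /* bits 32-34 */

	printf("perf_event_mask = 0x%016llx\n", (unsigned long long)mask);
	/* -> 0x000000070000000f: generic counters low, fixed counters high. */
	return 0;
}
```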
| @@ -2102,14 +2102,14 @@ static const struct pmu pmu = { | |||
| 2102 | .unthrottle = x86_pmu_unthrottle, | 2102 | .unthrottle = x86_pmu_unthrottle, |
| 2103 | }; | 2103 | }; |
| 2104 | 2104 | ||
| 2105 | const struct pmu *hw_perf_counter_init(struct perf_counter *counter) | 2105 | const struct pmu *hw_perf_event_init(struct perf_event *event) |
| 2106 | { | 2106 | { |
| 2107 | int err; | 2107 | int err; |
| 2108 | 2108 | ||
| 2109 | err = __hw_perf_counter_init(counter); | 2109 | err = __hw_perf_event_init(event); |
| 2110 | if (err) { | 2110 | if (err) { |
| 2111 | if (counter->destroy) | 2111 | if (event->destroy) |
| 2112 | counter->destroy(counter); | 2112 | event->destroy(event); |
| 2113 | return ERR_PTR(err); | 2113 | return ERR_PTR(err); |
| 2114 | } | 2114 | } |
| 2115 | 2115 | ||
| @@ -2292,7 +2292,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
| 2292 | return entry; | 2292 | return entry; |
| 2293 | } | 2293 | } |
| 2294 | 2294 | ||
| 2295 | void hw_perf_counter_setup_online(int cpu) | 2295 | void hw_perf_event_setup_online(int cpu) |
| 2296 | { | 2296 | { |
| 2297 | init_debug_store_on_cpu(cpu); | 2297 | init_debug_store_on_cpu(cpu); |
| 2298 | } | 2298 | } |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 392bea43b890..fab786f60ed6 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | #include <linux/kprobes.h> | 20 | #include <linux/kprobes.h> |
| 21 | 21 | ||
| 22 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
| 23 | #include <asm/perf_counter.h> | 23 | #include <asm/perf_event.h> |
| 24 | 24 | ||
| 25 | struct nmi_watchdog_ctlblk { | 25 | struct nmi_watchdog_ctlblk { |
| 26 | unsigned int cccr_msr; | 26 | unsigned int cccr_msr; |
