path: root/arch/x86/kernel
author    Ingo Molnar <mingo@elte.hu>    2009-09-21 06:02:48 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-09-21 08:28:04 -0400
commit    cdd6c482c9ff9c55475ee7392ec8f672eddb7be6 (patch)
tree      81f98a3ab46c589792057fe2392c1e10f8ad7893 /arch/x86/kernel
parent    dfc65094d0313cc48969fa60bcf33d693aeb05a7 (diff)
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!

In the past few months the perfcounters subsystem has grown out of its initial role of counting hardware events, and has become (and is becoming) a much broader generic event enumeration, reporting, logging, monitoring and analysis facility.

Naming its core object 'perf_counter' and naming the subsystem 'perfcounters' has become more and more of a misnomer. With pending code like hw-breakpoints support, the 'counter' name is less and less appropriate.

All in all, we've decided to rename the subsystem to 'performance events' and to propagate this rename through all fields, variables and API names, in an ABI-compatible fashion. The word 'event' is also a bit shorter than 'counter' - which makes it slightly more convenient to write and handle as well.

Thanks go to Stephane Eranian, who first observed this misnomer and suggested the rename.

User-space tooling and ABI compatibility are not affected - this patch should be function-invariant. (Also, defconfigs were not touched, to keep the size down.)

This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES

... to keep it as correct as possible. This script can also be used by anyone who has pending perfcounters patches - it converts a Linux kernel tree over to the new naming. We tried to time this change for the point where the number of pending patches is smallest: the end of the merge window.

Namespace clashes were fixed up in a preparatory patch - and some stylistic fallout will be fixed up in a subsequent patch.

(NOTE: 'counters' is still the proper terminology when we deal with hardware registers - and these sed scripts are a bit over-eager in renaming them. I've undone some of that, but in case there's something left where 'counter' would be better than 'event', we can undo that on an individual basis instead of touching an otherwise nicely automated patch.)

Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
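For illustration only - sample.c and the declaration below are hypothetical, not taken from this patch - the combined effect of the sed passes in the script above on a line of code is a purely mechanical substitution:

  $ cat sample.c                  # hypothetical example file
  struct perf_counter *counter;
  $ sed -i -e 's/perf_counter/perf_event/g' -e 's/counter/event/g' sample.c
  $ cat sample.c
  struct perf_event *event;

The same pattern repeats throughout the diff below.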
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic/apic.c  6
-rw-r--r--  arch/x86/kernel/cpu/Makefile  2
-rw-r--r--  arch/x86/kernel/cpu/common.c  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c (renamed from arch/x86/kernel/cpu/perf_counter.c)  504
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c  2
-rw-r--r--  arch/x86/kernel/entry_64.S  2
-rw-r--r--  arch/x86/kernel/irqinit.c  2
-rw-r--r--  arch/x86/kernel/syscall_table_32.S  2
8 files changed, 262 insertions, 262 deletions
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a34601f52987..754174d09deb 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -14,7 +14,7 @@
14 * Mikael Pettersson : PM converted to driver model. 14 * Mikael Pettersson : PM converted to driver model.
15 */ 15 */
16 16
17#include <linux/perf_counter.h> 17#include <linux/perf_event.h>
18#include <linux/kernel_stat.h> 18#include <linux/kernel_stat.h>
19#include <linux/mc146818rtc.h> 19#include <linux/mc146818rtc.h>
20#include <linux/acpi_pmtmr.h> 20#include <linux/acpi_pmtmr.h>
@@ -35,7 +35,7 @@
35#include <linux/smp.h> 35#include <linux/smp.h>
36#include <linux/mm.h> 36#include <linux/mm.h>
37 37
38#include <asm/perf_counter.h> 38#include <asm/perf_event.h>
39#include <asm/x86_init.h> 39#include <asm/x86_init.h>
40#include <asm/pgalloc.h> 40#include <asm/pgalloc.h>
41#include <asm/atomic.h> 41#include <asm/atomic.h>
@@ -1189,7 +1189,7 @@ void __cpuinit setup_local_APIC(void)
1189 apic_write(APIC_ESR, 0); 1189 apic_write(APIC_ESR, 0);
1190 } 1190 }
1191#endif 1191#endif
1192 perf_counters_lapic_init(); 1192 perf_events_lapic_init();
1193 1193
1194 preempt_disable(); 1194 preempt_disable();
1195 1195
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 8dd30638fe44..68537e957a9b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
27obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o 27obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
28obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o 28obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
29 29
30obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o 30obj-$(CONFIG_PERF_EVENTS) += perf_event.o
31 31
32obj-$(CONFIG_X86_MCE) += mcheck/ 32obj-$(CONFIG_X86_MCE) += mcheck/
33obj-$(CONFIG_MTRR) += mtrr/ 33obj-$(CONFIG_MTRR) += mtrr/
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2fea97eccf77..cc25c2b4a567 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,7 +13,7 @@
13#include <linux/io.h> 13#include <linux/io.h>
14 14
15#include <asm/stackprotector.h> 15#include <asm/stackprotector.h>
16#include <asm/perf_counter.h> 16#include <asm/perf_event.h>
17#include <asm/mmu_context.h> 17#include <asm/mmu_context.h>
18#include <asm/hypervisor.h> 18#include <asm/hypervisor.h>
19#include <asm/processor.h> 19#include <asm/processor.h>
@@ -869,7 +869,7 @@ void __init identify_boot_cpu(void)
869#else 869#else
870 vgetcpu_set_mode(); 870 vgetcpu_set_mode();
871#endif 871#endif
872 init_hw_perf_counters(); 872 init_hw_perf_events();
873} 873}
874 874
875void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) 875void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_event.c
index b1f115696c84..0d03629fb1a5 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Performance counter x86 architecture code 2 * Performance events x86 architecture code
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
@@ -11,7 +11,7 @@
11 * For licencing details see kernel-base/COPYING 11 * For licencing details see kernel-base/COPYING
12 */ 12 */
13 13
14#include <linux/perf_counter.h> 14#include <linux/perf_event.h>
15#include <linux/capability.h> 15#include <linux/capability.h>
16#include <linux/notifier.h> 16#include <linux/notifier.h>
17#include <linux/hardirq.h> 17#include <linux/hardirq.h>
@@ -27,10 +27,10 @@
27#include <asm/stacktrace.h> 27#include <asm/stacktrace.h>
28#include <asm/nmi.h> 28#include <asm/nmi.h>
29 29
30static u64 perf_counter_mask __read_mostly; 30static u64 perf_event_mask __read_mostly;
31 31
32/* The maximal number of PEBS counters: */ 32/* The maximal number of PEBS events: */
33#define MAX_PEBS_COUNTERS 4 33#define MAX_PEBS_EVENTS 4
34 34
35/* The size of a BTS record in bytes: */ 35/* The size of a BTS record in bytes: */
36#define BTS_RECORD_SIZE 24 36#define BTS_RECORD_SIZE 24
@@ -65,11 +65,11 @@ struct debug_store {
65 u64 pebs_index; 65 u64 pebs_index;
66 u64 pebs_absolute_maximum; 66 u64 pebs_absolute_maximum;
67 u64 pebs_interrupt_threshold; 67 u64 pebs_interrupt_threshold;
68 u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; 68 u64 pebs_event_reset[MAX_PEBS_EVENTS];
69}; 69};
70 70
71struct cpu_hw_counters { 71struct cpu_hw_events {
72 struct perf_counter *counters[X86_PMC_IDX_MAX]; 72 struct perf_event *events[X86_PMC_IDX_MAX];
73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
75 unsigned long interrupts; 75 unsigned long interrupts;
@@ -86,17 +86,17 @@ struct x86_pmu {
86 int (*handle_irq)(struct pt_regs *); 86 int (*handle_irq)(struct pt_regs *);
87 void (*disable_all)(void); 87 void (*disable_all)(void);
88 void (*enable_all)(void); 88 void (*enable_all)(void);
89 void (*enable)(struct hw_perf_counter *, int); 89 void (*enable)(struct hw_perf_event *, int);
90 void (*disable)(struct hw_perf_counter *, int); 90 void (*disable)(struct hw_perf_event *, int);
91 unsigned eventsel; 91 unsigned eventsel;
92 unsigned perfctr; 92 unsigned perfctr;
93 u64 (*event_map)(int); 93 u64 (*event_map)(int);
94 u64 (*raw_event)(u64); 94 u64 (*raw_event)(u64);
95 int max_events; 95 int max_events;
96 int num_counters; 96 int num_events;
97 int num_counters_fixed; 97 int num_events_fixed;
98 int counter_bits; 98 int event_bits;
99 u64 counter_mask; 99 u64 event_mask;
100 int apic; 100 int apic;
101 u64 max_period; 101 u64 max_period;
102 u64 intel_ctrl; 102 u64 intel_ctrl;
@@ -106,7 +106,7 @@ struct x86_pmu {
106 106
107static struct x86_pmu x86_pmu __read_mostly; 107static struct x86_pmu x86_pmu __read_mostly;
108 108
109static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { 109static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
110 .enabled = 1, 110 .enabled = 1,
111}; 111};
112 112
@@ -130,12 +130,12 @@ static u64 p6_pmu_event_map(int hw_event)
130} 130}
131 131
132/* 132/*
133 * Counter setting that is specified not to count anything. 133 * Event setting that is specified not to count anything.
134 * We use this to effectively disable a counter. 134 * We use this to effectively disable a counter.
135 * 135 *
136 * L2_RQSTS with 0 MESI unit mask. 136 * L2_RQSTS with 0 MESI unit mask.
137 */ 137 */
138#define P6_NOP_COUNTER 0x0000002EULL 138#define P6_NOP_EVENT 0x0000002EULL
139 139
140static u64 p6_pmu_raw_event(u64 hw_event) 140static u64 p6_pmu_raw_event(u64 hw_event)
141{ 141{
@@ -143,14 +143,14 @@ static u64 p6_pmu_raw_event(u64 hw_event)
143#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL 143#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
144#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL 144#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
145#define P6_EVNTSEL_INV_MASK 0x00800000ULL 145#define P6_EVNTSEL_INV_MASK 0x00800000ULL
146#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL 146#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
147 147
148#define P6_EVNTSEL_MASK \ 148#define P6_EVNTSEL_MASK \
149 (P6_EVNTSEL_EVENT_MASK | \ 149 (P6_EVNTSEL_EVENT_MASK | \
150 P6_EVNTSEL_UNIT_MASK | \ 150 P6_EVNTSEL_UNIT_MASK | \
151 P6_EVNTSEL_EDGE_MASK | \ 151 P6_EVNTSEL_EDGE_MASK | \
152 P6_EVNTSEL_INV_MASK | \ 152 P6_EVNTSEL_INV_MASK | \
153 P6_EVNTSEL_COUNTER_MASK) 153 P6_EVNTSEL_REG_MASK)
154 154
155 return hw_event & P6_EVNTSEL_MASK; 155 return hw_event & P6_EVNTSEL_MASK;
156} 156}
@@ -469,14 +469,14 @@ static u64 intel_pmu_raw_event(u64 hw_event)
469#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL 469#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
470#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL 470#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
471#define CORE_EVNTSEL_INV_MASK 0x00800000ULL 471#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
472#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL 472#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
473 473
474#define CORE_EVNTSEL_MASK \ 474#define CORE_EVNTSEL_MASK \
475 (CORE_EVNTSEL_EVENT_MASK | \ 475 (CORE_EVNTSEL_EVENT_MASK | \
476 CORE_EVNTSEL_UNIT_MASK | \ 476 CORE_EVNTSEL_UNIT_MASK | \
477 CORE_EVNTSEL_EDGE_MASK | \ 477 CORE_EVNTSEL_EDGE_MASK | \
478 CORE_EVNTSEL_INV_MASK | \ 478 CORE_EVNTSEL_INV_MASK | \
479 CORE_EVNTSEL_COUNTER_MASK) 479 CORE_EVNTSEL_REG_MASK)
480 480
481 return hw_event & CORE_EVNTSEL_MASK; 481 return hw_event & CORE_EVNTSEL_MASK;
482} 482}
@@ -596,28 +596,28 @@ static u64 amd_pmu_raw_event(u64 hw_event)
596#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL 596#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
597#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL 597#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
598#define K7_EVNTSEL_INV_MASK 0x000800000ULL 598#define K7_EVNTSEL_INV_MASK 0x000800000ULL
599#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL 599#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
600 600
601#define K7_EVNTSEL_MASK \ 601#define K7_EVNTSEL_MASK \
602 (K7_EVNTSEL_EVENT_MASK | \ 602 (K7_EVNTSEL_EVENT_MASK | \
603 K7_EVNTSEL_UNIT_MASK | \ 603 K7_EVNTSEL_UNIT_MASK | \
604 K7_EVNTSEL_EDGE_MASK | \ 604 K7_EVNTSEL_EDGE_MASK | \
605 K7_EVNTSEL_INV_MASK | \ 605 K7_EVNTSEL_INV_MASK | \
606 K7_EVNTSEL_COUNTER_MASK) 606 K7_EVNTSEL_REG_MASK)
607 607
608 return hw_event & K7_EVNTSEL_MASK; 608 return hw_event & K7_EVNTSEL_MASK;
609} 609}
610 610
611/* 611/*
612 * Propagate counter elapsed time into the generic counter. 612 * Propagate event elapsed time into the generic event.
613 * Can only be executed on the CPU where the counter is active. 613 * Can only be executed on the CPU where the event is active.
614 * Returns the delta events processed. 614 * Returns the delta events processed.
615 */ 615 */
616static u64 616static u64
617x86_perf_counter_update(struct perf_counter *counter, 617x86_perf_event_update(struct perf_event *event,
618 struct hw_perf_counter *hwc, int idx) 618 struct hw_perf_event *hwc, int idx)
619{ 619{
620 int shift = 64 - x86_pmu.counter_bits; 620 int shift = 64 - x86_pmu.event_bits;
621 u64 prev_raw_count, new_raw_count; 621 u64 prev_raw_count, new_raw_count;
622 s64 delta; 622 s64 delta;
623 623
@@ -625,15 +625,15 @@ x86_perf_counter_update(struct perf_counter *counter,
625 return 0; 625 return 0;
626 626
627 /* 627 /*
628 * Careful: an NMI might modify the previous counter value. 628 * Careful: an NMI might modify the previous event value.
629 * 629 *
630 * Our tactic to handle this is to first atomically read and 630 * Our tactic to handle this is to first atomically read and
631 * exchange a new raw count - then add that new-prev delta 631 * exchange a new raw count - then add that new-prev delta
632 * count to the generic counter atomically: 632 * count to the generic event atomically:
633 */ 633 */
634again: 634again:
635 prev_raw_count = atomic64_read(&hwc->prev_count); 635 prev_raw_count = atomic64_read(&hwc->prev_count);
636 rdmsrl(hwc->counter_base + idx, new_raw_count); 636 rdmsrl(hwc->event_base + idx, new_raw_count);
637 637
638 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, 638 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
639 new_raw_count) != prev_raw_count) 639 new_raw_count) != prev_raw_count)
@@ -642,7 +642,7 @@ again:
642 /* 642 /*
643 * Now we have the new raw value and have updated the prev 643 * Now we have the new raw value and have updated the prev
644 * timestamp already. We can now calculate the elapsed delta 644 * timestamp already. We can now calculate the elapsed delta
645 * (counter-)time and add that to the generic counter. 645 * (event-)time and add that to the generic event.
646 * 646 *
647 * Careful, not all hw sign-extends above the physical width 647 * Careful, not all hw sign-extends above the physical width
648 * of the count. 648 * of the count.
@@ -650,13 +650,13 @@ again:
650 delta = (new_raw_count << shift) - (prev_raw_count << shift); 650 delta = (new_raw_count << shift) - (prev_raw_count << shift);
651 delta >>= shift; 651 delta >>= shift;
652 652
653 atomic64_add(delta, &counter->count); 653 atomic64_add(delta, &event->count);
654 atomic64_sub(delta, &hwc->period_left); 654 atomic64_sub(delta, &hwc->period_left);
655 655
656 return new_raw_count; 656 return new_raw_count;
657} 657}
658 658
659static atomic_t active_counters; 659static atomic_t active_events;
660static DEFINE_MUTEX(pmc_reserve_mutex); 660static DEFINE_MUTEX(pmc_reserve_mutex);
661 661
662static bool reserve_pmc_hardware(void) 662static bool reserve_pmc_hardware(void)
@@ -667,12 +667,12 @@ static bool reserve_pmc_hardware(void)
667 if (nmi_watchdog == NMI_LOCAL_APIC) 667 if (nmi_watchdog == NMI_LOCAL_APIC)
668 disable_lapic_nmi_watchdog(); 668 disable_lapic_nmi_watchdog();
669 669
670 for (i = 0; i < x86_pmu.num_counters; i++) { 670 for (i = 0; i < x86_pmu.num_events; i++) {
671 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) 671 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
672 goto perfctr_fail; 672 goto perfctr_fail;
673 } 673 }
674 674
675 for (i = 0; i < x86_pmu.num_counters; i++) { 675 for (i = 0; i < x86_pmu.num_events; i++) {
676 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) 676 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
677 goto eventsel_fail; 677 goto eventsel_fail;
678 } 678 }
@@ -685,7 +685,7 @@ eventsel_fail:
685 for (i--; i >= 0; i--) 685 for (i--; i >= 0; i--)
686 release_evntsel_nmi(x86_pmu.eventsel + i); 686 release_evntsel_nmi(x86_pmu.eventsel + i);
687 687
688 i = x86_pmu.num_counters; 688 i = x86_pmu.num_events;
689 689
690perfctr_fail: 690perfctr_fail:
691 for (i--; i >= 0; i--) 691 for (i--; i >= 0; i--)
@@ -703,7 +703,7 @@ static void release_pmc_hardware(void)
703#ifdef CONFIG_X86_LOCAL_APIC 703#ifdef CONFIG_X86_LOCAL_APIC
704 int i; 704 int i;
705 705
706 for (i = 0; i < x86_pmu.num_counters; i++) { 706 for (i = 0; i < x86_pmu.num_events; i++) {
707 release_perfctr_nmi(x86_pmu.perfctr + i); 707 release_perfctr_nmi(x86_pmu.perfctr + i);
708 release_evntsel_nmi(x86_pmu.eventsel + i); 708 release_evntsel_nmi(x86_pmu.eventsel + i);
709 } 709 }
@@ -720,7 +720,7 @@ static inline bool bts_available(void)
720 720
721static inline void init_debug_store_on_cpu(int cpu) 721static inline void init_debug_store_on_cpu(int cpu)
722{ 722{
723 struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; 723 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
724 724
725 if (!ds) 725 if (!ds)
726 return; 726 return;
@@ -732,7 +732,7 @@ static inline void init_debug_store_on_cpu(int cpu)
732 732
733static inline void fini_debug_store_on_cpu(int cpu) 733static inline void fini_debug_store_on_cpu(int cpu)
734{ 734{
735 if (!per_cpu(cpu_hw_counters, cpu).ds) 735 if (!per_cpu(cpu_hw_events, cpu).ds)
736 return; 736 return;
737 737
738 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); 738 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
@@ -751,12 +751,12 @@ static void release_bts_hardware(void)
751 fini_debug_store_on_cpu(cpu); 751 fini_debug_store_on_cpu(cpu);
752 752
753 for_each_possible_cpu(cpu) { 753 for_each_possible_cpu(cpu) {
754 struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; 754 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
755 755
756 if (!ds) 756 if (!ds)
757 continue; 757 continue;
758 758
759 per_cpu(cpu_hw_counters, cpu).ds = NULL; 759 per_cpu(cpu_hw_events, cpu).ds = NULL;
760 760
761 kfree((void *)(unsigned long)ds->bts_buffer_base); 761 kfree((void *)(unsigned long)ds->bts_buffer_base);
762 kfree(ds); 762 kfree(ds);
@@ -796,7 +796,7 @@ static int reserve_bts_hardware(void)
796 ds->bts_interrupt_threshold = 796 ds->bts_interrupt_threshold =
797 ds->bts_absolute_maximum - BTS_OVFL_TH; 797 ds->bts_absolute_maximum - BTS_OVFL_TH;
798 798
799 per_cpu(cpu_hw_counters, cpu).ds = ds; 799 per_cpu(cpu_hw_events, cpu).ds = ds;
800 err = 0; 800 err = 0;
801 } 801 }
802 802
@@ -812,9 +812,9 @@ static int reserve_bts_hardware(void)
812 return err; 812 return err;
813} 813}
814 814
815static void hw_perf_counter_destroy(struct perf_counter *counter) 815static void hw_perf_event_destroy(struct perf_event *event)
816{ 816{
817 if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { 817 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
818 release_pmc_hardware(); 818 release_pmc_hardware();
819 release_bts_hardware(); 819 release_bts_hardware();
820 mutex_unlock(&pmc_reserve_mutex); 820 mutex_unlock(&pmc_reserve_mutex);
@@ -827,7 +827,7 @@ static inline int x86_pmu_initialized(void)
827} 827}
828 828
829static inline int 829static inline int
830set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) 830set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
831{ 831{
832 unsigned int cache_type, cache_op, cache_result; 832 unsigned int cache_type, cache_op, cache_result;
833 u64 config, val; 833 u64 config, val;
@@ -880,7 +880,7 @@ static void intel_pmu_enable_bts(u64 config)
880 880
881static void intel_pmu_disable_bts(void) 881static void intel_pmu_disable_bts(void)
882{ 882{
883 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 883 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
884 unsigned long debugctlmsr; 884 unsigned long debugctlmsr;
885 885
886 if (!cpuc->ds) 886 if (!cpuc->ds)
@@ -898,10 +898,10 @@ static void intel_pmu_disable_bts(void)
898/* 898/*
899 * Setup the hardware configuration for a given attr_type 899 * Setup the hardware configuration for a given attr_type
900 */ 900 */
901static int __hw_perf_counter_init(struct perf_counter *counter) 901static int __hw_perf_event_init(struct perf_event *event)
902{ 902{
903 struct perf_counter_attr *attr = &counter->attr; 903 struct perf_event_attr *attr = &event->attr;
904 struct hw_perf_counter *hwc = &counter->hw; 904 struct hw_perf_event *hwc = &event->hw;
905 u64 config; 905 u64 config;
906 int err; 906 int err;
907 907
@@ -909,22 +909,22 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
909 return -ENODEV; 909 return -ENODEV;
910 910
911 err = 0; 911 err = 0;
912 if (!atomic_inc_not_zero(&active_counters)) { 912 if (!atomic_inc_not_zero(&active_events)) {
913 mutex_lock(&pmc_reserve_mutex); 913 mutex_lock(&pmc_reserve_mutex);
914 if (atomic_read(&active_counters) == 0) { 914 if (atomic_read(&active_events) == 0) {
915 if (!reserve_pmc_hardware()) 915 if (!reserve_pmc_hardware())
916 err = -EBUSY; 916 err = -EBUSY;
917 else 917 else
918 err = reserve_bts_hardware(); 918 err = reserve_bts_hardware();
919 } 919 }
920 if (!err) 920 if (!err)
921 atomic_inc(&active_counters); 921 atomic_inc(&active_events);
922 mutex_unlock(&pmc_reserve_mutex); 922 mutex_unlock(&pmc_reserve_mutex);
923 } 923 }
924 if (err) 924 if (err)
925 return err; 925 return err;
926 926
927 counter->destroy = hw_perf_counter_destroy; 927 event->destroy = hw_perf_event_destroy;
928 928
929 /* 929 /*
930 * Generate PMC IRQs: 930 * Generate PMC IRQs:
@@ -948,8 +948,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
948 /* 948 /*
949 * If we have a PMU initialized but no APIC 949 * If we have a PMU initialized but no APIC
950 * interrupts, we cannot sample hardware 950 * interrupts, we cannot sample hardware
951 * counters (user-space has to fall back and 951 * events (user-space has to fall back and
952 * sample via a hrtimer based software counter): 952 * sample via a hrtimer based software event):
953 */ 953 */
954 if (!x86_pmu.apic) 954 if (!x86_pmu.apic)
955 return -EOPNOTSUPP; 955 return -EOPNOTSUPP;
@@ -1001,7 +1001,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
1001 1001
1002static void p6_pmu_disable_all(void) 1002static void p6_pmu_disable_all(void)
1003{ 1003{
1004 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1004 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1005 u64 val; 1005 u64 val;
1006 1006
1007 if (!cpuc->enabled) 1007 if (!cpuc->enabled)
@@ -1018,7 +1018,7 @@ static void p6_pmu_disable_all(void)
1018 1018
1019static void intel_pmu_disable_all(void) 1019static void intel_pmu_disable_all(void)
1020{ 1020{
1021 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1021 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1022 1022
1023 if (!cpuc->enabled) 1023 if (!cpuc->enabled)
1024 return; 1024 return;
@@ -1034,7 +1034,7 @@ static void intel_pmu_disable_all(void)
1034 1034
1035static void amd_pmu_disable_all(void) 1035static void amd_pmu_disable_all(void)
1036{ 1036{
1037 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1037 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1038 int idx; 1038 int idx;
1039 1039
1040 if (!cpuc->enabled) 1040 if (!cpuc->enabled)
@@ -1043,12 +1043,12 @@ static void amd_pmu_disable_all(void)
1043 cpuc->enabled = 0; 1043 cpuc->enabled = 0;
1044 /* 1044 /*
1045 * ensure we write the disable before we start disabling the 1045 * ensure we write the disable before we start disabling the
1046 * counters proper, so that amd_pmu_enable_counter() does the 1046 * events proper, so that amd_pmu_enable_event() does the
1047 * right thing. 1047 * right thing.
1048 */ 1048 */
1049 barrier(); 1049 barrier();
1050 1050
1051 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1051 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1052 u64 val; 1052 u64 val;
1053 1053
1054 if (!test_bit(idx, cpuc->active_mask)) 1054 if (!test_bit(idx, cpuc->active_mask))
@@ -1070,7 +1070,7 @@ void hw_perf_disable(void)
1070 1070
1071static void p6_pmu_enable_all(void) 1071static void p6_pmu_enable_all(void)
1072{ 1072{
1073 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1073 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1074 unsigned long val; 1074 unsigned long val;
1075 1075
1076 if (cpuc->enabled) 1076 if (cpuc->enabled)
@@ -1087,7 +1087,7 @@ static void p6_pmu_enable_all(void)
1087 1087
1088static void intel_pmu_enable_all(void) 1088static void intel_pmu_enable_all(void)
1089{ 1089{
1090 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1090 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1091 1091
1092 if (cpuc->enabled) 1092 if (cpuc->enabled)
1093 return; 1093 return;
@@ -1098,19 +1098,19 @@ static void intel_pmu_enable_all(void)
1098 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); 1098 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1099 1099
1100 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 1100 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1101 struct perf_counter *counter = 1101 struct perf_event *event =
1102 cpuc->counters[X86_PMC_IDX_FIXED_BTS]; 1102 cpuc->events[X86_PMC_IDX_FIXED_BTS];
1103 1103
1104 if (WARN_ON_ONCE(!counter)) 1104 if (WARN_ON_ONCE(!event))
1105 return; 1105 return;
1106 1106
1107 intel_pmu_enable_bts(counter->hw.config); 1107 intel_pmu_enable_bts(event->hw.config);
1108 } 1108 }
1109} 1109}
1110 1110
1111static void amd_pmu_enable_all(void) 1111static void amd_pmu_enable_all(void)
1112{ 1112{
1113 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1113 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1114 int idx; 1114 int idx;
1115 1115
1116 if (cpuc->enabled) 1116 if (cpuc->enabled)
@@ -1119,14 +1119,14 @@ static void amd_pmu_enable_all(void)
1119 cpuc->enabled = 1; 1119 cpuc->enabled = 1;
1120 barrier(); 1120 barrier();
1121 1121
1122 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1122 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1123 struct perf_counter *counter = cpuc->counters[idx]; 1123 struct perf_event *event = cpuc->events[idx];
1124 u64 val; 1124 u64 val;
1125 1125
1126 if (!test_bit(idx, cpuc->active_mask)) 1126 if (!test_bit(idx, cpuc->active_mask))
1127 continue; 1127 continue;
1128 1128
1129 val = counter->hw.config; 1129 val = event->hw.config;
1130 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 1130 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1131 wrmsrl(MSR_K7_EVNTSEL0 + idx, val); 1131 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1132 } 1132 }
@@ -1153,19 +1153,19 @@ static inline void intel_pmu_ack_status(u64 ack)
1153 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 1153 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1154} 1154}
1155 1155
1156static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1156static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1157{ 1157{
1158 (void)checking_wrmsrl(hwc->config_base + idx, 1158 (void)checking_wrmsrl(hwc->config_base + idx,
1159 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); 1159 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1160} 1160}
1161 1161
1162static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1162static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1163{ 1163{
1164 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); 1164 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1165} 1165}
1166 1166
1167static inline void 1167static inline void
1168intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) 1168intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1169{ 1169{
1170 int idx = __idx - X86_PMC_IDX_FIXED; 1170 int idx = __idx - X86_PMC_IDX_FIXED;
1171 u64 ctrl_val, mask; 1171 u64 ctrl_val, mask;
@@ -1178,10 +1178,10 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
1178} 1178}
1179 1179
1180static inline void 1180static inline void
1181p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1181p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1182{ 1182{
1183 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1183 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1184 u64 val = P6_NOP_COUNTER; 1184 u64 val = P6_NOP_EVENT;
1185 1185
1186 if (cpuc->enabled) 1186 if (cpuc->enabled)
1187 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 1187 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1190,7 +1190,7 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
1190} 1190}
1191 1191
1192static inline void 1192static inline void
1193intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1193intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1194{ 1194{
1195 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 1195 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1196 intel_pmu_disable_bts(); 1196 intel_pmu_disable_bts();
@@ -1202,24 +1202,24 @@ intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
1202 return; 1202 return;
1203 } 1203 }
1204 1204
1205 x86_pmu_disable_counter(hwc, idx); 1205 x86_pmu_disable_event(hwc, idx);
1206} 1206}
1207 1207
1208static inline void 1208static inline void
1209amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1209amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1210{ 1210{
1211 x86_pmu_disable_counter(hwc, idx); 1211 x86_pmu_disable_event(hwc, idx);
1212} 1212}
1213 1213
1214static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); 1214static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1215 1215
1216/* 1216/*
1217 * Set the next IRQ period, based on the hwc->period_left value. 1217 * Set the next IRQ period, based on the hwc->period_left value.
1218 * To be called with the counter disabled in hw: 1218 * To be called with the event disabled in hw:
1219 */ 1219 */
1220static int 1220static int
1221x86_perf_counter_set_period(struct perf_counter *counter, 1221x86_perf_event_set_period(struct perf_event *event,
1222 struct hw_perf_counter *hwc, int idx) 1222 struct hw_perf_event *hwc, int idx)
1223{ 1223{
1224 s64 left = atomic64_read(&hwc->period_left); 1224 s64 left = atomic64_read(&hwc->period_left);
1225 s64 period = hwc->sample_period; 1225 s64 period = hwc->sample_period;
@@ -1256,21 +1256,21 @@ x86_perf_counter_set_period(struct perf_counter *counter,
1256 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; 1256 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1257 1257
1258 /* 1258 /*
1259 * The hw counter starts counting from this counter offset, 1259 * The hw event starts counting from this event offset,
1260 * mark it to be able to extra future deltas: 1260 * mark it to be able to extra future deltas:
1261 */ 1261 */
1262 atomic64_set(&hwc->prev_count, (u64)-left); 1262 atomic64_set(&hwc->prev_count, (u64)-left);
1263 1263
1264 err = checking_wrmsrl(hwc->counter_base + idx, 1264 err = checking_wrmsrl(hwc->event_base + idx,
1265 (u64)(-left) & x86_pmu.counter_mask); 1265 (u64)(-left) & x86_pmu.event_mask);
1266 1266
1267 perf_counter_update_userpage(counter); 1267 perf_event_update_userpage(event);
1268 1268
1269 return ret; 1269 return ret;
1270} 1270}
1271 1271
1272static inline void 1272static inline void
1273intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) 1273intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1274{ 1274{
1275 int idx = __idx - X86_PMC_IDX_FIXED; 1275 int idx = __idx - X86_PMC_IDX_FIXED;
1276 u64 ctrl_val, bits, mask; 1276 u64 ctrl_val, bits, mask;
@@ -1295,9 +1295,9 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
1295 err = checking_wrmsrl(hwc->config_base, ctrl_val); 1295 err = checking_wrmsrl(hwc->config_base, ctrl_val);
1296} 1296}
1297 1297
1298static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1298static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1299{ 1299{
1300 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1300 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1301 u64 val; 1301 u64 val;
1302 1302
1303 val = hwc->config; 1303 val = hwc->config;
@@ -1308,10 +1308,10 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
1308} 1308}
1309 1309
1310 1310
1311static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1311static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1312{ 1312{
1313 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 1313 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1314 if (!__get_cpu_var(cpu_hw_counters).enabled) 1314 if (!__get_cpu_var(cpu_hw_events).enabled)
1315 return; 1315 return;
1316 1316
1317 intel_pmu_enable_bts(hwc->config); 1317 intel_pmu_enable_bts(hwc->config);
@@ -1323,19 +1323,19 @@ static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
1323 return; 1323 return;
1324 } 1324 }
1325 1325
1326 x86_pmu_enable_counter(hwc, idx); 1326 x86_pmu_enable_event(hwc, idx);
1327} 1327}
1328 1328
1329static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1329static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1330{ 1330{
1331 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1331 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1332 1332
1333 if (cpuc->enabled) 1333 if (cpuc->enabled)
1334 x86_pmu_enable_counter(hwc, idx); 1334 x86_pmu_enable_event(hwc, idx);
1335} 1335}
1336 1336
1337static int 1337static int
1338fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) 1338fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
1339{ 1339{
1340 unsigned int hw_event; 1340 unsigned int hw_event;
1341 1341
@@ -1346,7 +1346,7 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
1346 (hwc->sample_period == 1))) 1346 (hwc->sample_period == 1)))
1347 return X86_PMC_IDX_FIXED_BTS; 1347 return X86_PMC_IDX_FIXED_BTS;
1348 1348
1349 if (!x86_pmu.num_counters_fixed) 1349 if (!x86_pmu.num_events_fixed)
1350 return -1; 1350 return -1;
1351 1351
1352 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) 1352 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
@@ -1360,97 +1360,97 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
1360} 1360}
1361 1361
1362/* 1362/*
1363 * Find a PMC slot for the freshly enabled / scheduled in counter: 1363 * Find a PMC slot for the freshly enabled / scheduled in event:
1364 */ 1364 */
1365static int x86_pmu_enable(struct perf_counter *counter) 1365static int x86_pmu_enable(struct perf_event *event)
1366{ 1366{
1367 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1367 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1368 struct hw_perf_counter *hwc = &counter->hw; 1368 struct hw_perf_event *hwc = &event->hw;
1369 int idx; 1369 int idx;
1370 1370
1371 idx = fixed_mode_idx(counter, hwc); 1371 idx = fixed_mode_idx(event, hwc);
1372 if (idx == X86_PMC_IDX_FIXED_BTS) { 1372 if (idx == X86_PMC_IDX_FIXED_BTS) {
1373 /* BTS is already occupied. */ 1373 /* BTS is already occupied. */
1374 if (test_and_set_bit(idx, cpuc->used_mask)) 1374 if (test_and_set_bit(idx, cpuc->used_mask))
1375 return -EAGAIN; 1375 return -EAGAIN;
1376 1376
1377 hwc->config_base = 0; 1377 hwc->config_base = 0;
1378 hwc->counter_base = 0; 1378 hwc->event_base = 0;
1379 hwc->idx = idx; 1379 hwc->idx = idx;
1380 } else if (idx >= 0) { 1380 } else if (idx >= 0) {
1381 /* 1381 /*
1382 * Try to get the fixed counter, if that is already taken 1382 * Try to get the fixed event, if that is already taken
1383 * then try to get a generic counter: 1383 * then try to get a generic event:
1384 */ 1384 */
1385 if (test_and_set_bit(idx, cpuc->used_mask)) 1385 if (test_and_set_bit(idx, cpuc->used_mask))
1386 goto try_generic; 1386 goto try_generic;
1387 1387
1388 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; 1388 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1389 /* 1389 /*
1390 * We set it so that counter_base + idx in wrmsr/rdmsr maps to 1390 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1391 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: 1391 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1392 */ 1392 */
1393 hwc->counter_base = 1393 hwc->event_base =
1394 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; 1394 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1395 hwc->idx = idx; 1395 hwc->idx = idx;
1396 } else { 1396 } else {
1397 idx = hwc->idx; 1397 idx = hwc->idx;
1398 /* Try to get the previous generic counter again */ 1398 /* Try to get the previous generic event again */
1399 if (test_and_set_bit(idx, cpuc->used_mask)) { 1399 if (test_and_set_bit(idx, cpuc->used_mask)) {
1400try_generic: 1400try_generic:
1401 idx = find_first_zero_bit(cpuc->used_mask, 1401 idx = find_first_zero_bit(cpuc->used_mask,
1402 x86_pmu.num_counters); 1402 x86_pmu.num_events);
1403 if (idx == x86_pmu.num_counters) 1403 if (idx == x86_pmu.num_events)
1404 return -EAGAIN; 1404 return -EAGAIN;
1405 1405
1406 set_bit(idx, cpuc->used_mask); 1406 set_bit(idx, cpuc->used_mask);
1407 hwc->idx = idx; 1407 hwc->idx = idx;
1408 } 1408 }
1409 hwc->config_base = x86_pmu.eventsel; 1409 hwc->config_base = x86_pmu.eventsel;
1410 hwc->counter_base = x86_pmu.perfctr; 1410 hwc->event_base = x86_pmu.perfctr;
1411 } 1411 }
1412 1412
1413 perf_counters_lapic_init(); 1413 perf_events_lapic_init();
1414 1414
1415 x86_pmu.disable(hwc, idx); 1415 x86_pmu.disable(hwc, idx);
1416 1416
1417 cpuc->counters[idx] = counter; 1417 cpuc->events[idx] = event;
1418 set_bit(idx, cpuc->active_mask); 1418 set_bit(idx, cpuc->active_mask);
1419 1419
1420 x86_perf_counter_set_period(counter, hwc, idx); 1420 x86_perf_event_set_period(event, hwc, idx);
1421 x86_pmu.enable(hwc, idx); 1421 x86_pmu.enable(hwc, idx);
1422 1422
1423 perf_counter_update_userpage(counter); 1423 perf_event_update_userpage(event);
1424 1424
1425 return 0; 1425 return 0;
1426} 1426}
1427 1427
1428static void x86_pmu_unthrottle(struct perf_counter *counter) 1428static void x86_pmu_unthrottle(struct perf_event *event)
1429{ 1429{
1430 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1430 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1431 struct hw_perf_counter *hwc = &counter->hw; 1431 struct hw_perf_event *hwc = &event->hw;
1432 1432
1433 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || 1433 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1434 cpuc->counters[hwc->idx] != counter)) 1434 cpuc->events[hwc->idx] != event))
1435 return; 1435 return;
1436 1436
1437 x86_pmu.enable(hwc, hwc->idx); 1437 x86_pmu.enable(hwc, hwc->idx);
1438} 1438}
1439 1439
1440void perf_counter_print_debug(void) 1440void perf_event_print_debug(void)
1441{ 1441{
1442 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; 1442 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1443 struct cpu_hw_counters *cpuc; 1443 struct cpu_hw_events *cpuc;
1444 unsigned long flags; 1444 unsigned long flags;
1445 int cpu, idx; 1445 int cpu, idx;
1446 1446
1447 if (!x86_pmu.num_counters) 1447 if (!x86_pmu.num_events)
1448 return; 1448 return;
1449 1449
1450 local_irq_save(flags); 1450 local_irq_save(flags);
1451 1451
1452 cpu = smp_processor_id(); 1452 cpu = smp_processor_id();
1453 cpuc = &per_cpu(cpu_hw_counters, cpu); 1453 cpuc = &per_cpu(cpu_hw_events, cpu);
1454 1454
1455 if (x86_pmu.version >= 2) { 1455 if (x86_pmu.version >= 2) {
1456 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); 1456 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
@@ -1466,7 +1466,7 @@ void perf_counter_print_debug(void)
1466 } 1466 }
1467 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); 1467 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
1468 1468
1469 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1469 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); 1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count); 1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1472 1472
@@ -1479,7 +1479,7 @@ void perf_counter_print_debug(void)
1479 pr_info("CPU#%d: gen-PMC%d left: %016llx\n", 1479 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
1480 cpu, idx, prev_left); 1480 cpu, idx, prev_left);
1481 } 1481 }
1482 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { 1482 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1483 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); 1483 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1484 1484
1485 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", 1485 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1488,7 +1488,7 @@ void perf_counter_print_debug(void)
1488 local_irq_restore(flags); 1488 local_irq_restore(flags);
1489} 1489}
1490 1490
1491static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) 1491static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1492{ 1492{
1493 struct debug_store *ds = cpuc->ds; 1493 struct debug_store *ds = cpuc->ds;
1494 struct bts_record { 1494 struct bts_record {
@@ -1496,14 +1496,14 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
1496 u64 to; 1496 u64 to;
1497 u64 flags; 1497 u64 flags;
1498 }; 1498 };
1499 struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; 1499 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1500 struct bts_record *at, *top; 1500 struct bts_record *at, *top;
1501 struct perf_output_handle handle; 1501 struct perf_output_handle handle;
1502 struct perf_event_header header; 1502 struct perf_event_header header;
1503 struct perf_sample_data data; 1503 struct perf_sample_data data;
1504 struct pt_regs regs; 1504 struct pt_regs regs;
1505 1505
1506 if (!counter) 1506 if (!event)
1507 return; 1507 return;
1508 1508
1509 if (!ds) 1509 if (!ds)
@@ -1518,7 +1518,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
1518 ds->bts_index = ds->bts_buffer_base; 1518 ds->bts_index = ds->bts_buffer_base;
1519 1519
1520 1520
1521 data.period = counter->hw.last_period; 1521 data.period = event->hw.last_period;
1522 data.addr = 0; 1522 data.addr = 0;
1523 regs.ip = 0; 1523 regs.ip = 0;
1524 1524
@@ -1527,9 +1527,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
1527 * We will overwrite the from and to address before we output 1527 * We will overwrite the from and to address before we output
1528 * the sample. 1528 * the sample.
1529 */ 1529 */
1530 perf_prepare_sample(&header, &data, counter, &regs); 1530 perf_prepare_sample(&header, &data, event, &regs);
1531 1531
1532 if (perf_output_begin(&handle, counter, 1532 if (perf_output_begin(&handle, event,
1533 header.size * (top - at), 1, 1)) 1533 header.size * (top - at), 1, 1))
1534 return; 1534 return;
1535 1535
@@ -1537,20 +1537,20 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
1537 data.ip = at->from; 1537 data.ip = at->from;
1538 data.addr = at->to; 1538 data.addr = at->to;
1539 1539
1540 perf_output_sample(&handle, &header, &data, counter); 1540 perf_output_sample(&handle, &header, &data, event);
1541 } 1541 }
1542 1542
1543 perf_output_end(&handle); 1543 perf_output_end(&handle);
1544 1544
1545 /* There's new data available. */ 1545 /* There's new data available. */
1546 counter->hw.interrupts++; 1546 event->hw.interrupts++;
1547 counter->pending_kill = POLL_IN; 1547 event->pending_kill = POLL_IN;
1548} 1548}
1549 1549
1550static void x86_pmu_disable(struct perf_counter *counter) 1550static void x86_pmu_disable(struct perf_event *event)
1551{ 1551{
1552 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1552 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1553 struct hw_perf_counter *hwc = &counter->hw; 1553 struct hw_perf_event *hwc = &event->hw;
1554 int idx = hwc->idx; 1554 int idx = hwc->idx;
1555 1555
1556 /* 1556 /*
@@ -1562,63 +1562,63 @@ static void x86_pmu_disable(struct perf_counter *counter)
1562 1562
1563 /* 1563 /*
1564 * Make sure the cleared pointer becomes visible before we 1564 * Make sure the cleared pointer becomes visible before we
1565 * (potentially) free the counter: 1565 * (potentially) free the event:
1566 */ 1566 */
1567 barrier(); 1567 barrier();
1568 1568
1569 /* 1569 /*
1570 * Drain the remaining delta count out of a counter 1570 * Drain the remaining delta count out of a event
1571 * that we are disabling: 1571 * that we are disabling:
1572 */ 1572 */
1573 x86_perf_counter_update(counter, hwc, idx); 1573 x86_perf_event_update(event, hwc, idx);
1574 1574
1575 /* Drain the remaining BTS records. */ 1575 /* Drain the remaining BTS records. */
1576 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) 1576 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1577 intel_pmu_drain_bts_buffer(cpuc); 1577 intel_pmu_drain_bts_buffer(cpuc);
1578 1578
1579 cpuc->counters[idx] = NULL; 1579 cpuc->events[idx] = NULL;
1580 clear_bit(idx, cpuc->used_mask); 1580 clear_bit(idx, cpuc->used_mask);
1581 1581
1582 perf_counter_update_userpage(counter); 1582 perf_event_update_userpage(event);
1583} 1583}
1584 1584
1585/* 1585/*
1586 * Save and restart an expired counter. Called by NMI contexts, 1586 * Save and restart an expired event. Called by NMI contexts,
1587 * so it has to be careful about preempting normal counter ops: 1587 * so it has to be careful about preempting normal event ops:
1588 */ 1588 */
1589static int intel_pmu_save_and_restart(struct perf_counter *counter) 1589static int intel_pmu_save_and_restart(struct perf_event *event)
1590{ 1590{
1591 struct hw_perf_counter *hwc = &counter->hw; 1591 struct hw_perf_event *hwc = &event->hw;
1592 int idx = hwc->idx; 1592 int idx = hwc->idx;
1593 int ret; 1593 int ret;
1594 1594
1595 x86_perf_counter_update(counter, hwc, idx); 1595 x86_perf_event_update(event, hwc, idx);
1596 ret = x86_perf_counter_set_period(counter, hwc, idx); 1596 ret = x86_perf_event_set_period(event, hwc, idx);
1597 1597
1598 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 1598 if (event->state == PERF_EVENT_STATE_ACTIVE)
1599 intel_pmu_enable_counter(hwc, idx); 1599 intel_pmu_enable_event(hwc, idx);
1600 1600
1601 return ret; 1601 return ret;
1602} 1602}
1603 1603
1604static void intel_pmu_reset(void) 1604static void intel_pmu_reset(void)
1605{ 1605{
1606 struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; 1606 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1607 unsigned long flags; 1607 unsigned long flags;
1608 int idx; 1608 int idx;
1609 1609
1610 if (!x86_pmu.num_counters) 1610 if (!x86_pmu.num_events)
1611 return; 1611 return;
1612 1612
1613 local_irq_save(flags); 1613 local_irq_save(flags);
1614 1614
1615 printk("clearing PMU state on CPU#%d\n", smp_processor_id()); 1615 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1616 1616
1617 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1617 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1618 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); 1618 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1619 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); 1619 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1620 } 1620 }
1621 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { 1621 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1622 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 1622 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1623 } 1623 }
1624 if (ds) 1624 if (ds)
@@ -1630,38 +1630,38 @@ static void intel_pmu_reset(void)
1630static int p6_pmu_handle_irq(struct pt_regs *regs) 1630static int p6_pmu_handle_irq(struct pt_regs *regs)
1631{ 1631{
1632 struct perf_sample_data data; 1632 struct perf_sample_data data;
1633 struct cpu_hw_counters *cpuc; 1633 struct cpu_hw_events *cpuc;
1634 struct perf_counter *counter; 1634 struct perf_event *event;
1635 struct hw_perf_counter *hwc; 1635 struct hw_perf_event *hwc;
1636 int idx, handled = 0; 1636 int idx, handled = 0;
1637 u64 val; 1637 u64 val;
1638 1638
1639 data.addr = 0; 1639 data.addr = 0;
1640 1640
1641 cpuc = &__get_cpu_var(cpu_hw_counters); 1641 cpuc = &__get_cpu_var(cpu_hw_events);
1642 1642
1643 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1643 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1644 if (!test_bit(idx, cpuc->active_mask)) 1644 if (!test_bit(idx, cpuc->active_mask))
1645 continue; 1645 continue;
1646 1646
1647 counter = cpuc->counters[idx]; 1647 event = cpuc->events[idx];
1648 hwc = &counter->hw; 1648 hwc = &event->hw;
1649 1649
1650 val = x86_perf_counter_update(counter, hwc, idx); 1650 val = x86_perf_event_update(event, hwc, idx);
1651 if (val & (1ULL << (x86_pmu.counter_bits - 1))) 1651 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1652 continue; 1652 continue;
1653 1653
1654 /* 1654 /*
1655 * counter overflow 1655 * event overflow
1656 */ 1656 */
1657 handled = 1; 1657 handled = 1;
1658 data.period = counter->hw.last_period; 1658 data.period = event->hw.last_period;
1659 1659
1660 if (!x86_perf_counter_set_period(counter, hwc, idx)) 1660 if (!x86_perf_event_set_period(event, hwc, idx))
1661 continue; 1661 continue;
1662 1662
1663 if (perf_counter_overflow(counter, 1, &data, regs)) 1663 if (perf_event_overflow(event, 1, &data, regs))
1664 p6_pmu_disable_counter(hwc, idx); 1664 p6_pmu_disable_event(hwc, idx);
1665 } 1665 }
1666 1666
1667 if (handled) 1667 if (handled)
@@ -1677,13 +1677,13 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
1677static int intel_pmu_handle_irq(struct pt_regs *regs) 1677static int intel_pmu_handle_irq(struct pt_regs *regs)
1678{ 1678{
1679 struct perf_sample_data data; 1679 struct perf_sample_data data;
1680 struct cpu_hw_counters *cpuc; 1680 struct cpu_hw_events *cpuc;
1681 int bit, loops; 1681 int bit, loops;
1682 u64 ack, status; 1682 u64 ack, status;
1683 1683
1684 data.addr = 0; 1684 data.addr = 0;
1685 1685
1686 cpuc = &__get_cpu_var(cpu_hw_counters); 1686 cpuc = &__get_cpu_var(cpu_hw_events);
1687 1687
1688 perf_disable(); 1688 perf_disable();
1689 intel_pmu_drain_bts_buffer(cpuc); 1689 intel_pmu_drain_bts_buffer(cpuc);
@@ -1696,8 +1696,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1696 loops = 0; 1696 loops = 0;
1697again: 1697again:
1698 if (++loops > 100) { 1698 if (++loops > 100) {
1699 WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); 1699 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1700 perf_counter_print_debug(); 1700 perf_event_print_debug();
1701 intel_pmu_reset(); 1701 intel_pmu_reset();
1702 perf_enable(); 1702 perf_enable();
1703 return 1; 1703 return 1;
@@ -1706,19 +1706,19 @@ again:
1706 inc_irq_stat(apic_perf_irqs); 1706 inc_irq_stat(apic_perf_irqs);
1707 ack = status; 1707 ack = status;
1708 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 1708 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1709 struct perf_counter *counter = cpuc->counters[bit]; 1709 struct perf_event *event = cpuc->events[bit];
1710 1710
1711 clear_bit(bit, (unsigned long *) &status); 1711 clear_bit(bit, (unsigned long *) &status);
1712 if (!test_bit(bit, cpuc->active_mask)) 1712 if (!test_bit(bit, cpuc->active_mask))
1713 continue; 1713 continue;
1714 1714
1715 if (!intel_pmu_save_and_restart(counter)) 1715 if (!intel_pmu_save_and_restart(event))
1716 continue; 1716 continue;
1717 1717
1718 data.period = counter->hw.last_period; 1718 data.period = event->hw.last_period;
1719 1719
1720 if (perf_counter_overflow(counter, 1, &data, regs)) 1720 if (perf_event_overflow(event, 1, &data, regs))
1721 intel_pmu_disable_counter(&counter->hw, bit); 1721 intel_pmu_disable_event(&event->hw, bit);
1722 } 1722 }
1723 1723
1724 intel_pmu_ack_status(ack); 1724 intel_pmu_ack_status(ack);
@@ -1738,38 +1738,38 @@ again:
1738static int amd_pmu_handle_irq(struct pt_regs *regs) 1738static int amd_pmu_handle_irq(struct pt_regs *regs)
1739{ 1739{
1740 struct perf_sample_data data; 1740 struct perf_sample_data data;
1741 struct cpu_hw_counters *cpuc; 1741 struct cpu_hw_events *cpuc;
1742 struct perf_counter *counter; 1742 struct perf_event *event;
1743 struct hw_perf_counter *hwc; 1743 struct hw_perf_event *hwc;
1744 int idx, handled = 0; 1744 int idx, handled = 0;
1745 u64 val; 1745 u64 val;
1746 1746
1747 data.addr = 0; 1747 data.addr = 0;
1748 1748
1749 cpuc = &__get_cpu_var(cpu_hw_counters); 1749 cpuc = &__get_cpu_var(cpu_hw_events);
1750 1750
1751 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1751 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1752 if (!test_bit(idx, cpuc->active_mask)) 1752 if (!test_bit(idx, cpuc->active_mask))
1753 continue; 1753 continue;
1754 1754
1755 counter = cpuc->counters[idx]; 1755 event = cpuc->events[idx];
1756 hwc = &counter->hw; 1756 hwc = &event->hw;
1757 1757
1758 val = x86_perf_counter_update(counter, hwc, idx); 1758 val = x86_perf_event_update(event, hwc, idx);
1759 if (val & (1ULL << (x86_pmu.counter_bits - 1))) 1759 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1760 continue; 1760 continue;
1761 1761
1762 /* 1762 /*
1763 * counter overflow 1763 * event overflow
1764 */ 1764 */
1765 handled = 1; 1765 handled = 1;
1766 data.period = counter->hw.last_period; 1766 data.period = event->hw.last_period;
1767 1767
1768 if (!x86_perf_counter_set_period(counter, hwc, idx)) 1768 if (!x86_perf_event_set_period(event, hwc, idx))
1769 continue; 1769 continue;
1770 1770
1771 if (perf_counter_overflow(counter, 1, &data, regs)) 1771 if (perf_event_overflow(event, 1, &data, regs))
1772 amd_pmu_disable_counter(hwc, idx); 1772 amd_pmu_disable_event(hwc, idx);
1773 } 1773 }
1774 1774
1775 if (handled) 1775 if (handled)
@@ -1783,18 +1783,18 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
1783 irq_enter(); 1783 irq_enter();
1784 ack_APIC_irq(); 1784 ack_APIC_irq();
1785 inc_irq_stat(apic_pending_irqs); 1785 inc_irq_stat(apic_pending_irqs);
1786 perf_counter_do_pending(); 1786 perf_event_do_pending();
1787 irq_exit(); 1787 irq_exit();
1788} 1788}
1789 1789
1790void set_perf_counter_pending(void) 1790void set_perf_event_pending(void)
1791{ 1791{
1792#ifdef CONFIG_X86_LOCAL_APIC 1792#ifdef CONFIG_X86_LOCAL_APIC
1793 apic->send_IPI_self(LOCAL_PENDING_VECTOR); 1793 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
1794#endif 1794#endif
1795} 1795}
1796 1796
1797void perf_counters_lapic_init(void) 1797void perf_events_lapic_init(void)
1798{ 1798{
1799#ifdef CONFIG_X86_LOCAL_APIC 1799#ifdef CONFIG_X86_LOCAL_APIC
1800 if (!x86_pmu.apic || !x86_pmu_initialized()) 1800 if (!x86_pmu.apic || !x86_pmu_initialized())
@@ -1808,13 +1808,13 @@ void perf_counters_lapic_init(void)
 }

 static int __kprobes
-perf_counter_nmi_handler(struct notifier_block *self,
+perf_event_nmi_handler(struct notifier_block *self,
 			 unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
 	struct pt_regs *regs;

-	if (!atomic_read(&active_counters))
+	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;

 	switch (cmd) {
@@ -1833,7 +1833,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
 #endif
 	/*
 	 * Can't rely on the handled return value to say it was our NMI, two
-	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
+	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
 	 *
 	 * If the first NMI handles both, the latter will be empty and daze
 	 * the CPU.
@@ -1843,8 +1843,8 @@ perf_counter_nmi_handler(struct notifier_block *self,
 	return NOTIFY_STOP;
 }

-static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
-	.notifier_call = perf_counter_nmi_handler,
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+	.notifier_call = perf_event_nmi_handler,
 	.next = NULL,
 	.priority = 1
 };
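Not part of the patch: for readers unfamiliar with the die-notifier pattern that the renamed perf_event_nmi_notifier above relies on, a minimal, hypothetical module sketch follows. The module name, handler body and priority are invented for illustration; only notifier_block, register_die_notifier() and unregister_die_notifier() are existing kernel APIs.

/*
 * Illustration only -- not from this patch. A bare-bones die notifier:
 * pack a callback into a notifier_block and hook it into the die chain,
 * the same mechanism perf_event_nmi_notifier uses to see NMIs.
 */
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>

static int example_die_handler(struct notifier_block *self,
			       unsigned long cmd, void *args)
{
	/* Only look at NMIs; everything else is not ours. */
	if (cmd != DIE_NMI)
		return NOTIFY_DONE;

	/* This sketch never claims the NMI. */
	return NOTIFY_DONE;
}

static struct notifier_block example_die_notifier = {
	.notifier_call = example_die_handler,
	.priority = 0,
};

static int __init example_init(void)
{
	register_die_notifier(&example_die_notifier);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_die_notifier(&example_die_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");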
@@ -1854,8 +1854,8 @@ static struct x86_pmu p6_pmu = {
 	.handle_irq = p6_pmu_handle_irq,
 	.disable_all = p6_pmu_disable_all,
 	.enable_all = p6_pmu_enable_all,
-	.enable = p6_pmu_enable_counter,
-	.disable = p6_pmu_disable_counter,
+	.enable = p6_pmu_enable_event,
+	.disable = p6_pmu_disable_event,
 	.eventsel = MSR_P6_EVNTSEL0,
 	.perfctr = MSR_P6_PERFCTR0,
 	.event_map = p6_pmu_event_map,
@@ -1864,16 +1864,16 @@ static struct x86_pmu p6_pmu = {
 	.apic = 1,
 	.max_period = (1ULL << 31) - 1,
 	.version = 0,
-	.num_counters = 2,
+	.num_events = 2,
 	/*
-	 * Counters have 40 bits implemented. However they are designed such
+	 * Events have 40 bits implemented. However they are designed such
 	 * that bits [32-39] are sign extensions of bit 31. As such the
-	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 * effective width of a event for P6-like PMU is 32 bits only.
 	 *
 	 * See IA-32 Intel Architecture Software developer manual Vol 3B
 	 */
-	.counter_bits = 32,
-	.counter_mask = (1ULL << 32) - 1,
+	.event_bits = 32,
+	.event_mask = (1ULL << 32) - 1,
 };

 static struct x86_pmu intel_pmu = {
@@ -1881,8 +1881,8 @@ static struct x86_pmu intel_pmu = {
 	.handle_irq = intel_pmu_handle_irq,
 	.disable_all = intel_pmu_disable_all,
 	.enable_all = intel_pmu_enable_all,
-	.enable = intel_pmu_enable_counter,
-	.disable = intel_pmu_disable_counter,
+	.enable = intel_pmu_enable_event,
+	.disable = intel_pmu_disable_event,
 	.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
 	.event_map = intel_pmu_event_map,
@@ -1892,7 +1892,7 @@ static struct x86_pmu intel_pmu = {
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
-	 * the generic counter period:
+	 * the generic event period:
 	 */
 	.max_period = (1ULL << 31) - 1,
 	.enable_bts = intel_pmu_enable_bts,
@@ -1904,16 +1904,16 @@ static struct x86_pmu amd_pmu = {
 	.handle_irq = amd_pmu_handle_irq,
 	.disable_all = amd_pmu_disable_all,
 	.enable_all = amd_pmu_enable_all,
-	.enable = amd_pmu_enable_counter,
-	.disable = amd_pmu_disable_counter,
+	.enable = amd_pmu_enable_event,
+	.disable = amd_pmu_disable_event,
 	.eventsel = MSR_K7_EVNTSEL0,
 	.perfctr = MSR_K7_PERFCTR0,
 	.event_map = amd_pmu_event_map,
 	.raw_event = amd_pmu_raw_event,
 	.max_events = ARRAY_SIZE(amd_perfmon_event_map),
-	.num_counters = 4,
-	.counter_bits = 48,
-	.counter_mask = (1ULL << 48) - 1,
+	.num_events = 4,
+	.event_bits = 48,
+	.event_mask = (1ULL << 48) - 1,
 	.apic = 1,
 	/* use highest bit to detect overflow */
 	.max_period = (1ULL << 47) - 1,
@@ -1982,15 +1982,15 @@ static int intel_pmu_init(void)

 	x86_pmu = intel_pmu;
 	x86_pmu.version = version;
-	x86_pmu.num_counters = eax.split.num_counters;
-	x86_pmu.counter_bits = eax.split.bit_width;
-	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
+	x86_pmu.num_events = eax.split.num_events;
+	x86_pmu.event_bits = eax.split.bit_width;
+	x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;

 	/*
-	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
-	 * assume at least 3 counters:
+	 * Quirk: v2 perfmon does not report fixed-purpose events, so
+	 * assume at least 3 events:
 	 */
-	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+	x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);

 	/*
 	 * Install the hw-cache-events table:
@@ -2037,11 +2037,11 @@ static int amd_pmu_init(void)
 	return 0;
 }

-void __init init_hw_perf_counters(void)
+void __init init_hw_perf_events(void)
 {
 	int err;

-	pr_info("Performance Counters: ");
+	pr_info("Performance Events: ");

 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_INTEL:
@@ -2054,45 +2054,45 @@ void __init init_hw_perf_counters(void)
 		return;
 	}
 	if (err != 0) {
-		pr_cont("no PMU driver, software counters only.\n");
+		pr_cont("no PMU driver, software events only.\n");
 		return;
 	}

 	pr_cont("%s PMU driver.\n", x86_pmu.name);

-	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
-		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
-		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
-		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
+	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
+		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
 	}
-	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
-	perf_max_counters = x86_pmu.num_counters;
+	perf_event_mask = (1 << x86_pmu.num_events) - 1;
+	perf_max_events = x86_pmu.num_events;

-	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
-		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
-		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
-		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
+	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
+		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
 	}

-	perf_counter_mask |=
-		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
-	x86_pmu.intel_ctrl = perf_counter_mask;
+	perf_event_mask |=
+		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
+	x86_pmu.intel_ctrl = perf_event_mask;

-	perf_counters_lapic_init();
-	register_die_notifier(&perf_counter_nmi_notifier);
+	perf_events_lapic_init();
+	register_die_notifier(&perf_event_nmi_notifier);

 	pr_info("... version: %d\n", x86_pmu.version);
-	pr_info("... bit width: %d\n", x86_pmu.counter_bits);
-	pr_info("... generic counters: %d\n", x86_pmu.num_counters);
-	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
+	pr_info("... bit width: %d\n", x86_pmu.event_bits);
+	pr_info("... generic events: %d\n", x86_pmu.num_events);
+	pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
 	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
-	pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed);
-	pr_info("... counter mask: %016Lx\n", perf_counter_mask);
+	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
+	pr_info("... event mask: %016Lx\n", perf_event_mask);
 }

-static inline void x86_pmu_read(struct perf_counter *counter)
+static inline void x86_pmu_read(struct perf_event *event)
 {
-	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
+	x86_perf_event_update(event, &event->hw, event->hw.idx);
 }

 static const struct pmu pmu = {
@@ -2102,14 +2102,14 @@ static const struct pmu pmu = {
 	.unthrottle = x86_pmu_unthrottle,
 };

-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	int err;

-	err = __hw_perf_counter_init(counter);
+	err = __hw_perf_event_init(event);
 	if (err) {
-		if (counter->destroy)
-			counter->destroy(counter);
+		if (event->destroy)
+			event->destroy(event);
 		return ERR_PTR(err);
 	}

@@ -2292,7 +2292,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 }

-void hw_perf_counter_setup_online(int cpu)
+void hw_perf_event_setup_online(int cpu)
 {
 	init_debug_store_on_cpu(cpu);
 }
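Not part of the patch: the amd_pmu_handle_irq() hunk above skips any counter whose raw value still has its top bit set (val & (1ULL << (x86_pmu.event_bits - 1))), because x86_perf_event_set_period() arms the counter with -period and the top bit only clears once the counter has wrapped past zero. A small user-space model of that idiom follows, assuming the 48-bit width and mask from the amd_pmu definition; the period value and the wrap simulation are invented for illustration.

/*
 * Illustration only -- not from this patch. Model of the sign-bit
 * overflow test: arm a 48-bit counter with -period, count 'period'
 * events, and observe that the top bit going clear marks the overflow.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int event_bits = 48;			/* .event_bits = 48 */
	const uint64_t event_mask = (1ULL << 48) - 1;	/* .event_mask     */
	const uint64_t period = 100000;			/* illustrative    */

	/* The kernel arms the counter with -period, truncated to 48 bits: */
	uint64_t val = (uint64_t)(-(int64_t)period) & event_mask;

	/* Simulate the hardware counting 'period' events, wrapping at 48 bits: */
	val = (val + period) & event_mask;

	/* The handler's check: top bit still set means no overflow yet. */
	if (val & (1ULL << (event_bits - 1)))
		printf("no overflow: val=%016llx\n", (unsigned long long)val);
	else
		printf("overflowed:  val=%016llx\n", (unsigned long long)val);

	return 0;
}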
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 392bea43b890..fab786f60ed6 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -20,7 +20,7 @@
 #include <linux/kprobes.h>

 #include <asm/apic.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>

 struct nmi_watchdog_ctlblk {
 	unsigned int cccr_msr;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index d59fe323807e..681c3fda7391 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1021,7 +1021,7 @@ apicinterrupt ERROR_APIC_VECTOR \
 apicinterrupt SPURIOUS_APIC_VECTOR \
 	spurious_interrupt smp_spurious_interrupt

-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 apicinterrupt LOCAL_PENDING_VECTOR \
 	perf_pending_interrupt smp_perf_pending_interrupt
 #endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 300883112e3d..40f30773fb29 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -208,7 +208,7 @@ static void __init apic_intr_init(void)
 	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

 	/* Performance monitoring interrupts: */
-# ifdef CONFIG_PERF_COUNTERS
+# ifdef CONFIG_PERF_EVENTS
 	alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
 # endif

diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index d51321ddafda..0157cd26d7cc 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -335,4 +335,4 @@ ENTRY(sys_call_table)
 	.long sys_preadv
 	.long sys_pwritev
 	.long sys_rt_tgsigqueueinfo	/* 335 */
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
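Not part of the patch: with the syscall table entry above now pointing at sys_perf_event_open, user space reaches the facility through the perf_event_open() ABI. A hedged sketch of a caller follows; glibc provides no wrapper, so syscall(2) is invoked directly, and the choice of event (PERF_COUNT_HW_INSTRUCTIONS), the ioctl sequence, and the dummy workload are illustrative only, assuming installed headers that already carry the renamed identifiers.

/*
 * Illustration only -- not from this patch. Count instructions retired
 * by the calling process using the renamed perf_event_open syscall.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* pid = 0: this process; cpu = -1: any CPU; no group, no flags. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload being measured would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}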