Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile                                          |   2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c                                     |   2
-rw-r--r--  arch/powerpc/kernel/entry_64.S                                        |   8
-rw-r--r--  arch/powerpc/kernel/irq.c                                             |   8
-rw-r--r--  arch/powerpc/kernel/mpc7450-pmu.c                                     |   2
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c                                  |   2
-rw-r--r--  arch/powerpc/kernel/perf_event.c (renamed from arch/powerpc/kernel/perf_counter.c) | 594
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c                                      |   2
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c                                     |   2
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c                                      |   2
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c                                      |   2
-rw-r--r--  arch/powerpc/kernel/power7-pmu.c                                      |   2
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c                                      |   2
-rw-r--r--  arch/powerpc/kernel/time.c                                            |  30
14 files changed, 330 insertions(+), 330 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 569f79ccd310..b23664a0b86c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
 
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o perf_callchain.o
+obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_event.o perf_callchain.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f0df285f0f87..0812b0f414bb 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -133,7 +133,7 @@ int main(void)
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
-	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
+	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
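
Background for the asm-offsets.c hunk above (a sketch, not part of the patch): asm-offsets.c is compiled to assembly only, and each DEFINE() plants its offsetof() value as a marker that kbuild extracts into generated #defines, which is how entry_64.S below can address the renamed paca field as PACAPERFPEND(r13). Assuming the stock helper from include/linux/kbuild.h, the mechanism is roughly:

	/* Emits "->SYM <value>" into the compiler's assembly output;
	 * a build script rewrites each marker as "#define SYM <value>". */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))
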
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 66bcda34a6bb..900e0eea0099 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -556,14 +556,14 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
 	TRACE_AND_RESTORE_IRQ(r5);
 
-#ifdef CONFIG_PERF_COUNTERS
-	/* check paca->perf_counter_pending if we're enabling ints */
+#ifdef CONFIG_PERF_EVENTS
+	/* check paca->perf_event_pending if we're enabling ints */
 	lbz	r3,PACAPERFPEND(r13)
 	and.	r3,r3,r5
 	beq	27f
-	bl	.perf_counter_do_pending
+	bl	.perf_event_do_pending
 27:
-#endif /* CONFIG_PERF_COUNTERS */
+#endif /* CONFIG_PERF_EVENTS */
 
 	/* extract EE bit and use it to restore paca->hard_enabled */
 	ld	r3,_MSR(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f7f376ea7b17..e5d121177984 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,7 +53,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -138,9 +138,9 @@ notrace void raw_local_irq_restore(unsigned long en)
 	}
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
-	if (test_perf_counter_pending()) {
-		clear_perf_counter_pending();
-		perf_counter_do_pending();
+	if (test_perf_event_pending()) {
+		clear_perf_event_pending();
+		perf_event_do_pending();
 	}
 
 	/*
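
For context on the raw_local_irq_restore() hunk above (a sketch, not part of the patch): a PMU exception can fire while interrupts are only soft-disabled, so the handler just latches a pending flag in the paca, and the work is replayed here once interrupts are truly re-enabled. The shape of that deferral, with a plain variable standing in for the per-CPU paca field:

	extern void perf_event_do_pending(void);

	static int pending;			/* paca->perf_event_pending */

	static void irq_restore_tail(void)
	{
		if (pending) {			/* test_perf_event_pending() */
			pending = 0;		/* clear_perf_event_pending() */
			perf_event_do_pending();	/* deferred wakeups */
		}
	}
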
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index cc466d039af6..09d72028f317 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>
 
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index f74b62c67511..0a03cf70d247 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -10,7 +10,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_event.c
index 5ccf9bca96c0..c98321fcb459 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1,5 +1,5 @@
 /*
- * Performance counter support - powerpc architecture code
+ * Performance event support - powerpc architecture code
  *
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  *
@@ -10,7 +10,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <asm/reg.h>
@@ -19,35 +19,35 @@
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
 
-struct cpu_hw_counters {
-	int n_counters;
+struct cpu_hw_events {
+	int n_events;
 	int n_percpu;
 	int disabled;
 	int n_added;
 	int n_limited;
 	u8  pmcs_enabled;
-	struct perf_counter *counter[MAX_HWCOUNTERS];
-	u64 events[MAX_HWCOUNTERS];
-	unsigned int flags[MAX_HWCOUNTERS];
+	struct perf_event *event[MAX_HWEVENTS];
+	u64 events[MAX_HWEVENTS];
+	unsigned int flags[MAX_HWEVENTS];
 	unsigned long mmcr[3];
-	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
-	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
-	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	struct perf_event *limited_event[MAX_LIMITED_HWEVENTS];
+	u8  limited_hwidx[MAX_LIMITED_HWEVENTS];
+	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 };
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 struct power_pmu *ppmu;
 
 /*
- * Normally, to ignore kernel events we set the FCS (freeze counters
+ * Normally, to ignore kernel events we set the FCS (freeze events
  * in supervisor mode) bit in MMCR0, but if the kernel runs with the
  * hypervisor bit set in the MSR, or if we are running on a processor
  * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
  * then we need to use the FCHV bit to ignore kernel events.
  */
-static unsigned int freeze_counters_kernel = MMCR0_FCS;
+static unsigned int freeze_events_kernel = MMCR0_FCS;
@@ -122,14 +122,14 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 
 	if (ppmu->flags & PPMU_ALT_SIPR) {
 		if (mmcra & POWER6_MMCRA_SIHV)
-			return PERF_EVENT_MISC_HYPERVISOR;
+			return PERF_RECORD_MISC_HYPERVISOR;
 		return (mmcra & POWER6_MMCRA_SIPR) ?
-			PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+			PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL;
 	}
 	if (mmcra & MMCRA_SIHV)
-		return PERF_EVENT_MISC_HYPERVISOR;
-	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
-		PERF_EVENT_MISC_KERNEL;
+		return PERF_RECORD_MISC_HYPERVISOR;
+	return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER :
+		PERF_RECORD_MISC_KERNEL;
 }
 
 /*
@@ -152,14 +152,14 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
 
 #endif /* CONFIG_PPC64 */
 
-static void perf_counter_interrupt(struct pt_regs *regs);
+static void perf_event_interrupt(struct pt_regs *regs);
 
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
 {
 }
 
 /*
- * Read one performance monitor counter (PMC).
+ * Read one performance monitor event (PMC).
  */
 static unsigned long read_pmc(int idx)
 {
@@ -240,31 +240,31 @@ static void write_pmc(int idx, unsigned long val)
  * Check if a set of events can all go on the PMU at once.
  * If they can't, this will look at alternative codes for the events
  * and see if any combination of alternative codes is feasible.
- * The feasible set is returned in event[].
+ * The feasible set is returned in event_id[].
  */
-static int power_check_constraints(struct cpu_hw_counters *cpuhw,
-				   u64 event[], unsigned int cflags[],
+static int power_check_constraints(struct cpu_hw_events *cpuhw,
+				   u64 event_id[], unsigned int cflags[],
 				   int n_ev)
 {
 	unsigned long mask, value, nv;
-	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
-	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
+	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
 	int i, j;
 	unsigned long addf = ppmu->add_fields;
 	unsigned long tadd = ppmu->test_adder;
 
-	if (n_ev > ppmu->n_counter)
+	if (n_ev > ppmu->n_event)
 		return -1;
 
 	/* First see if the events will go on as-is */
 	for (i = 0; i < n_ev; ++i) {
 		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
-		    && !ppmu->limited_pmc_event(event[i])) {
-			ppmu->get_alternatives(event[i], cflags[i],
+		    && !ppmu->limited_pmc_event(event_id[i])) {
+			ppmu->get_alternatives(event_id[i], cflags[i],
 					       cpuhw->alternatives[i]);
-			event[i] = cpuhw->alternatives[i][0];
+			event_id[i] = cpuhw->alternatives[i][0];
 		}
-		if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
+		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
 					 &cpuhw->avalues[i][0]))
 			return -1;
 	}
@@ -287,7 +287,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 		return -1;
 	for (i = 0; i < n_ev; ++i) {
 		choice[i] = 0;
-		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
+		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
 						  cpuhw->alternatives[i]);
 		for (j = 1; j < n_alt[i]; ++j)
 			ppmu->get_constraint(cpuhw->alternatives[i][j],
@@ -307,7 +307,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 			j = choice[i];
 		}
 		/*
-		 * See if any alternative k for event i,
+		 * See if any alternative k for event_id i,
 		 * where k > j, will satisfy the constraints.
 		 */
 		while (++j < n_alt[i]) {
@@ -321,16 +321,16 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 		if (j >= n_alt[i]) {
 			/*
 			 * No feasible alternative, backtrack
-			 * to event i-1 and continue enumerating its
+			 * to event_id i-1 and continue enumerating its
 			 * alternatives from where we got up to.
 			 */
 			if (--i < 0)
 				return -1;
 		} else {
 			/*
-			 * Found a feasible alternative for event i,
-			 * remember where we got up to with this event,
-			 * go on to the next event, and start with
+			 * Found a feasible alternative for event_id i,
+			 * remember where we got up to with this event_id,
+			 * go on to the next event_id, and start with
 			 * the first alternative for it.
 			 */
 			choice[i] = j;
@@ -345,21 +345,21 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 
 	/* OK, we have a feasible combination, tell the caller the solution */
 	for (i = 0; i < n_ev; ++i)
-		event[i] = cpuhw->alternatives[i][choice[i]];
+		event_id[i] = cpuhw->alternatives[i][choice[i]];
 	return 0;
 }
 
 /*
- * Check if newly-added counters have consistent settings for
+ * Check if newly-added events have consistent settings for
  * exclude_{user,kernel,hv} with each other and any previously
- * added counters.
+ * added events.
  */
-static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
+static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 			  int n_prev, int n_new)
 {
 	int eu = 0, ek = 0, eh = 0;
 	int i, n, first;
-	struct perf_counter *counter;
+	struct perf_event *event;
 
 	n = n_prev + n_new;
 	if (n <= 1)
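
The renames above land in the middle of power_check_constraints(), which is a textbook backtracking search: take the first alternative encoding for each event, and when the accumulated MMCR constraints cannot be satisfied, advance the deepest event to its next alternative, retreating to event i-1 whenever the list is exhausted. A self-contained sketch of that control flow (not the kernel's code), with a feasible() callback standing in for the add_fields/test_adder bit arithmetic:

	/* Sketch: enumerate per-event alternatives with backtracking.
	 * n_alt[i] alternatives exist for event i; choice[i] records
	 * the current pick; feasible() checks the partial solution. */
	static int pick_alternatives(int n_ev, int n_alt[], int choice[],
				     int (*feasible)(int *choice, int depth))
	{
		int i = 0, j = 0;

		while (i < n_ev) {
			for (choice[i] = j; choice[i] < n_alt[i]; ++choice[i])
				if (feasible(choice, i + 1))
					break;
			if (choice[i] >= n_alt[i]) {
				if (--i < 0)		/* nothing left to retry */
					return -1;
				j = choice[i] + 1;	/* resume previous level */
			} else {
				++i;			/* still satisfiable: descend */
				j = 0;
			}
		}
		return 0;	/* choice[] now holds a feasible set */
	}
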
@@ -371,15 +371,15 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
 			continue;
 		}
-		counter = ctrs[i];
+		event = ctrs[i];
 		if (first) {
-			eu = counter->attr.exclude_user;
-			ek = counter->attr.exclude_kernel;
-			eh = counter->attr.exclude_hv;
+			eu = event->attr.exclude_user;
+			ek = event->attr.exclude_kernel;
+			eh = event->attr.exclude_hv;
 			first = 0;
-		} else if (counter->attr.exclude_user != eu ||
-			   counter->attr.exclude_kernel != ek ||
-			   counter->attr.exclude_hv != eh) {
+		} else if (event->attr.exclude_user != eu ||
+			   event->attr.exclude_kernel != ek ||
+			   event->attr.exclude_hv != eh) {
 			return -EAGAIN;
 		}
 	}
@@ -392,11 +392,11 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 	return 0;
 }
 
-static void power_pmu_read(struct perf_counter *counter)
+static void power_pmu_read(struct perf_event *event)
 {
 	s64 val, delta, prev;
 
-	if (!counter->hw.idx)
+	if (!event->hw.idx)
 		return;
 	/*
 	 * Performance monitor interrupts come even when interrupts
@@ -404,21 +404,21 @@ static void power_pmu_read(struct perf_counter *counter)
 	 * Therefore we treat them like NMIs.
 	 */
 	do {
-		prev = atomic64_read(&counter->hw.prev_count);
+		prev = atomic64_read(&event->hw.prev_count);
 		barrier();
-		val = read_pmc(counter->hw.idx);
-	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);
+		val = read_pmc(event->hw.idx);
+	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-	/* The counters are only 32 bits wide */
+	/* The events are only 32 bits wide */
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &counter->count);
-	atomic64_sub(delta, &counter->hw.period_left);
+	atomic64_add(delta, &event->count);
+	atomic64_sub(delta, &event->hw.period_left);
 }
 
 /*
  * On some machines, PMC5 and PMC6 can't be written, don't respect
  * the freeze conditions, and don't generate interrupts.  This tells
- * us if `counter' is using such a PMC.
+ * us if `event' is using such a PMC.
  */
 static int is_limited_pmc(int pmcnum)
 {
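
Aside from the renames, power_pmu_read() above is worth a note: the cmpxchg loop lets it race safely with the PMU interrupt updating prev_count, and the masked subtraction keeps deltas correct when the 32-bit PMC wraps. The arithmetic in isolation (a sketch; the kernel does this on atomic64_t fields):

	#include <stdint.h>

	/* Wraparound-safe delta of a 32-bit counter held in 64-bit
	 * variables: prev = 0xfffffff0, val = 0x10 yields 0x20, the
	 * number of events counted across the wrap. */
	static uint64_t pmc_delta(uint64_t prev, uint64_t val)
	{
		return (val - prev) & 0xffffffffUL;
	}
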
@@ -426,53 +426,53 @@ static int is_limited_pmc(int pmcnum)
 		&& (pmcnum == 5 || pmcnum == 6);
 }
 
-static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
-				    unsigned long pmc5, unsigned long pmc6)
+static void freeze_limited_events(struct cpu_hw_events *cpuhw,
+				  unsigned long pmc5, unsigned long pmc6)
 {
-	struct perf_counter *counter;
+	struct perf_event *event;
 	u64 val, prev, delta;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		counter = cpuhw->limited_counter[i];
-		if (!counter->hw.idx)
+		event = cpuhw->limited_event[i];
+		if (!event->hw.idx)
 			continue;
-		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
-		prev = atomic64_read(&counter->hw.prev_count);
-		counter->hw.idx = 0;
+		val = (event->hw.idx == 5) ? pmc5 : pmc6;
+		prev = atomic64_read(&event->hw.prev_count);
+		event->hw.idx = 0;
 		delta = (val - prev) & 0xfffffffful;
-		atomic64_add(delta, &counter->count);
+		atomic64_add(delta, &event->count);
 	}
 }
 
-static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
-				  unsigned long pmc5, unsigned long pmc6)
+static void thaw_limited_events(struct cpu_hw_events *cpuhw,
+				unsigned long pmc5, unsigned long pmc6)
 {
-	struct perf_counter *counter;
+	struct perf_event *event;
 	u64 val;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		counter = cpuhw->limited_counter[i];
-		counter->hw.idx = cpuhw->limited_hwidx[i];
-		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
-		atomic64_set(&counter->hw.prev_count, val);
-		perf_counter_update_userpage(counter);
+		event = cpuhw->limited_event[i];
+		event->hw.idx = cpuhw->limited_hwidx[i];
+		val = (event->hw.idx == 5) ? pmc5 : pmc6;
+		atomic64_set(&event->hw.prev_count, val);
+		perf_event_update_userpage(event);
 	}
 }
 
 /*
- * Since limited counters don't respect the freeze conditions, we
+ * Since limited events don't respect the freeze conditions, we
  * have to read them immediately after freezing or unfreezing the
- * other counters.  We try to keep the values from the limited
- * counters as consistent as possible by keeping the delay (in
+ * other events.  We try to keep the values from the limited
+ * events as consistent as possible by keeping the delay (in
  * cycles and instructions) between freezing/unfreezing and reading
- * the limited counters as small and consistent as possible.
- * Therefore, if any limited counters are in use, we read them
+ * the limited events as small and consistent as possible.
+ * Therefore, if any limited events are in use, we read them
  * both, and always in the same order, to minimize variability,
  * and do it inside the same asm that writes MMCR0.
  */
-static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
+static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 {
 	unsigned long pmc5, pmc6;
 
@@ -485,7 +485,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 	 * Write MMCR0, then read PMC5 and PMC6 immediately.
 	 * To ensure we don't get a performance monitor interrupt
 	 * between writing MMCR0 and freezing/thawing the limited
-	 * counters, we first write MMCR0 with the counter overflow
+	 * events, we first write MMCR0 with the event overflow
 	 * interrupt enable bits turned off.
 	 */
 	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
@@ -495,12 +495,12 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 		     "i" (SPRN_PMC5), "i" (SPRN_PMC6));
 
 	if (mmcr0 & MMCR0_FC)
-		freeze_limited_counters(cpuhw, pmc5, pmc6);
+		freeze_limited_events(cpuhw, pmc5, pmc6);
 	else
-		thaw_limited_counters(cpuhw, pmc5, pmc6);
+		thaw_limited_events(cpuhw, pmc5, pmc6);
 
 	/*
-	 * Write the full MMCR0 including the counter overflow interrupt
+	 * Write the full MMCR0 including the event overflow interrupt
 	 * enable bits, if necessary.
 	 */
 	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
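
The asm statement touched by the two hunks above is split across hunk boundaries; reassembled from the visible template and operands, it is a single volatile asm, so neither the compiler nor an interrupt taken between statements can separate the MMCR0 write from the PMC5/PMC6 reads:

	unsigned long pmc5, pmc6;

	/* mtspr(MMCR0) followed immediately by two mfspr reads, all in
	 * one asm statement, per the comment block before write_mmcr0(). */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0), "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
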
@@ -508,18 +508,18 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 }
 
 /*
- * Disable all counters to prevent PMU interrupts and to allow
- * counters to be added or removed.
+ * Disable all events to prevent PMU interrupts and to allow
+ * events to be added or removed.
  */
 void hw_perf_disable(void)
 {
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
 
 	if (!ppmu)
 		return;
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
+	cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	if (!cpuhw->disabled) {
 		cpuhw->disabled = 1;
@@ -543,9 +543,9 @@ void hw_perf_disable(void)
 		}
 
 		/*
-		 * Set the 'freeze counters' bit.
+		 * Set the 'freeze events' bit.
 		 * The barrier is to make sure the mtspr has been
-		 * executed and the PMU has frozen the counters
+		 * executed and the PMU has frozen the events
 		 * before we return.
 		 */
 		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
@@ -555,26 +555,26 @@ void hw_perf_disable(void)
 }
 
 /*
- * Re-enable all counters if disable == 0.
- * If we were previously disabled and counters were added, then
+ * Re-enable all events if disable == 0.
+ * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
 void hw_perf_enable(void)
 {
-	struct perf_counter *counter;
-	struct cpu_hw_counters *cpuhw;
+	struct perf_event *event;
+	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
 	long i;
 	unsigned long val;
 	s64 left;
-	unsigned int hwc_index[MAX_HWCOUNTERS];
+	unsigned int hwc_index[MAX_HWEVENTS];
 	int n_lim;
 	int idx;
 
 	if (!ppmu)
 		return;
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
+	cpuhw = &__get_cpu_var(cpu_hw_events);
 	if (!cpuhw->disabled) {
 		local_irq_restore(flags);
 		return;
@@ -582,23 +582,23 @@ void hw_perf_enable(void)
 	cpuhw->disabled = 0;
 
 	/*
-	 * If we didn't change anything, or only removed counters,
+	 * If we didn't change anything, or only removed events,
 	 * no need to recalculate MMCR* settings and reset the PMCs.
 	 * Just reenable the PMU with the current MMCR* settings
-	 * (possibly updated for removal of counters).
+	 * (possibly updated for removal of events).
 	 */
 	if (!cpuhw->n_added) {
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
-		if (cpuhw->n_counters == 0)
+		if (cpuhw->n_events == 0)
 			ppc_set_pmu_inuse(0);
 		goto out_enable;
 	}
 
 	/*
-	 * Compute MMCR* values for the new set of counters
+	 * Compute MMCR* values for the new set of events
 	 */
-	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
+	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
 			       cpuhw->mmcr)) {
 		/* shouldn't ever get here */
 		printk(KERN_ERR "oops compute_mmcr failed\n");
@@ -607,22 +607,22 @@ void hw_perf_enable(void)
 
 	/*
 	 * Add in MMCR0 freeze bits corresponding to the
-	 * attr.exclude_* bits for the first counter.
-	 * We have already checked that all counters have the
-	 * same values for these bits as the first counter.
+	 * attr.exclude_* bits for the first event.
+	 * We have already checked that all events have the
+	 * same values for these bits as the first event.
 	 */
-	counter = cpuhw->counter[0];
-	if (counter->attr.exclude_user)
+	event = cpuhw->event[0];
+	if (event->attr.exclude_user)
 		cpuhw->mmcr[0] |= MMCR0_FCP;
-	if (counter->attr.exclude_kernel)
-		cpuhw->mmcr[0] |= freeze_counters_kernel;
-	if (counter->attr.exclude_hv)
+	if (event->attr.exclude_kernel)
+		cpuhw->mmcr[0] |= freeze_events_kernel;
+	if (event->attr.exclude_hv)
 		cpuhw->mmcr[0] |= MMCR0_FCHV;
 
 	/*
 	 * Write the new configuration to MMCR* with the freeze
-	 * bit set and set the hardware counters to their initial values.
-	 * Then unfreeze the counters.
+	 * bit set and set the hardware events to their initial values.
+	 * Then unfreeze the events.
 	 */
 	ppc_set_pmu_inuse(1);
 	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
@@ -631,43 +631,43 @@ void hw_perf_enable(void)
 	      | MMCR0_FC);
 
 	/*
-	 * Read off any pre-existing counters that need to move
+	 * Read off any pre-existing events that need to move
 	 * to another PMC.
 	 */
-	for (i = 0; i < cpuhw->n_counters; ++i) {
-		counter = cpuhw->counter[i];
-		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
-			power_pmu_read(counter);
-			write_pmc(counter->hw.idx, 0);
-			counter->hw.idx = 0;
+	for (i = 0; i < cpuhw->n_events; ++i) {
+		event = cpuhw->event[i];
+		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
+			power_pmu_read(event);
+			write_pmc(event->hw.idx, 0);
+			event->hw.idx = 0;
 		}
 	}
 
 	/*
-	 * Initialize the PMCs for all the new and moved counters.
+	 * Initialize the PMCs for all the new and moved events.
 	 */
 	cpuhw->n_limited = n_lim = 0;
-	for (i = 0; i < cpuhw->n_counters; ++i) {
-		counter = cpuhw->counter[i];
-		if (counter->hw.idx)
+	for (i = 0; i < cpuhw->n_events; ++i) {
+		event = cpuhw->event[i];
+		if (event->hw.idx)
 			continue;
 		idx = hwc_index[i] + 1;
 		if (is_limited_pmc(idx)) {
-			cpuhw->limited_counter[n_lim] = counter;
+			cpuhw->limited_event[n_lim] = event;
 			cpuhw->limited_hwidx[n_lim] = idx;
 			++n_lim;
 			continue;
 		}
 		val = 0;
-		if (counter->hw.sample_period) {
-			left = atomic64_read(&counter->hw.period_left);
+		if (event->hw.sample_period) {
+			left = atomic64_read(&event->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
 		}
-		atomic64_set(&counter->hw.prev_count, val);
-		counter->hw.idx = idx;
+		atomic64_set(&event->hw.prev_count, val);
+		event->hw.idx = idx;
 		write_pmc(idx, val);
-		perf_counter_update_userpage(counter);
+		perf_event_update_userpage(event);
 	}
 	cpuhw->n_limited = n_lim;
 	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
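
A note on the seeding logic at the end of the hunk above: powerpc PMCs raise their overflow interrupt when bit 31 of the counter goes high, so an event with 'left' counts remaining is seeded with 2^31 - left. A worked sketch of the rule (the kernel leaves val at 0, i.e. a full 2^31 counts, when left doesn't fit):

	#include <stdint.h>

	/* e.g. left = 100000 -> seed 0x7ffe7960; after exactly 100000
	 * increments the PMC reaches 0x80000000 and interrupts. */
	static uint32_t pmc_seed(int64_t left)
	{
		if (left > 0 && left < 0x80000000LL)
			return (uint32_t)(0x80000000LL - left);
		return 0;
	}
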
@@ -688,85 +688,85 @@ void hw_perf_enable(void)
 	local_irq_restore(flags);
 }
 
-static int collect_events(struct perf_counter *group, int max_count,
-			  struct perf_counter *ctrs[], u64 *events,
+static int collect_events(struct perf_event *group, int max_count,
+			  struct perf_event *ctrs[], u64 *events,
 			  unsigned int *flags)
 {
 	int n = 0;
-	struct perf_counter *counter;
+	struct perf_event *event;
 
-	if (!is_software_counter(group)) {
+	if (!is_software_event(group)) {
 		if (n >= max_count)
 			return -1;
 		ctrs[n] = group;
-		flags[n] = group->hw.counter_base;
+		flags[n] = group->hw.event_base;
 		events[n++] = group->hw.config;
 	}
-	list_for_each_entry(counter, &group->sibling_list, list_entry) {
-		if (!is_software_counter(counter) &&
-		    counter->state != PERF_COUNTER_STATE_OFF) {
+	list_for_each_entry(event, &group->sibling_list, list_entry) {
+		if (!is_software_event(event) &&
+		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
 				return -1;
-			ctrs[n] = counter;
-			flags[n] = counter->hw.counter_base;
-			events[n++] = counter->hw.config;
+			ctrs[n] = event;
+			flags[n] = event->hw.event_base;
+			events[n++] = event->hw.config;
 		}
 	}
 	return n;
 }
 
-static void counter_sched_in(struct perf_counter *counter, int cpu)
+static void event_sched_in(struct perf_event *event, int cpu)
 {
-	counter->state = PERF_COUNTER_STATE_ACTIVE;
-	counter->oncpu = cpu;
-	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
-	if (is_software_counter(counter))
-		counter->pmu->enable(counter);
+	event->state = PERF_EVENT_STATE_ACTIVE;
+	event->oncpu = cpu;
+	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
+	if (is_software_event(event))
+		event->pmu->enable(event);
 }
 
 /*
- * Called to enable a whole group of counters.
+ * Called to enable a whole group of events.
  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
  * Assumes the caller has disabled interrupts and has
  * frozen the PMU with hw_perf_save_disable.
  */
-int hw_perf_group_sched_in(struct perf_counter *group_leader,
+int hw_perf_group_sched_in(struct perf_event *group_leader,
 			   struct perf_cpu_context *cpuctx,
-			   struct perf_counter_context *ctx, int cpu)
+			   struct perf_event_context *ctx, int cpu)
 {
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 	long i, n, n0;
-	struct perf_counter *sub;
+	struct perf_event *sub;
 
 	if (!ppmu)
 		return 0;
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
-	n0 = cpuhw->n_counters;
-	n = collect_events(group_leader, ppmu->n_counter - n0,
-			   &cpuhw->counter[n0], &cpuhw->events[n0],
+	cpuhw = &__get_cpu_var(cpu_hw_events);
+	n0 = cpuhw->n_events;
+	n = collect_events(group_leader, ppmu->n_event - n0,
+			   &cpuhw->event[n0], &cpuhw->events[n0],
 			   &cpuhw->flags[n0]);
 	if (n < 0)
 		return -EAGAIN;
-	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
+	if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
 		return -EAGAIN;
 	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
 	if (i < 0)
 		return -EAGAIN;
-	cpuhw->n_counters = n0 + n;
+	cpuhw->n_events = n0 + n;
 	cpuhw->n_added += n;
 
 	/*
-	 * OK, this group can go on; update counter states etc.,
-	 * and enable any software counters
+	 * OK, this group can go on; update event states etc.,
+	 * and enable any software events
 	 */
 	for (i = n0; i < n0 + n; ++i)
-		cpuhw->counter[i]->hw.config = cpuhw->events[i];
+		cpuhw->event[i]->hw.config = cpuhw->events[i];
 	cpuctx->active_oncpu += n;
 	n = 1;
-	counter_sched_in(group_leader, cpu);
+	event_sched_in(group_leader, cpu);
 	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
-		if (sub->state != PERF_COUNTER_STATE_OFF) {
-			counter_sched_in(sub, cpu);
+		if (sub->state != PERF_EVENT_STATE_OFF) {
+			event_sched_in(sub, cpu);
 			++n;
 		}
 	}
@@ -776,14 +776,14 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
 }
 
 /*
- * Add a counter to the PMU.
- * If all counters are not already frozen, then we disable and
+ * Add an event to the PMU.
+ * If all events are not already frozen, then we disable and
  * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
  */
-static int power_pmu_enable(struct perf_counter *counter)
+static int power_pmu_enable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
 	int n0;
 	int ret = -EAGAIN;
@@ -792,23 +792,23 @@ static int power_pmu_enable(struct perf_counter *counter)
 	perf_disable();
 
 	/*
-	 * Add the counter to the list (if there is room)
+	 * Add the event to the list (if there is room)
 	 * and check whether the total set is still feasible.
 	 */
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
-	n0 = cpuhw->n_counters;
-	if (n0 >= ppmu->n_counter)
+	cpuhw = &__get_cpu_var(cpu_hw_events);
+	n0 = cpuhw->n_events;
+	if (n0 >= ppmu->n_event)
 		goto out;
-	cpuhw->counter[n0] = counter;
-	cpuhw->events[n0] = counter->hw.config;
-	cpuhw->flags[n0] = counter->hw.counter_base;
-	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
+	cpuhw->event[n0] = event;
+	cpuhw->events[n0] = event->hw.config;
+	cpuhw->flags[n0] = event->hw.event_base;
+	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
 		goto out;
 	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
 		goto out;
 
-	counter->hw.config = cpuhw->events[n0];
-	++cpuhw->n_counters;
+	event->hw.config = cpuhw->events[n0];
+	++cpuhw->n_events;
 	++cpuhw->n_added;
 
 	ret = 0;
@@ -819,46 +819,46 @@ static int power_pmu_enable(struct perf_counter *counter)
 }
 
 /*
- * Remove a counter from the PMU.
+ * Remove an event from the PMU.
  */
-static void power_pmu_disable(struct perf_counter *counter)
+static void power_pmu_disable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 	long i;
 	unsigned long flags;
 
 	local_irq_save(flags);
 	perf_disable();
 
-	power_pmu_read(counter);
+	power_pmu_read(event);
 
-	cpuhw = &__get_cpu_var(cpu_hw_counters);
-	for (i = 0; i < cpuhw->n_counters; ++i) {
-		if (counter == cpuhw->counter[i]) {
-			while (++i < cpuhw->n_counters)
-				cpuhw->counter[i-1] = cpuhw->counter[i];
-			--cpuhw->n_counters;
-			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
-			if (counter->hw.idx) {
-				write_pmc(counter->hw.idx, 0);
-				counter->hw.idx = 0;
+	cpuhw = &__get_cpu_var(cpu_hw_events);
+	for (i = 0; i < cpuhw->n_events; ++i) {
+		if (event == cpuhw->event[i]) {
+			while (++i < cpuhw->n_events)
+				cpuhw->event[i-1] = cpuhw->event[i];
+			--cpuhw->n_events;
+			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
+			if (event->hw.idx) {
+				write_pmc(event->hw.idx, 0);
+				event->hw.idx = 0;
 			}
-			perf_counter_update_userpage(counter);
+			perf_event_update_userpage(event);
 			break;
 		}
 	}
 	for (i = 0; i < cpuhw->n_limited; ++i)
-		if (counter == cpuhw->limited_counter[i])
+		if (event == cpuhw->limited_event[i])
 			break;
 	if (i < cpuhw->n_limited) {
 		while (++i < cpuhw->n_limited) {
-			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
+			cpuhw->limited_event[i-1] = cpuhw->limited_event[i];
 			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
 		}
 		--cpuhw->n_limited;
 	}
-	if (cpuhw->n_counters == 0) {
-		/* disable exceptions if no counters are running */
+	if (cpuhw->n_events == 0) {
+		/* disable exceptions if no events are running */
 		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 	}
 
@@ -867,28 +867,28 @@ static void power_pmu_disable(struct perf_counter *counter)
 }
 
 /*
- * Re-enable interrupts on a counter after they were throttled
+ * Re-enable interrupts on an event after they were throttled
  * because they were coming too fast.
  */
-static void power_pmu_unthrottle(struct perf_counter *counter)
+static void power_pmu_unthrottle(struct perf_event *event)
 {
 	s64 val, left;
 	unsigned long flags;
 
-	if (!counter->hw.idx || !counter->hw.sample_period)
+	if (!event->hw.idx || !event->hw.sample_period)
 		return;
 	local_irq_save(flags);
 	perf_disable();
-	power_pmu_read(counter);
-	left = counter->hw.sample_period;
-	counter->hw.last_period = left;
+	power_pmu_read(event);
+	left = event->hw.sample_period;
+	event->hw.last_period = left;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
-	write_pmc(counter->hw.idx, val);
-	atomic64_set(&counter->hw.prev_count, val);
-	atomic64_set(&counter->hw.period_left, left);
-	perf_counter_update_userpage(counter);
+	write_pmc(event->hw.idx, val);
+	atomic64_set(&event->hw.prev_count, val);
+	atomic64_set(&event->hw.period_left, left);
+	perf_event_update_userpage(event);
 	perf_enable();
 	local_irq_restore(flags);
 }
@@ -901,29 +901,29 @@ struct pmu power_pmu = {
 };
 
 /*
- * Return 1 if we might be able to put counter on a limited PMC,
+ * Return 1 if we might be able to put an event on a limited PMC,
  * or 0 if not.
- * A counter can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
  * that a limited PMC can count, doesn't require interrupts, and
  * doesn't exclude any processor mode.
  */
-static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
+static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
 				 unsigned int flags)
 {
 	int n;
 	u64 alt[MAX_EVENT_ALTERNATIVES];
 
-	if (counter->attr.exclude_user
-	    || counter->attr.exclude_kernel
-	    || counter->attr.exclude_hv
-	    || counter->attr.sample_period)
+	if (event->attr.exclude_user
+	    || event->attr.exclude_kernel
+	    || event->attr.exclude_hv
+	    || event->attr.sample_period)
 		return 0;
 
 	if (ppmu->limited_pmc_event(ev))
 		return 1;
 
 	/*
-	 * The requested event isn't on a limited PMC already;
+	 * The requested event_id isn't on a limited PMC already;
 	 * see if any alternative code goes on a limited PMC.
 	 */
 	if (!ppmu->get_alternatives)
@@ -936,9 +936,9 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
 }
 
 /*
- * Find an alternative event that goes on a normal PMC, if possible,
- * and return the event code, or 0 if there is no such alternative.
- * (Note: event code 0 is "don't count" on all machines.)
+ * Find an alternative event_id that goes on a normal PMC, if possible,
+ * and return the event_id code, or 0 if there is no such alternative.
+ * (Note: event_id code 0 is "don't count" on all machines.)
  */
 static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
 {
@@ -952,26 +952,26 @@ static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
 	return alt[0];
 }
 
-/* Number of perf_counters counting hardware events */
-static atomic_t num_counters;
+/* Number of perf_events counting hardware events */
+static atomic_t num_events;
 /* Used to avoid races in calling reserve/release_pmc_hardware */
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
 /*
- * Release the PMU if this is the last perf_counter.
+ * Release the PMU if this is the last perf_event.
  */
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
 {
-	if (!atomic_add_unless(&num_counters, -1, 1)) {
+	if (!atomic_add_unless(&num_events, -1, 1)) {
 		mutex_lock(&pmc_reserve_mutex);
-		if (atomic_dec_return(&num_counters) == 0)
+		if (atomic_dec_return(&num_events) == 0)
 			release_pmc_hardware();
 		mutex_unlock(&pmc_reserve_mutex);
 	}
 }
 
 /*
- * Translate a generic cache event config to a raw event code.
+ * Translate a generic cache event_id config to a raw event_id code.
  */
 static int hw_perf_cache_event(u64 config, u64 *eventp)
 {
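
The destroy path renamed above pairs with the reservation done in hw_perf_event_init() further down: a lock-free fast path for the common case, with a mutex guarding only the 0 <-> 1 transitions where the PMC hardware is actually reserved or released. A condensed restatement of the idiom (hypothetical get_ref/put_ref wrappers around the same kernel atomics):

	static atomic_t num_events;
	static DEFINE_MUTEX(pmc_reserve_mutex);

	static int get_ref(void)	/* as in hw_perf_event_init() */
	{
		int err = 0;

		/* fast path: count already nonzero, hardware reserved */
		if (!atomic_inc_not_zero(&num_events)) {
			mutex_lock(&pmc_reserve_mutex);
			if (atomic_read(&num_events) == 0 &&
			    reserve_pmc_hardware(perf_event_interrupt))
				err = -EBUSY;
			else
				atomic_inc(&num_events);
			mutex_unlock(&pmc_reserve_mutex);
		}
		return err;
	}

	static void put_ref(void)	/* hw_perf_event_destroy() above */
	{
		/* add_unless refuses the 1 -> 0 step, forcing the last
		 * user through the mutex-protected slow path */
		if (!atomic_add_unless(&num_events, -1, 1)) {
			mutex_lock(&pmc_reserve_mutex);
			if (atomic_dec_return(&num_events) == 0)
				release_pmc_hardware();
			mutex_unlock(&pmc_reserve_mutex);
		}
	}
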
@@ -1000,39 +1000,39 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
 	return 0;
 }
 
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	u64 ev;
 	unsigned long flags;
-	struct perf_counter *ctrs[MAX_HWCOUNTERS];
-	u64 events[MAX_HWCOUNTERS];
-	unsigned int cflags[MAX_HWCOUNTERS];
+	struct perf_event *ctrs[MAX_HWEVENTS];
+	u64 events[MAX_HWEVENTS];
+	unsigned int cflags[MAX_HWEVENTS];
 	int n;
 	int err;
-	struct cpu_hw_counters *cpuhw;
+	struct cpu_hw_events *cpuhw;
 
 	if (!ppmu)
 		return ERR_PTR(-ENXIO);
-	switch (counter->attr.type) {
+	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
-		ev = counter->attr.config;
+		ev = event->attr.config;
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
 			return ERR_PTR(-EOPNOTSUPP);
 		ev = ppmu->generic_events[ev];
 		break;
 	case PERF_TYPE_HW_CACHE:
-		err = hw_perf_cache_event(counter->attr.config, &ev);
+		err = hw_perf_cache_event(event->attr.config, &ev);
 		if (err)
 			return ERR_PTR(err);
 		break;
 	case PERF_TYPE_RAW:
-		ev = counter->attr.config;
+		ev = event->attr.config;
 		break;
 	default:
 		return ERR_PTR(-EINVAL);
 	}
-	counter->hw.config_base = ev;
-	counter->hw.idx = 0;
+	event->hw.config_base = ev;
+	event->hw.idx = 0;
 
 	/*
 	 * If we are not running on a hypervisor, force the
@@ -1040,28 +1040,28 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 	 * the user set it to.
 	 */
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
-		counter->attr.exclude_hv = 0;
+		event->attr.exclude_hv = 0;
 
 	/*
-	 * If this is a per-task counter, then we can use
+	 * If this is a per-task event, then we can use
 	 * PM_RUN_* events interchangeably with their non RUN_*
 	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
 	 * XXX we should check if the task is an idle task.
 	 */
 	flags = 0;
-	if (counter->ctx->task)
+	if (event->ctx->task)
 		flags |= PPMU_ONLY_COUNT_RUN;
 
 	/*
-	 * If this machine has limited counters, check whether this
-	 * event could go on a limited counter.
+	 * If this machine has limited events, check whether this
+	 * event_id could go on a limited event.
 	 */
 	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
-		if (can_go_on_limited_pmc(counter, ev, flags)) {
+		if (can_go_on_limited_pmc(event, ev, flags)) {
 			flags |= PPMU_LIMITED_PMC_OK;
 		} else if (ppmu->limited_pmc_event(ev)) {
 			/*
-			 * The requested event is on a limited PMC,
+			 * The requested event_id is on a limited PMC,
 			 * but we can't use a limited PMC; see if any
 			 * alternative goes on a normal PMC.
 			 */
@@ -1073,50 +1073,50 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	/*
 	 * If this is in a group, check if it can go on with all the
-	 * other hardware counters in the group.  We assume the counter
+	 * other hardware events in the group.  We assume the event
 	 * hasn't been linked into its leader's sibling list at this point.
 	 */
 	n = 0;
-	if (counter->group_leader != counter) {
-		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
+	if (event->group_leader != event) {
+		n = collect_events(event->group_leader, ppmu->n_event - 1,
 				   ctrs, events, cflags);
 		if (n < 0)
 			return ERR_PTR(-EINVAL);
 	}
 	events[n] = ev;
-	ctrs[n] = counter;
+	ctrs[n] = event;
 	cflags[n] = flags;
 	if (check_excludes(ctrs, cflags, n, 1))
 		return ERR_PTR(-EINVAL);
 
-	cpuhw = &get_cpu_var(cpu_hw_counters);
+	cpuhw = &get_cpu_var(cpu_hw_events);
 	err = power_check_constraints(cpuhw, events, cflags, n + 1);
-	put_cpu_var(cpu_hw_counters);
+	put_cpu_var(cpu_hw_events);
 	if (err)
 		return ERR_PTR(-EINVAL);
 
-	counter->hw.config = events[n];
-	counter->hw.counter_base = cflags[n];
-	counter->hw.last_period = counter->hw.sample_period;
-	atomic64_set(&counter->hw.period_left, counter->hw.last_period);
+	event->hw.config = events[n];
+	event->hw.event_base = cflags[n];
+	event->hw.last_period = event->hw.sample_period;
+	atomic64_set(&event->hw.period_left, event->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
-	 * If no counters are currently in use, then we have to take a
+	 * If no events are currently in use, then we have to take a
 	 * mutex to ensure that we don't race with another task doing
 	 * reserve_pmc_hardware or release_pmc_hardware.
 	 */
 	err = 0;
-	if (!atomic_inc_not_zero(&num_counters)) {
+	if (!atomic_inc_not_zero(&num_events)) {
 		mutex_lock(&pmc_reserve_mutex);
-		if (atomic_read(&num_counters) == 0 &&
-		    reserve_pmc_hardware(perf_counter_interrupt))
+		if (atomic_read(&num_events) == 0 &&
+		    reserve_pmc_hardware(perf_event_interrupt))
 			err = -EBUSY;
 		else
-			atomic_inc(&num_counters);
+			atomic_inc(&num_events);
 		mutex_unlock(&pmc_reserve_mutex);
 	}
-	counter->destroy = hw_perf_counter_destroy;
+	event->destroy = hw_perf_event_destroy;
 
 	if (err)
 		return ERR_PTR(err);
@@ -1124,28 +1124,28 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 }
 
 /*
- * A counter has overflowed; update its count and record
+ * An event has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
  * here so there is no possibility of being interrupted.
  */
-static void record_and_restart(struct perf_counter *counter, unsigned long val,
+static void record_and_restart(struct perf_event *event, unsigned long val,
 			       struct pt_regs *regs, int nmi)
 {
-	u64 period = counter->hw.sample_period;
+	u64 period = event->hw.sample_period;
 	s64 prev, delta, left;
 	int record = 0;
 
 	/* we don't have to worry about interrupts here */
-	prev = atomic64_read(&counter->hw.prev_count);
+	prev = atomic64_read(&event->hw.prev_count);
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &counter->count);
+	atomic64_add(delta, &event->count);
 
 	/*
-	 * See if the total period for this counter has expired,
+	 * See if the total period for this event has expired,
 	 * and update for the next period.
 	 */
 	val = 0;
-	left = atomic64_read(&counter->hw.period_left) - delta;
+	left = atomic64_read(&event->hw.period_left) - delta;
 	if (period) {
 		if (left <= 0) {
 			left += period;
@@ -1163,18 +1163,18 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
1163 if (record) { 1163 if (record) {
1164 struct perf_sample_data data = { 1164 struct perf_sample_data data = {
1165 .addr = 0, 1165 .addr = 0,
1166 .period = counter->hw.last_period, 1166 .period = event->hw.last_period,
1167 }; 1167 };
1168 1168
1169 if (counter->attr.sample_type & PERF_SAMPLE_ADDR) 1169 if (event->attr.sample_type & PERF_SAMPLE_ADDR)
1170 perf_get_data_addr(regs, &data.addr); 1170 perf_get_data_addr(regs, &data.addr);
1171 1171
1172 if (perf_counter_overflow(counter, nmi, &data, regs)) { 1172 if (perf_event_overflow(event, nmi, &data, regs)) {
1173 /* 1173 /*
1174 * Interrupts are coming too fast - throttle them 1174 * Interrupts are coming too fast - throttle them
1175 * by setting the counter to 0, so it will be 1175 * by setting the counter to 0, so it will be
1176 * at least 2^30 cycles until the next interrupt 1176 * at least 2^30 cycles until the next interrupt
1177 * (assuming each counter counts at most 2 counts 1177 * (assuming each event counts at most 2 counts
1178 * per cycle). 1178 * per cycle).
1179 */ 1179 */
1180 val = 0; 1180 val = 0;
@@ -1182,15 +1182,15 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
1182 } 1182 }
1183 } 1183 }
1184 1184
1185 write_pmc(counter->hw.idx, val); 1185 write_pmc(event->hw.idx, val);
1186 atomic64_set(&counter->hw.prev_count, val); 1186 atomic64_set(&event->hw.prev_count, val);
1187 atomic64_set(&counter->hw.period_left, left); 1187 atomic64_set(&event->hw.period_left, left);
1188 perf_counter_update_userpage(counter); 1188 perf_event_update_userpage(event);
1189} 1189}
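Two details in record_and_restart() deserve a note. The PMCs are 32-bit registers read into wider variables, so the delta must be reduced modulo 2^32 — (val - prev) & 0xfffffffful is correct even when the counter wrapped between the two reads. And a PMC signals when bit 31 becomes set, which is why the elided lines of this hunk program it to 0x80000000 - left for the next period, and why writing 0 in the throttling case buys at least 2^31 counts, i.e. 2^30 cycles if an event can occur at most twice per cycle. A small user-space illustration of the wraparound arithmetic (plain C, hypothetical values):

#include <stdint.h>
#include <stdio.h>

/* prev and val hold 32-bit PMC reads in 64-bit variables, so the
 * mask is what makes the subtraction wrap the way the hardware does */
static uint64_t pmc_delta(uint64_t prev, uint64_t val)
{
	return (val - prev) & 0xfffffffful;
}

int main(void)
{
	/* counter wrapped: 0xfffffff0 -> 0x00000010 is 32 counts */
	printf("%llu\n", (unsigned long long)pmc_delta(0xfffffff0, 0x10));
	/* PMC value that overflows after another 0x1000 counts */
	printf("0x%llx\n", 0x80000000ull - 0x1000);
	return 0;
}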
1190 1190
1191/* 1191/*
1192 * Called from generic code to get the misc flags (i.e. processor mode) 1192 * Called from generic code to get the misc flags (i.e. processor mode)
1193 * for an event. 1193 * for an event.
1194 */ 1194 */
1195unsigned long perf_misc_flags(struct pt_regs *regs) 1195unsigned long perf_misc_flags(struct pt_regs *regs)
1196{ 1196{
@@ -1198,13 +1198,13 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
1198 1198
1199 if (flags) 1199 if (flags)
1200 return flags; 1200 return flags;
1201 return user_mode(regs) ? PERF_EVENT_MISC_USER : 1201 return user_mode(regs) ? PERF_RECORD_MISC_USER :
1202 PERF_EVENT_MISC_KERNEL; 1202 PERF_RECORD_MISC_KERNEL;
1203} 1203}
1204 1204
1205/* 1205/*
1206 * Called from generic code to get the instruction pointer 1206 * Called from generic code to get the instruction pointer
1207 * for an event. 1207 * for an event.
1208 */ 1208 */
1209unsigned long perf_instruction_pointer(struct pt_regs *regs) 1209unsigned long perf_instruction_pointer(struct pt_regs *regs)
1210{ 1210{
@@ -1220,17 +1220,17 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
1220/* 1220/*
1221 * Performance monitor interrupt stuff 1221 * Performance monitor interrupt stuff
1222 */ 1222 */
1223static void perf_counter_interrupt(struct pt_regs *regs) 1223static void perf_event_interrupt(struct pt_regs *regs)
1224{ 1224{
1225 int i; 1225 int i;
1226 struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); 1226 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1227 struct perf_counter *counter; 1227 struct perf_event *event;
1228 unsigned long val; 1228 unsigned long val;
1229 int found = 0; 1229 int found = 0;
1230 int nmi; 1230 int nmi;
1231 1231
1232 if (cpuhw->n_limited) 1232 if (cpuhw->n_limited)
1233 freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), 1233 freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
1234 mfspr(SPRN_PMC6)); 1234 mfspr(SPRN_PMC6));
1235 1235
1236 perf_read_regs(regs); 1236 perf_read_regs(regs);
@@ -1241,26 +1241,26 @@ static void perf_counter_interrupt(struct pt_regs *regs)
1241 else 1241 else
1242 irq_enter(); 1242 irq_enter();
1243 1243
1244 for (i = 0; i < cpuhw->n_counters; ++i) { 1244 for (i = 0; i < cpuhw->n_events; ++i) {
1245 counter = cpuhw->counter[i]; 1245 event = cpuhw->event[i];
1246 if (!counter->hw.idx || is_limited_pmc(counter->hw.idx)) 1246 if (!event->hw.idx || is_limited_pmc(event->hw.idx))
1247 continue; 1247 continue;
1248 val = read_pmc(counter->hw.idx); 1248 val = read_pmc(event->hw.idx);
1249 if ((int)val < 0) { 1249 if ((int)val < 0) {
1250 /* counter has overflowed */ 1250 /* event has overflowed */
1251 found = 1; 1251 found = 1;
1252 record_and_restart(counter, val, regs, nmi); 1252 record_and_restart(event, val, regs, nmi);
1253 } 1253 }
1254 } 1254 }
1255 1255
1256 /* 1256 /*
1257 * In case we didn't find and reset the counter that caused 1257 * In case we didn't find and reset the event that caused
1258 * the interrupt, scan all counters and reset any that are 1258 * the interrupt, scan all counters and reset any that are
1259 * negative, to avoid getting continual interrupts. 1259 * negative, to avoid getting continual interrupts.
1260 * Any that we processed in the previous loop will not be negative. 1260 * Any that we processed in the previous loop will not be negative.
1261 */ 1261 */
1262 if (!found) { 1262 if (!found) {
1263 for (i = 0; i < ppmu->n_counter; ++i) { 1263 for (i = 0; i < ppmu->n_counter; ++i) {
1264 if (is_limited_pmc(i + 1)) 1264 if (is_limited_pmc(i + 1))
1265 continue; 1265 continue;
1266 val = read_pmc(i + 1); 1266 val = read_pmc(i + 1);
@@ -1271,9 +1271,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
1271 1271
1272 /* 1272 /*
1273 * Reset MMCR0 to its normal value. This will set PMXE and 1273 * Reset MMCR0 to its normal value. This will set PMXE and
1274 * clear FC (freeze counters) and PMAO (perf mon alert occurred) 1274 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
1275 * and thus allow interrupts to occur again. 1275 * and thus allow interrupts to occur again.
1276 * XXX might want to use MSR.PM to keep the counters frozen until 1276 * XXX might want to use MSR.PM to keep the counters frozen until
1277 * we get back out of this interrupt. 1277 * we get back out of this interrupt.
1278 */ 1278 */
1279 write_mmcr0(cpuhw, cpuhw->mmcr[0]); 1279 write_mmcr0(cpuhw, cpuhw->mmcr[0]);
@@ -1284,9 +1284,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
1284 irq_exit(); 1284 irq_exit();
1285} 1285}
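The overflow test in the handler is a sign check: since a PMC raises its alert when bit 31 becomes set, casting the 32-bit value to int and testing it against zero identifies every counter with a pending overflow — the same test the fallback scan uses to quiet counters it cannot attribute to an event. Rewriting MMCR0 at the end then sets PMXE and clears the freeze/alert bits, re-arming the exception. The test, in isolation (assuming 32-bit mfspr() results):

/* bit 31 set <=> the PMC passed 0x80000000 and is signalling */
static inline int pmc_overflowed(unsigned long val)
{
	return (int)val < 0;
}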
1286 1286
1287void hw_perf_counter_setup(int cpu) 1287void hw_perf_event_setup(int cpu)
1288{ 1288{
1289 struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu); 1289 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
1290 1290
1291 if (!ppmu) 1291 if (!ppmu)
1292 return; 1292 return;
@@ -1308,7 +1308,7 @@ int register_power_pmu(struct power_pmu *pmu)
1308 * Use FCHV to ignore kernel events if MSR.HV is set. 1308 * Use FCHV to ignore kernel events if MSR.HV is set.
1309 */ 1309 */
1310 if (mfmsr() & MSR_HV) 1310 if (mfmsr() & MSR_HV)
1311 freeze_counters_kernel = MMCR0_FCHV; 1311 freeze_events_kernel = MMCR0_FCHV;
1312#endif /* CONFIG_PPC64 */ 1312#endif /* CONFIG_PPC64 */
1313 1313
1314 return 0; 1314 return 0;
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 3c90a3d9173e..2a361cdda635 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 31918af3e355..0f4c1c73a6ad 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 867f6f663963..c351b3a57fbb 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index fa21890531da..ca399ba5034c 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 018d094d92f9..28a4daacdc02 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 75dccb71a043..479574413a93 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <asm/reg.h> 13#include <asm/reg.h>
14#include <asm/cputable.h> 14#include <asm/cputable.h>
15 15
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 465e498bcb33..df45a7449a66 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
53#include <linux/posix-timers.h> 53#include <linux/posix-timers.h>
54#include <linux/irq.h> 54#include <linux/irq.h>
55#include <linux/delay.h> 55#include <linux/delay.h>
56#include <linux/perf_counter.h> 56#include <linux/perf_event.h>
57 57
58#include <asm/io.h> 58#include <asm/io.h>
59#include <asm/processor.h> 59#include <asm/processor.h>
@@ -527,25 +527,25 @@ void __init iSeries_time_init_early(void)
527} 527}
528#endif /* CONFIG_PPC_ISERIES */ 528#endif /* CONFIG_PPC_ISERIES */
529 529
530#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32) 530#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
531DEFINE_PER_CPU(u8, perf_counter_pending); 531DEFINE_PER_CPU(u8, perf_event_pending);
532 532
533void set_perf_counter_pending(void) 533void set_perf_event_pending(void)
534{ 534{
535 get_cpu_var(perf_counter_pending) = 1; 535 get_cpu_var(perf_event_pending) = 1;
536 set_dec(1); 536 set_dec(1);
537 put_cpu_var(perf_counter_pending); 537 put_cpu_var(perf_event_pending);
538} 538}
539 539
540#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending) 540#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
541#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0 541#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
542 542
543#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ 543#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
544 544
545#define test_perf_counter_pending() 0 545#define test_perf_event_pending() 0
546#define clear_perf_counter_pending() 546#define clear_perf_event_pending()
547 547
548#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ 548#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
549 549
550/* 550/*
551 * For iSeries shared processors, we have to let the hypervisor 551 * For iSeries shared processors, we have to let the hypervisor
@@ -573,9 +573,9 @@ void timer_interrupt(struct pt_regs * regs)
573 set_dec(DECREMENTER_MAX); 573 set_dec(DECREMENTER_MAX);
574 574
575#ifdef CONFIG_PPC32 575#ifdef CONFIG_PPC32
576 if (test_perf_counter_pending()) { 576 if (test_perf_event_pending()) {
577 clear_perf_counter_pending(); 577 clear_perf_event_pending();
578 perf_counter_do_pending(); 578 perf_event_do_pending();
579 } 579 }
580 if (atomic_read(&ppc_n_lost_interrupts) != 0) 580 if (atomic_read(&ppc_n_lost_interrupts) != 0)
581 do_IRQ(regs); 581 do_IRQ(regs);
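The PPC32 path has no dedicated way to raise a software interrupt on itself, so set_perf_event_pending() combines a per-CPU flag with set_dec(1): the flag is published first, then the decrementer is armed to expire after one tick, guaranteeing that timer_interrupt() runs almost immediately and sees the flag. A stripped-down sketch of this flag-then-poke pattern, with illustrative stand-ins (poke_timer() for set_dec(1), do_pending_work() for perf_event_do_pending()):

static volatile int pending;		/* a per-CPU u8 in the real code */

static void poke_timer(void)
{
	/* stand-in: arrange for the timer interrupt to fire at once */
}

static void do_pending_work(void)
{
	/* stand-in: drain whatever was deferred */
}

/* producer side, cf. set_perf_event_pending() */
static void set_pending(void)
{
	pending = 1;		/* publish the request first ... */
	poke_timer();		/* ... then force the interrupt */
}

/* consumer side, cf. the top of timer_interrupt() */
static void timer_tick(void)
{
	if (pending) {
		pending = 0;
		do_pending_work();
	}
}

Ordering is what makes this safe on a single CPU: because the flag is set before the decrementer is poked, the interrupt handler can never observe the poke without the flag.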