author    Ingo Molnar <mingo@elte.hu>  2009-09-21 06:02:48 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-09-21 08:28:04 -0400
commit    cdd6c482c9ff9c55475ee7392ec8f672eddb7be6 (patch)
tree      81f98a3ab46c589792057fe2392c1e10f8ad7893 /arch/sparc
parent    dfc65094d0313cc48969fa60bcf33d693aeb05a7 (diff)
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!

In the past few months the perfcounters subsystem has grown out its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.

Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and less
appropriate.

All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables and
API names. (in an ABI compatible fashion)

The word 'event' is also a bit shorter than 'counter' - which makes it
slightly more convenient to write/handle as well.

Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.

User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)

This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES

... to keep it as correct as possible. This script can also be used by
anyone who has pending perfcounters patches - it converts a Linux
kernel tree over to the new naming. We tried to time this change to
the point in time where the amount of pending patches is the smallest:
the end of the merge window.

Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.

( NOTE: 'counters' are still the proper terminology when we deal with
  hardware registers - and these sed scripts are a bit over-eager in
  renaming them. I've undone some of that, but in case there's
  something left where 'counter' would be better than 'event' we can
  undo that on an individual basis instead of touching an otherwise
  nicely automated patch. )

Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
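A minimal usage sketch for anyone carrying pending perfcounters
patches, assuming the conversion script above is saved to a file named
rename-perf.sh (that file name and the git inspection step are
illustrative assumptions, not part of the original patch flow):

  # Run from the top of a kernel tree with the pending patches applied.
  # The script's sed -i edits files in place and mv renames
  # perf_counter.[ch] files, so work on a branch or a throwaway copy.
  cd linux                 # assumed location of the kernel checkout
  sh ../rename-perf.sh     # the find/sed/mv script quoted above
  git diff --stat          # review the automated rename before committing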
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                       |   4
-rw-r--r--  arch/sparc/include/asm/perf_counter.h    |  14
-rw-r--r--  arch/sparc/include/asm/perf_event.h      |  14
-rw-r--r--  arch/sparc/include/asm/unistd.h          |   2
-rw-r--r--  arch/sparc/kernel/Makefile               |   2
-rw-r--r--  arch/sparc/kernel/nmi.c                  |   4
-rw-r--r--  arch/sparc/kernel/pcr.c                  |  10
-rw-r--r--  arch/sparc/kernel/perf_event.c (renamed from arch/sparc/kernel/perf_counter.c) | 178
-rw-r--r--  arch/sparc/kernel/systbls_32.S           |   2
-rw-r--r--  arch/sparc/kernel/systbls_64.S           |   4
10 files changed, 117 insertions(+), 117 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 86b82348b97c..97fca4695e0b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,7 +25,7 @@ config SPARC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select RTC_CLASS
 	select RTC_DRV_M48T59
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 
@@ -47,7 +47,7 @@ config SPARC64
 	select RTC_DRV_BQ4802
 	select RTC_DRV_SUN4V
 	select RTC_DRV_STARFIRE
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/sparc/include/asm/perf_counter.h b/arch/sparc/include/asm/perf_counter.h
deleted file mode 100644
index 5d7a8ca0e491..000000000000
--- a/arch/sparc/include/asm/perf_counter.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __ASM_SPARC_PERF_COUNTER_H
-#define __ASM_SPARC_PERF_COUNTER_H
-
-extern void set_perf_counter_pending(void);
-
-#define PERF_COUNTER_INDEX_OFFSET	0
-
-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-#else
-static inline void init_hw_perf_counters(void) { }
-#endif
-
-#endif
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
new file mode 100644
index 000000000000..7e2669894ce8
--- /dev/null
+++ b/arch/sparc/include/asm/perf_event.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_SPARC_PERF_EVENT_H
+#define __ASM_SPARC_PERF_EVENT_H
+
+extern void set_perf_event_pending(void);
+
+#define PERF_EVENT_INDEX_OFFSET	0
+
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+#else
+static inline void init_hw_perf_events(void) { }
+#endif
+
+#endif
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 706df669f3b8..42f2316c3eaa 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -395,7 +395,7 @@
 #define __NR_preadv		324
 #define __NR_pwritev		325
 #define __NR_rt_tgsigqueueinfo	326
-#define __NR_perf_counter_open	327
+#define __NR_perf_event_open	327
 
 #define NR_SYSCALLS		328
 
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 247cc620cee5..3a048fad7ee2 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -104,5 +104,5 @@ obj-$(CONFIG_AUDIT) += audit.o
 audit--$(CONFIG_AUDIT)	:= compat_audit.o
 obj-$(CONFIG_COMPAT) += $(audit--y)
 
-pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o
+pc--$(CONFIG_PERF_EVENTS) := perf_event.o
 obj-$(CONFIG_SPARC64) += $(pc--y)
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 378eb53e0776..b129611590a4 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -19,7 +19,7 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
 #include <asm/ptrace.h>
 #include <asm/local.h>
 #include <asm/pcr.h>
@@ -265,7 +265,7 @@ int __init nmi_init(void)
 		}
 	}
 	if (!err)
-		init_hw_perf_counters();
+		init_hw_perf_events();
 
 	return err;
 }
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 68ff00107073..2d94e7a03af5 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/pil.h>
 #include <asm/pcr.h>
@@ -15,7 +15,7 @@
 
 /* This code is shared between various users of the performance
  * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
- * perf_counter support layer.
+ * perf_event support layer.
  */
 
 #define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
@@ -42,14 +42,14 @@ void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-#ifdef CONFIG_PERF_COUNTERS
-	perf_counter_do_pending();
+#ifdef CONFIG_PERF_EVENTS
+	perf_event_do_pending();
 #endif
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
 	set_softint(1 << PIL_DEFERRED_PCR_WORK);
 }
diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_event.c
index b1265ce8a053..2d6a1b10c81d 100644
--- a/arch/sparc/kernel/perf_counter.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1,8 +1,8 @@
-/* Performance counter support for sparc64.
+/* Performance event support for sparc64.
  *
  * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
  *
- * This code is based almost entirely upon the x86 perf counter
+ * This code is based almost entirely upon the x86 perf event
  * code, which is:
  *
  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
@@ -12,7 +12,7 @@
  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/kprobes.h>
 #include <linux/kernel.h>
 #include <linux/kdebug.h>
@@ -46,19 +46,19 @@
  * normal code.
  */
 
-#define MAX_HWCOUNTERS			2
+#define MAX_HWEVENTS			2
 #define MAX_PERIOD			((1UL << 32) - 1)
 
 #define PIC_UPPER_INDEX			0
 #define PIC_LOWER_INDEX			1
 
-struct cpu_hw_counters {
-	struct perf_counter	*counters[MAX_HWCOUNTERS];
-	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
-	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
+struct cpu_hw_events {
+	struct perf_event	*events[MAX_HWEVENTS];
+	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
 	int enabled;
 };
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
 struct perf_event_map {
 	u16	encoding;
@@ -87,9 +87,9 @@ static const struct perf_event_map ultra3i_perfmon_event_map[] = {
 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
 };
 
-static const struct perf_event_map *ultra3i_event_map(int event)
+static const struct perf_event_map *ultra3i_event_map(int event_id)
 {
-	return &ultra3i_perfmon_event_map[event];
+	return &ultra3i_perfmon_event_map[event_id];
 }
 
 static const struct sparc_pmu ultra3i_pmu = {
@@ -111,9 +111,9 @@ static const struct perf_event_map niagara2_perfmon_event_map[] = {
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
 };
 
-static const struct perf_event_map *niagara2_event_map(int event)
+static const struct perf_event_map *niagara2_event_map(int event_id)
 {
-	return &niagara2_perfmon_event_map[event];
+	return &niagara2_perfmon_event_map[event_id];
 }
 
 static const struct sparc_pmu niagara2_pmu = {
@@ -130,13 +130,13 @@ static const struct sparc_pmu niagara2_pmu = {
 
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 
-static u64 event_encoding(u64 event, int idx)
+static u64 event_encoding(u64 event_id, int idx)
 {
 	if (idx == PIC_UPPER_INDEX)
-		event <<= sparc_pmu->upper_shift;
+		event_id <<= sparc_pmu->upper_shift;
 	else
-		event <<= sparc_pmu->lower_shift;
-	return event;
+		event_id <<= sparc_pmu->lower_shift;
+	return event_id;
 }
 
 static u64 mask_for_index(int idx)
@@ -151,7 +151,7 @@ static u64 nop_for_index(int idx)
 			      sparc_pmu->lower_nop, idx);
 }
 
-static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
 					    int idx)
 {
 	u64 val, mask = mask_for_index(idx);
@@ -160,7 +160,7 @@ static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
 	pcr_ops->write((val & ~mask) | hwc->config);
 }
 
-static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
 					     int idx)
 {
 	u64 mask = mask_for_index(idx);
@@ -172,7 +172,7 @@ static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
 
 void hw_perf_enable(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
 	int i;
 
@@ -184,9 +184,9 @@ void hw_perf_enable(void)
 
 	val = pcr_ops->read();
 
-	for (i = 0; i < MAX_HWCOUNTERS; i++) {
-		struct perf_counter *cp = cpuc->counters[i];
-		struct hw_perf_counter *hwc;
+	for (i = 0; i < MAX_HWEVENTS; i++) {
+		struct perf_event *cp = cpuc->events[i];
+		struct hw_perf_event *hwc;
 
 		if (!cp)
 			continue;
@@ -199,7 +199,7 @@ void hw_perf_enable(void)
 
 void hw_perf_disable(void)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
 
 	if (!cpuc->enabled)
@@ -241,8 +241,8 @@ static void write_pmc(int idx, u64 val)
 	write_pic(pic);
 }
 
-static int sparc_perf_counter_set_period(struct perf_counter *counter,
-					 struct hw_perf_counter *hwc, int idx)
+static int sparc_perf_event_set_period(struct perf_event *event,
+					 struct hw_perf_event *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
@@ -268,33 +268,33 @@ static int sparc_perf_counter_set_period(struct perf_counter *counter,
 
 	write_pmc(idx, (u64)(-left) & 0xffffffff);
 
-	perf_counter_update_userpage(counter);
+	perf_event_update_userpage(event);
 
 	return ret;
 }
 
-static int sparc_pmu_enable(struct perf_counter *counter)
+static int sparc_pmu_enable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
 	if (test_and_set_bit(idx, cpuc->used_mask))
 		return -EAGAIN;
 
-	sparc_pmu_disable_counter(hwc, idx);
+	sparc_pmu_disable_event(hwc, idx);
 
-	cpuc->counters[idx] = counter;
+	cpuc->events[idx] = event;
 	set_bit(idx, cpuc->active_mask);
 
-	sparc_perf_counter_set_period(counter, hwc, idx);
-	sparc_pmu_enable_counter(hwc, idx);
-	perf_counter_update_userpage(counter);
+	sparc_perf_event_set_period(event, hwc, idx);
+	sparc_pmu_enable_event(hwc, idx);
+	perf_event_update_userpage(event);
 	return 0;
 }
 
-static u64 sparc_perf_counter_update(struct perf_counter *counter,
-				     struct hw_perf_counter *hwc, int idx)
+static u64 sparc_perf_event_update(struct perf_event *event,
+				     struct hw_perf_event *hwc, int idx)
 {
 	int shift = 64 - 32;
 	u64 prev_raw_count, new_raw_count;
@@ -311,79 +311,79 @@ again:
 	delta = (new_raw_count << shift) - (prev_raw_count << shift);
 	delta >>= shift;
 
-	atomic64_add(delta, &counter->count);
+	atomic64_add(delta, &event->count);
 	atomic64_sub(delta, &hwc->period_left);
 
 	return new_raw_count;
 }
 
-static void sparc_pmu_disable(struct perf_counter *counter)
+static void sparc_pmu_disable(struct perf_event *event)
 {
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
 	clear_bit(idx, cpuc->active_mask);
-	sparc_pmu_disable_counter(hwc, idx);
+	sparc_pmu_disable_event(hwc, idx);
 
 	barrier();
 
-	sparc_perf_counter_update(counter, hwc, idx);
-	cpuc->counters[idx] = NULL;
+	sparc_perf_event_update(event, hwc, idx);
+	cpuc->events[idx] = NULL;
 	clear_bit(idx, cpuc->used_mask);
 
-	perf_counter_update_userpage(counter);
+	perf_event_update_userpage(event);
 }
 
-static void sparc_pmu_read(struct perf_counter *counter)
+static void sparc_pmu_read(struct perf_event *event)
 {
-	struct hw_perf_counter *hwc = &counter->hw;
-	sparc_perf_counter_update(counter, hwc, hwc->idx);
+	struct hw_perf_event *hwc = &event->hw;
+	sparc_perf_event_update(event, hwc, hwc->idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_counter *counter)
+static void sparc_pmu_unthrottle(struct perf_event *event)
 {
-	struct hw_perf_counter *hwc = &counter->hw;
-	sparc_pmu_enable_counter(hwc, hwc->idx);
+	struct hw_perf_event *hwc = &event->hw;
+	sparc_pmu_enable_event(hwc, hwc->idx);
 }
 
-static atomic_t active_counters = ATOMIC_INIT(0);
+static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
-void perf_counter_grab_pmc(void)
+void perf_event_grab_pmc(void)
 {
-	if (atomic_inc_not_zero(&active_counters))
+	if (atomic_inc_not_zero(&active_events))
 		return;
 
 	mutex_lock(&pmc_grab_mutex);
-	if (atomic_read(&active_counters) == 0) {
+	if (atomic_read(&active_events) == 0) {
 		if (atomic_read(&nmi_active) > 0) {
 			on_each_cpu(stop_nmi_watchdog, NULL, 1);
 			BUG_ON(atomic_read(&nmi_active) != 0);
 		}
-		atomic_inc(&active_counters);
+		atomic_inc(&active_events);
 	}
 	mutex_unlock(&pmc_grab_mutex);
 }
 
-void perf_counter_release_pmc(void)
+void perf_event_release_pmc(void)
 {
-	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
+	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
 		if (atomic_read(&nmi_active) == 0)
 			on_each_cpu(start_nmi_watchdog, NULL, 1);
 		mutex_unlock(&pmc_grab_mutex);
 	}
 }
 
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
 {
-	perf_counter_release_pmc();
+	perf_event_release_pmc();
 }
 
-static int __hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_event_init(struct perf_event *event)
 {
-	struct perf_counter_attr *attr = &counter->attr;
-	struct hw_perf_counter *hwc = &counter->hw;
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
 	const struct perf_event_map *pmap;
 	u64 enc;
 
@@ -396,8 +396,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	if (attr->config >= sparc_pmu->max_events)
 		return -EINVAL;
 
-	perf_counter_grab_pmc();
-	counter->destroy = hw_perf_counter_destroy;
+	perf_event_grab_pmc();
+	event->destroy = hw_perf_event_destroy;
 
 	/* We save the enable bits in the config_base.  So to
 	 * turn off sampling just write 'config', and to enable
@@ -439,16 +439,16 @@ static const struct pmu pmu = {
 	.unthrottle	= sparc_pmu_unthrottle,
 };
 
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
-	int err = __hw_perf_counter_init(counter);
+	int err = __hw_perf_event_init(event);
 
 	if (err)
 		return ERR_PTR(err);
 	return &pmu;
 }
 
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
 {
 	unsigned long flags;
 	u64 pcr, pic;
@@ -471,16 +471,16 @@ void perf_counter_print_debug(void)
 	local_irq_restore(flags);
 }
 
-static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
+static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 					      unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
 	struct perf_sample_data data;
-	struct cpu_hw_counters *cpuc;
+	struct cpu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
-	if (!atomic_read(&active_counters))
+	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;
 
 	switch (cmd) {
@@ -495,32 +495,32 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
 
 	data.addr = 0;
 
-	cpuc = &__get_cpu_var(cpu_hw_counters);
-	for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
-		struct perf_counter *counter = cpuc->counters[idx];
-		struct hw_perf_counter *hwc;
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx < MAX_HWEVENTS; idx++) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		hwc = &counter->hw;
-		val = sparc_perf_counter_update(counter, hwc, idx);
+		hwc = &event->hw;
+		val = sparc_perf_event_update(event, hwc, idx);
 		if (val & (1ULL << 31))
 			continue;
 
-		data.period = counter->hw.last_period;
-		if (!sparc_perf_counter_set_period(counter, hwc, idx))
+		data.period = event->hw.last_period;
+		if (!sparc_perf_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, &data, regs))
-			sparc_pmu_disable_counter(hwc, idx);
+		if (perf_event_overflow(event, 1, &data, regs))
+			sparc_pmu_disable_event(hwc, idx);
 	}
 
 	return NOTIFY_STOP;
 }
 
-static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
-	.notifier_call		= perf_counter_nmi_handler,
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+	.notifier_call		= perf_event_nmi_handler,
 };
 
 static bool __init supported_pmu(void)
@@ -536,9 +536,9 @@ static bool __init supported_pmu(void)
 	return false;
 }
 
-void __init init_hw_perf_counters(void)
+void __init init_hw_perf_events(void)
 {
-	pr_info("Performance counters: ");
+	pr_info("Performance events: ");
 
 	if (!supported_pmu()) {
 		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
@@ -547,10 +547,10 @@ void __init init_hw_perf_counters(void)
 
 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-	/* All sparc64 PMUs currently have 2 counters.  But this simple
-	 * driver only supports one active counter at a time.
+	/* All sparc64 PMUs currently have 2 events.  But this simple
+	 * driver only supports one active event at a time.
 	 */
-	perf_max_counters = 1;
+	perf_max_events = 1;
 
-	register_die_notifier(&perf_counter_nmi_notifier);
+	register_die_notifier(&perf_event_nmi_notifier);
 }
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 04181577cb65..0f1658d37490 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -82,5 +82,5 @@ sys_call_table:
 /*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 /*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
 
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 91b06b7f7acf..009825f6e73c 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -83,7 +83,7 @@ sys_call_table32:
 /*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
 	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
-	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open
 
 #endif /* CONFIG_COMPAT */
 
@@ -158,4 +158,4 @@ sys_call_table:
 /*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open