author    Ingo Molnar <mingo@elte.hu>  2009-09-21 06:02:48 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-09-21 08:28:04 -0400
commit    cdd6c482c9ff9c55475ee7392ec8f672eddb7be6 (patch)
tree      81f98a3ab46c589792057fe2392c1e10f8ad7893 /kernel
parent    dfc65094d0313cc48969fa60bcf33d693aeb05a7 (diff)
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!

In the past few months the perfcounters subsystem has grown out its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.

Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and less
appropriate.

All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables and
API names. (in an ABI compatible fashion)

The word 'event' is also a bit shorter than 'counter' - which makes it
slightly more convenient to write/handle as well.

Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.

User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)

This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES

... to keep it as correct as possible. This script can also be used by
anyone who has pending perfcounters patches - it converts a Linux
kernel tree over to the new naming. We tried to time this change to
the point in time where the amount of pending patches is the smallest:
the end of the merge window.

Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.

( NOTE: 'counters' are still the proper terminology when we deal with
  hardware registers - and these sed scripts are a bit over-eager in
  renaming them. I've undone some of that, but in case there's
  something left where 'counter' would be better than 'event' we can
  undo that on an individual basis instead of touching an otherwise
  nicely automated patch. )

Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
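The ordering of the first script's sed rules is significant: the old
PERF_EVENT_* sampling-record constants are moved aside to PERF_RECORD_*
before PERF_COUNTER* becomes PERF_EVENT*, so the old and new uses of the
PERF_EVENT_ prefix never collide. A minimal illustration of the two
generations of names (the constants match the old and new ABI headers;
the fragment itself is not part of the patch):

  /* before the patch: */
  #define PERF_EVENT_MMAP        1    /* a sampling-record type */
  #define PERF_COUNTER_STATE_OFF (-1) /* a counter state */

  /* after both rules have run, in order: */
  #define PERF_RECORD_MMAP       1    /* rule 1: PERF_EVENT_  -> PERF_RECORD_ */
  #define PERF_EVENT_STATE_OFF   (-1) /* rule 2: PERF_COUNTER -> PERF_EVENT   */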
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                  2
-rw-r--r--  kernel/exit.c                    8
-rw-r--r--  kernel/fork.c                    8
-rw-r--r--  kernel/perf_event.c (renamed from kernel/perf_counter.c)  2422
-rw-r--r--  kernel/sched.c                  14
-rw-r--r--  kernel/sys.c                    10
-rw-r--r--  kernel/sys_ni.c                  2
-rw-r--r--  kernel/sysctl.c                 22
-rw-r--r--  kernel/timer.c                   4
-rw-r--r--  kernel/trace/trace_syscalls.c    6
10 files changed, 1249 insertions, 1249 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 3d9c7e27e3f9..e26a546eac44 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -96,7 +96,7 @@ obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/exit.c b/kernel/exit.c
index ae5d8660ddff..e47ee8a06135 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -47,7 +47,7 @@
 #include <linux/tracehook.h>
 #include <linux/fs_struct.h>
 #include <linux/init_task.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <trace/events/sched.h>
 
 #include <asm/uaccess.h>
@@ -154,8 +154,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 {
         struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
-#ifdef CONFIG_PERF_COUNTERS
-        WARN_ON_ONCE(tsk->perf_counter_ctxp);
+#ifdef CONFIG_PERF_EVENTS
+        WARN_ON_ONCE(tsk->perf_event_ctxp);
 #endif
         trace_sched_process_free(tsk);
         put_task_struct(tsk);
@@ -981,7 +981,7 @@ NORET_TYPE void do_exit(long code)
          * Flush inherited counters to the parent - before the parent
          * gets woken up by child-exit notifications.
          */
-        perf_counter_exit_task(tsk);
+        perf_event_exit_task(tsk);
 
         exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
diff --git a/kernel/fork.c b/kernel/fork.c
index bfee931ee3fb..2cebfb23b0b8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -61,7 +61,7 @@
 #include <linux/blkdev.h>
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -1078,7 +1078,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         /* Perform scheduler related setup. Assign this task to a CPU. */
         sched_fork(p, clone_flags);
 
-        retval = perf_counter_init_task(p);
+        retval = perf_event_init_task(p);
         if (retval)
                 goto bad_fork_cleanup_policy;
 
@@ -1253,7 +1253,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         write_unlock_irq(&tasklist_lock);
         proc_fork_connector(p);
         cgroup_post_fork(p);
-        perf_counter_fork(p);
+        perf_event_fork(p);
         return p;
 
 bad_fork_free_pid:
@@ -1280,7 +1280,7 @@ bad_fork_cleanup_semundo:
 bad_fork_cleanup_audit:
         audit_free(p);
 bad_fork_cleanup_policy:
-        perf_counter_free_task(p);
+        perf_event_free_task(p);
 #ifdef CONFIG_NUMA
         mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
diff --git a/kernel/perf_counter.c b/kernel/perf_event.c
index 62de0db8092b..6e8b99a04e1e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_event.c
@@ -1,5 +1,5 @@
 /*
- * Performance counter core code
+ * Performance event core code
  *
  * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
@@ -26,66 +26,66 @@
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/irq_regs.h>
 
 /*
- * Each CPU has a list of per CPU counters:
+ * Each CPU has a list of per CPU events:
  */
 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
 
-int perf_max_counters __read_mostly = 1;
+int perf_max_events __read_mostly = 1;
 static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
-static atomic_t nr_counters __read_mostly;
-static atomic_t nr_mmap_counters __read_mostly;
-static atomic_t nr_comm_counters __read_mostly;
-static atomic_t nr_task_counters __read_mostly;
+static atomic_t nr_events __read_mostly;
+static atomic_t nr_mmap_events __read_mostly;
+static atomic_t nr_comm_events __read_mostly;
+static atomic_t nr_task_events __read_mostly;
 
 /*
- * perf counter paranoia level:
+ * perf event paranoia level:
  * -1 - not paranoid at all
  *  0 - disallow raw tracepoint access for unpriv
- *  1 - disallow cpu counters for unpriv
+ *  1 - disallow cpu events for unpriv
  *  2 - disallow kernel profiling for unpriv
  */
-int sysctl_perf_counter_paranoid __read_mostly = 1;
+int sysctl_perf_event_paranoid __read_mostly = 1;
 
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
-        return sysctl_perf_counter_paranoid > -1;
+        return sysctl_perf_event_paranoid > -1;
 }
 
 static inline bool perf_paranoid_cpu(void)
 {
-        return sysctl_perf_counter_paranoid > 0;
+        return sysctl_perf_event_paranoid > 0;
 }
 
 static inline bool perf_paranoid_kernel(void)
 {
-        return sysctl_perf_counter_paranoid > 1;
+        return sysctl_perf_event_paranoid > 1;
 }
 
-int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
+int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
- * max perf counter sample rate
+ * max perf event sample rate
  */
-int sysctl_perf_counter_sample_rate __read_mostly = 100000;
+int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
-static atomic64_t perf_counter_id;
+static atomic64_t perf_event_id;
 
 /*
- * Lock for (sysadmin-configurable) counter reservations:
+ * Lock for (sysadmin-configurable) event reservations:
  */
 static DEFINE_SPINLOCK(perf_resource_lock);
 
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
         return NULL;
 }
@@ -93,18 +93,18 @@ extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counte
 void __weak hw_perf_disable(void) { barrier(); }
 void __weak hw_perf_enable(void) { barrier(); }
 
-void __weak hw_perf_counter_setup(int cpu) { barrier(); }
-void __weak hw_perf_counter_setup_online(int cpu) { barrier(); }
+void __weak hw_perf_event_setup(int cpu) { barrier(); }
+void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
 
 int __weak
-hw_perf_group_sched_in(struct perf_counter *group_leader,
+hw_perf_group_sched_in(struct perf_event *group_leader,
                struct perf_cpu_context *cpuctx,
-               struct perf_counter_context *ctx, int cpu)
+               struct perf_event_context *ctx, int cpu)
 {
         return 0;
 }
 
-void __weak perf_counter_print_debug(void) { }
+void __weak perf_event_print_debug(void) { }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
 
@@ -130,20 +130,20 @@ void perf_enable(void)
         hw_perf_enable();
 }
 
-static void get_ctx(struct perf_counter_context *ctx)
+static void get_ctx(struct perf_event_context *ctx)
 {
         WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
 }
 
 static void free_ctx(struct rcu_head *head)
 {
-        struct perf_counter_context *ctx;
+        struct perf_event_context *ctx;
 
-        ctx = container_of(head, struct perf_counter_context, rcu_head);
+        ctx = container_of(head, struct perf_event_context, rcu_head);
         kfree(ctx);
 }
 
-static void put_ctx(struct perf_counter_context *ctx)
+static void put_ctx(struct perf_event_context *ctx)
 {
         if (atomic_dec_and_test(&ctx->refcount)) {
                 if (ctx->parent_ctx)
@@ -154,7 +154,7 @@ static void put_ctx(struct perf_counter_context *ctx)
         }
 }
 
-static void unclone_ctx(struct perf_counter_context *ctx)
+static void unclone_ctx(struct perf_event_context *ctx)
 {
         if (ctx->parent_ctx) {
                 put_ctx(ctx->parent_ctx);
@@ -163,37 +163,37 @@ static void unclone_ctx(struct perf_counter_context *ctx)
 }
 
 /*
- * If we inherit counters we want to return the parent counter id
+ * If we inherit events we want to return the parent event id
  * to userspace.
  */
-static u64 primary_counter_id(struct perf_counter *counter)
+static u64 primary_event_id(struct perf_event *event)
 {
-        u64 id = counter->id;
+        u64 id = event->id;
 
-        if (counter->parent)
-                id = counter->parent->id;
+        if (event->parent)
+                id = event->parent->id;
 
         return id;
 }
 
 /*
- * Get the perf_counter_context for a task and lock it.
+ * Get the perf_event_context for a task and lock it.
  * This has to cope with with the fact that until it is locked,
  * the context could get moved to another task.
  */
-static struct perf_counter_context *
+static struct perf_event_context *
 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 {
-        struct perf_counter_context *ctx;
+        struct perf_event_context *ctx;
 
         rcu_read_lock();
  retry:
-        ctx = rcu_dereference(task->perf_counter_ctxp);
+        ctx = rcu_dereference(task->perf_event_ctxp);
         if (ctx) {
                 /*
                  * If this context is a clone of another, it might
                  * get swapped for another underneath us by
-                 * perf_counter_task_sched_out, though the
+                 * perf_event_task_sched_out, though the
                  * rcu_read_lock() protects us from any context
                  * getting freed. Lock the context and check if it
                  * got swapped before we could get the lock, and retry
@@ -201,7 +201,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
                  * can't get swapped on us any more.
                  */
                 spin_lock_irqsave(&ctx->lock, *flags);
-                if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
+                if (ctx != rcu_dereference(task->perf_event_ctxp)) {
                         spin_unlock_irqrestore(&ctx->lock, *flags);
                         goto retry;
                 }
@@ -220,9 +220,9 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
  * can't get swapped to another task. This also increments its
  * reference count so that the context can't get freed.
  */
-static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
+static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
 {
-        struct perf_counter_context *ctx;
+        struct perf_event_context *ctx;
         unsigned long flags;
 
         ctx = perf_lock_task_context(task, &flags);
@@ -233,7 +233,7 @@ static struct perf_counter_context *perf_pin_task_context(struct task_struct *ta
         return ctx;
 }
 
-static void perf_unpin_context(struct perf_counter_context *ctx)
+static void perf_unpin_context(struct perf_event_context *ctx)
 {
         unsigned long flags;
 
@@ -244,59 +244,59 @@ static void perf_unpin_context(struct perf_counter_context *ctx)
 }
 
 /*
- * Add a counter from the lists for its context.
+ * Add a event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
-list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
+list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-        struct perf_counter *group_leader = counter->group_leader;
+        struct perf_event *group_leader = event->group_leader;
 
         /*
-         * Depending on whether it is a standalone or sibling counter,
-         * add it straight to the context's counter list, or to the group
+         * Depending on whether it is a standalone or sibling event,
+         * add it straight to the context's event list, or to the group
          * leader's sibling list:
          */
-        if (group_leader == counter)
-                list_add_tail(&counter->group_entry, &ctx->group_list);
+        if (group_leader == event)
+                list_add_tail(&event->group_entry, &ctx->group_list);
         else {
-                list_add_tail(&counter->group_entry, &group_leader->sibling_list);
+                list_add_tail(&event->group_entry, &group_leader->sibling_list);
                 group_leader->nr_siblings++;
         }
 
-        list_add_rcu(&counter->event_entry, &ctx->event_list);
-        ctx->nr_counters++;
-        if (counter->attr.inherit_stat)
+        list_add_rcu(&event->event_entry, &ctx->event_list);
+        ctx->nr_events++;
+        if (event->attr.inherit_stat)
                 ctx->nr_stat++;
 }
 
 /*
- * Remove a counter from the lists for its context.
+ * Remove a event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
-list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
+list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-        struct perf_counter *sibling, *tmp;
+        struct perf_event *sibling, *tmp;
 
-        if (list_empty(&counter->group_entry))
+        if (list_empty(&event->group_entry))
                 return;
-        ctx->nr_counters--;
-        if (counter->attr.inherit_stat)
+        ctx->nr_events--;
+        if (event->attr.inherit_stat)
                 ctx->nr_stat--;
 
-        list_del_init(&counter->group_entry);
-        list_del_rcu(&counter->event_entry);
+        list_del_init(&event->group_entry);
+        list_del_rcu(&event->event_entry);
 
-        if (counter->group_leader != counter)
-                counter->group_leader->nr_siblings--;
+        if (event->group_leader != event)
+                event->group_leader->nr_siblings--;
 
         /*
-         * If this was a group counter with sibling counters then
-         * upgrade the siblings to singleton counters by adding them
+         * If this was a group event with sibling events then
+         * upgrade the siblings to singleton events by adding them
          * to the context list directly:
          */
-        list_for_each_entry_safe(sibling, tmp, &counter->sibling_list, group_entry) {
+        list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
 
                 list_move_tail(&sibling->group_entry, &ctx->group_list);
                 sibling->group_leader = sibling;
@@ -304,62 +304,62 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 }
 
 static void
-counter_sched_out(struct perf_counter *counter,
+event_sched_out(struct perf_event *event,
                   struct perf_cpu_context *cpuctx,
-                  struct perf_counter_context *ctx)
+                  struct perf_event_context *ctx)
 {
-        if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+        if (event->state != PERF_EVENT_STATE_ACTIVE)
                 return;
 
-        counter->state = PERF_COUNTER_STATE_INACTIVE;
-        if (counter->pending_disable) {
-                counter->pending_disable = 0;
-                counter->state = PERF_COUNTER_STATE_OFF;
+        event->state = PERF_EVENT_STATE_INACTIVE;
+        if (event->pending_disable) {
+                event->pending_disable = 0;
+                event->state = PERF_EVENT_STATE_OFF;
         }
-        counter->tstamp_stopped = ctx->time;
-        counter->pmu->disable(counter);
-        counter->oncpu = -1;
+        event->tstamp_stopped = ctx->time;
+        event->pmu->disable(event);
+        event->oncpu = -1;
 
-        if (!is_software_counter(counter))
+        if (!is_software_event(event))
                 cpuctx->active_oncpu--;
         ctx->nr_active--;
-        if (counter->attr.exclusive || !cpuctx->active_oncpu)
+        if (event->attr.exclusive || !cpuctx->active_oncpu)
                 cpuctx->exclusive = 0;
 }
 
 static void
-group_sched_out(struct perf_counter *group_counter,
+group_sched_out(struct perf_event *group_event,
                 struct perf_cpu_context *cpuctx,
-                struct perf_counter_context *ctx)
+                struct perf_event_context *ctx)
 {
-        struct perf_counter *counter;
+        struct perf_event *event;
 
-        if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
+        if (group_event->state != PERF_EVENT_STATE_ACTIVE)
                 return;
 
-        counter_sched_out(group_counter, cpuctx, ctx);
+        event_sched_out(group_event, cpuctx, ctx);
 
         /*
          * Schedule out siblings (if any):
          */
-        list_for_each_entry(counter, &group_counter->sibling_list, group_entry)
-                counter_sched_out(counter, cpuctx, ctx);
+        list_for_each_entry(event, &group_event->sibling_list, group_entry)
+                event_sched_out(event, cpuctx, ctx);
 
-        if (group_counter->attr.exclusive)
+        if (group_event->attr.exclusive)
                 cpuctx->exclusive = 0;
 }
 
 /*
- * Cross CPU call to remove a performance counter
+ * Cross CPU call to remove a performance event
  *
- * We disable the counter on the hardware level first. After that we
+ * We disable the event on the hardware level first. After that we
  * remove it from the context list.
  */
-static void __perf_counter_remove_from_context(void *info)
+static void __perf_event_remove_from_context(void *info)
 {
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-        struct perf_counter *counter = info;
-        struct perf_counter_context *ctx = counter->ctx;
+        struct perf_event *event = info;
+        struct perf_event_context *ctx = event->ctx;
 
         /*
          * If this is a task context, we need to check whether it is
@@ -372,22 +372,22 @@ static void __perf_counter_remove_from_context(void *info)
         spin_lock(&ctx->lock);
         /*
          * Protect the list operation against NMI by disabling the
-         * counters on a global level.
+         * events on a global level.
          */
         perf_disable();
 
-        counter_sched_out(counter, cpuctx, ctx);
+        event_sched_out(event, cpuctx, ctx);
 
-        list_del_counter(counter, ctx);
+        list_del_event(event, ctx);
 
         if (!ctx->task) {
                 /*
-                 * Allow more per task counters with respect to the
+                 * Allow more per task events with respect to the
                  * reservation:
                  */
                 cpuctx->max_pertask =
-                        min(perf_max_counters - ctx->nr_counters,
-                            perf_max_counters - perf_reserved_percpu);
+                        min(perf_max_events - ctx->nr_events,
+                            perf_max_events - perf_reserved_percpu);
         }
 
         perf_enable();
@@ -396,56 +396,56 @@ static void __perf_counter_remove_from_context(void *info)
 
 
 /*
- * Remove the counter from a task's (or a CPU's) list of counters.
+ * Remove the event from a task's (or a CPU's) list of events.
  *
  * Must be called with ctx->mutex held.
  *
- * CPU counters are removed with a smp call. For task counters we only
+ * CPU events are removed with a smp call. For task events we only
  * call when the task is on a CPU.
  *
- * If counter->ctx is a cloned context, callers must make sure that
- * every task struct that counter->ctx->task could possibly point to
+ * If event->ctx is a cloned context, callers must make sure that
+ * every task struct that event->ctx->task could possibly point to
  * remains valid. This is OK when called from perf_release since
  * that only calls us on the top-level context, which can't be a clone.
- * When called from perf_counter_exit_task, it's OK because the
+ * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_counter_remove_from_context(struct perf_counter *counter)
+static void perf_event_remove_from_context(struct perf_event *event)
 {
-        struct perf_counter_context *ctx = counter->ctx;
+        struct perf_event_context *ctx = event->ctx;
         struct task_struct *task = ctx->task;
 
         if (!task) {
                 /*
-                 * Per cpu counters are removed via an smp call and
+                 * Per cpu events are removed via an smp call and
                  * the removal is always sucessful.
                  */
-                smp_call_function_single(counter->cpu,
-                                         __perf_counter_remove_from_context,
-                                         counter, 1);
+                smp_call_function_single(event->cpu,
+                                         __perf_event_remove_from_context,
+                                         event, 1);
                 return;
         }
 
 retry:
-        task_oncpu_function_call(task, __perf_counter_remove_from_context,
-                                 counter);
+        task_oncpu_function_call(task, __perf_event_remove_from_context,
+                                 event);
 
         spin_lock_irq(&ctx->lock);
         /*
          * If the context is active we need to retry the smp call.
          */
-        if (ctx->nr_active && !list_empty(&counter->group_entry)) {
+        if (ctx->nr_active && !list_empty(&event->group_entry)) {
                 spin_unlock_irq(&ctx->lock);
                 goto retry;
         }
 
         /*
          * The lock prevents that this context is scheduled in so we
-         * can remove the counter safely, if the call above did not
+         * can remove the event safely, if the call above did not
          * succeed.
          */
-        if (!list_empty(&counter->group_entry)) {
-                list_del_counter(counter, ctx);
+        if (!list_empty(&event->group_entry)) {
+                list_del_event(event, ctx);
         }
         spin_unlock_irq(&ctx->lock);
 }
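perf_event_remove_from_context() above thus has two shapes: a CPU-bound
event is removed by one cross-call to its CPU, which cannot fail, while
a task-bound event is chased to whichever CPU currently runs the task
and the removal is retried until the context is seen inactive under its
lock. Compressed to its control flow (a sketch with made-up helper
names, not the patch's code):

  static void remove_event(struct event *ev)
  {
          struct ev_ctx *ctx = ev->ctx;

          if (!ctx->task) {
                  /* CPU-bound: run the remover on that CPU, done */
                  run_on_cpu(ev->cpu, __do_remove, ev);
                  return;
          }
  retry:
          /* task-bound: run the remover where the task runs now */
          run_where_task_runs(ctx->task, __do_remove, ev);

          lock(&ctx->lock);
          if (ctx->nr_active && still_listed(ev)) {
                  unlock(&ctx->lock);
                  goto retry;             /* raced with scheduling */
          }
          if (still_listed(ev))
                  unlink(ev, ctx);        /* context idle: safe now */
          unlock(&ctx->lock);
  }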
@@ -458,7 +458,7 @@ static inline u64 perf_clock(void)
 /*
  * Update the record of the current time in a context.
  */
-static void update_context_time(struct perf_counter_context *ctx)
+static void update_context_time(struct perf_event_context *ctx)
 {
         u64 now = perf_clock();
 
@@ -467,51 +467,51 @@ static void update_context_time(struct perf_counter_context *ctx)
 }
 
 /*
- * Update the total_time_enabled and total_time_running fields for a counter.
+ * Update the total_time_enabled and total_time_running fields for a event.
  */
-static void update_counter_times(struct perf_counter *counter)
+static void update_event_times(struct perf_event *event)
 {
-        struct perf_counter_context *ctx = counter->ctx;
+        struct perf_event_context *ctx = event->ctx;
         u64 run_end;
 
-        if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-            counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
+        if (event->state < PERF_EVENT_STATE_INACTIVE ||
+            event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
                 return;
 
-        counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
+        event->total_time_enabled = ctx->time - event->tstamp_enabled;
 
-        if (counter->state == PERF_COUNTER_STATE_INACTIVE)
-                run_end = counter->tstamp_stopped;
+        if (event->state == PERF_EVENT_STATE_INACTIVE)
+                run_end = event->tstamp_stopped;
         else
                 run_end = ctx->time;
 
-        counter->total_time_running = run_end - counter->tstamp_running;
+        event->total_time_running = run_end - event->tstamp_running;
 }
 
 /*
- * Update total_time_enabled and total_time_running for all counters in a group.
+ * Update total_time_enabled and total_time_running for all events in a group.
  */
-static void update_group_times(struct perf_counter *leader)
+static void update_group_times(struct perf_event *leader)
 {
-        struct perf_counter *counter;
+        struct perf_event *event;
 
-        update_counter_times(leader);
-        list_for_each_entry(counter, &leader->sibling_list, group_entry)
-                update_counter_times(counter);
+        update_event_times(leader);
+        list_for_each_entry(event, &leader->sibling_list, group_entry)
+                update_event_times(event);
 }
 
 /*
- * Cross CPU call to disable a performance counter
+ * Cross CPU call to disable a performance event
  */
-static void __perf_counter_disable(void *info)
+static void __perf_event_disable(void *info)
 {
-        struct perf_counter *counter = info;
+        struct perf_event *event = info;
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-        struct perf_counter_context *ctx = counter->ctx;
+        struct perf_event_context *ctx = event->ctx;
 
         /*
-         * If this is a per-task counter, need to check whether this
-         * counter's task is the current task on this cpu.
+         * If this is a per-task event, need to check whether this
+         * event's task is the current task on this cpu.
          */
         if (ctx->task && cpuctx->task_ctx != ctx)
                 return;
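A worked example of the bookkeeping above: an event added at ctx->time
= 0 starts with tstamp_enabled = tstamp_running = tstamp_stopped = 0.
If it is scheduled in at t = 100 (event_sched_in(), further down, does
tstamp_running += ctx->time - tstamp_stopped, making it 100) and
scheduled out at t = 300 (tstamp_stopped = 300), a read at t = 400
while inactive gives total_time_enabled = 400 - 0 = 400 and
total_time_running = run_end - tstamp_running = 300 - 100 = 200: the
event was enabled for 400 time units but actually counting for 200.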
@@ -519,57 +519,57 @@ static void __perf_counter_disable(void *info)
         spin_lock(&ctx->lock);
 
         /*
-         * If the counter is on, turn it off.
+         * If the event is on, turn it off.
          * If it is in error state, leave it in error state.
          */
-        if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
+        if (event->state >= PERF_EVENT_STATE_INACTIVE) {
                 update_context_time(ctx);
-                update_group_times(counter);
-                if (counter == counter->group_leader)
-                        group_sched_out(counter, cpuctx, ctx);
+                update_group_times(event);
+                if (event == event->group_leader)
+                        group_sched_out(event, cpuctx, ctx);
                 else
-                        counter_sched_out(counter, cpuctx, ctx);
-                counter->state = PERF_COUNTER_STATE_OFF;
+                        event_sched_out(event, cpuctx, ctx);
+                event->state = PERF_EVENT_STATE_OFF;
         }
 
         spin_unlock(&ctx->lock);
 }
 
 /*
- * Disable a counter.
+ * Disable a event.
  *
- * If counter->ctx is a cloned context, callers must make sure that
- * every task struct that counter->ctx->task could possibly point to
+ * If event->ctx is a cloned context, callers must make sure that
+ * every task struct that event->ctx->task could possibly point to
  * remains valid. This condition is satisifed when called through
- * perf_counter_for_each_child or perf_counter_for_each because they
- * hold the top-level counter's child_mutex, so any descendant that
- * goes to exit will block in sync_child_counter.
- * When called from perf_pending_counter it's OK because counter->ctx
+ * perf_event_for_each_child or perf_event_for_each because they
+ * hold the top-level event's child_mutex, so any descendant that
+ * goes to exit will block in sync_child_event.
+ * When called from perf_pending_event it's OK because event->ctx
  * is the current context on this CPU and preemption is disabled,
- * hence we can't get into perf_counter_task_sched_out for this context.
+ * hence we can't get into perf_event_task_sched_out for this context.
  */
-static void perf_counter_disable(struct perf_counter *counter)
+static void perf_event_disable(struct perf_event *event)
 {
-        struct perf_counter_context *ctx = counter->ctx;
+        struct perf_event_context *ctx = event->ctx;
         struct task_struct *task = ctx->task;
 
         if (!task) {
                 /*
-                 * Disable the counter on the cpu that it's on
+                 * Disable the event on the cpu that it's on
                  */
-                smp_call_function_single(counter->cpu, __perf_counter_disable,
-                                         counter, 1);
+                smp_call_function_single(event->cpu, __perf_event_disable,
+                                         event, 1);
                 return;
         }
 
  retry:
-        task_oncpu_function_call(task, __perf_counter_disable, counter);
+        task_oncpu_function_call(task, __perf_event_disable, event);
 
         spin_lock_irq(&ctx->lock);
         /*
-         * If the counter is still active, we need to retry the cross-call.
+         * If the event is still active, we need to retry the cross-call.
          */
-        if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
+        if (event->state == PERF_EVENT_STATE_ACTIVE) {
                 spin_unlock_irq(&ctx->lock);
                 goto retry;
         }
@@ -578,73 +578,73 @@ static void perf_counter_disable(struct perf_counter *counter)
          * Since we have the lock this context can't be scheduled
          * in, so we can change the state safely.
          */
-        if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
-                update_group_times(counter);
-                counter->state = PERF_COUNTER_STATE_OFF;
+        if (event->state == PERF_EVENT_STATE_INACTIVE) {
+                update_group_times(event);
+                event->state = PERF_EVENT_STATE_OFF;
         }
 
         spin_unlock_irq(&ctx->lock);
 }
 
 static int
-counter_sched_in(struct perf_counter *counter,
+event_sched_in(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
-                 struct perf_counter_context *ctx,
+                 struct perf_event_context *ctx,
                  int cpu)
 {
-        if (counter->state <= PERF_COUNTER_STATE_OFF)
+        if (event->state <= PERF_EVENT_STATE_OFF)
                 return 0;
 
-        counter->state = PERF_COUNTER_STATE_ACTIVE;
-        counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
+        event->state = PERF_EVENT_STATE_ACTIVE;
+        event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
         /*
          * The new state must be visible before we turn it on in the hardware:
          */
         smp_wmb();
 
-        if (counter->pmu->enable(counter)) {
-                counter->state = PERF_COUNTER_STATE_INACTIVE;
-                counter->oncpu = -1;
+        if (event->pmu->enable(event)) {
+                event->state = PERF_EVENT_STATE_INACTIVE;
+                event->oncpu = -1;
                 return -EAGAIN;
         }
 
-        counter->tstamp_running += ctx->time - counter->tstamp_stopped;
+        event->tstamp_running += ctx->time - event->tstamp_stopped;
 
-        if (!is_software_counter(counter))
+        if (!is_software_event(event))
                 cpuctx->active_oncpu++;
         ctx->nr_active++;
 
-        if (counter->attr.exclusive)
+        if (event->attr.exclusive)
                 cpuctx->exclusive = 1;
 
         return 0;
 }
 
 static int
-group_sched_in(struct perf_counter *group_counter,
+group_sched_in(struct perf_event *group_event,
                struct perf_cpu_context *cpuctx,
-               struct perf_counter_context *ctx,
+               struct perf_event_context *ctx,
                int cpu)
 {
-        struct perf_counter *counter, *partial_group;
+        struct perf_event *event, *partial_group;
         int ret;
 
-        if (group_counter->state == PERF_COUNTER_STATE_OFF)
+        if (group_event->state == PERF_EVENT_STATE_OFF)
                 return 0;
 
-        ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+        ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
         if (ret)
                 return ret < 0 ? ret : 0;
 
-        if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+        if (event_sched_in(group_event, cpuctx, ctx, cpu))
                 return -EAGAIN;
 
         /*
          * Schedule in siblings as one group (if any):
          */
-        list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
-                if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
-                        partial_group = counter;
+        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+                if (event_sched_in(event, cpuctx, ctx, cpu)) {
+                        partial_group = event;
                         goto group_error;
                 }
         }
@@ -656,57 +656,57 @@ group_error:
  * Groups can be scheduled in as one unit only, so undo any
  * partial group before returning:
  */
-        list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
-                if (counter == partial_group)
+        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+                if (event == partial_group)
                         break;
-                counter_sched_out(counter, cpuctx, ctx);
+                event_sched_out(event, cpuctx, ctx);
         }
-        counter_sched_out(group_counter, cpuctx, ctx);
+        event_sched_out(group_event, cpuctx, ctx);
 
         return -EAGAIN;
 }
 
 /*
- * Return 1 for a group consisting entirely of software counters,
- * 0 if the group contains any hardware counters.
+ * Return 1 for a group consisting entirely of software events,
+ * 0 if the group contains any hardware events.
  */
-static int is_software_only_group(struct perf_counter *leader)
+static int is_software_only_group(struct perf_event *leader)
 {
-        struct perf_counter *counter;
+        struct perf_event *event;
 
-        if (!is_software_counter(leader))
+        if (!is_software_event(leader))
                 return 0;
 
-        list_for_each_entry(counter, &leader->sibling_list, group_entry)
-                if (!is_software_counter(counter))
+        list_for_each_entry(event, &leader->sibling_list, group_entry)
+                if (!is_software_event(event))
                         return 0;
 
         return 1;
 }
 
 /*
- * Work out whether we can put this counter group on the CPU now.
+ * Work out whether we can put this event group on the CPU now.
  */
-static int group_can_go_on(struct perf_counter *counter,
+static int group_can_go_on(struct perf_event *event,
                            struct perf_cpu_context *cpuctx,
                            int can_add_hw)
 {
         /*
-         * Groups consisting entirely of software counters can always go on.
+         * Groups consisting entirely of software events can always go on.
          */
-        if (is_software_only_group(counter))
+        if (is_software_only_group(event))
                 return 1;
         /*
          * If an exclusive group is already on, no other hardware
-         * counters can go on.
+         * events can go on.
          */
         if (cpuctx->exclusive)
                 return 0;
         /*
          * If this group is exclusive and there are already
-         * counters on the CPU, it can't go on.
+         * events on the CPU, it can't go on.
          */
-        if (counter->attr.exclusive && cpuctx->active_oncpu)
+        if (event->attr.exclusive && cpuctx->active_oncpu)
                 return 0;
         /*
          * Otherwise, try to add it if all previous groups were able
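group_sched_in() and its group_error path above make group scheduling
all-or-nothing: members are started in list order, and on the first
failure everything already started is unwound, so a group is never left
half-counting. The shape of it (a sketch with made-up start()/stop()
helpers, not the patch's code):

  static int sched_in_group(struct event *leader)
  {
          struct event *ev, *failed = NULL;

          if (start(leader))
                  return -EAGAIN;

          list_for_each_entry(ev, &leader->sibling_list, group_entry) {
                  if (start(ev)) {
                          failed = ev;    /* first member that would not go on */
                          goto undo;
                  }
          }
          return 0;                       /* the whole group is counting */

  undo:
          list_for_each_entry(ev, &leader->sibling_list, group_entry) {
                  if (ev == failed)
                          break;
                  stop(ev);               /* unwind the partial group */
          }
          stop(leader);
          return -EAGAIN;
  }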
@@ -715,26 +715,26 @@ static int group_can_go_on(struct perf_counter *counter,
         return can_add_hw;
 }
 
-static void add_counter_to_ctx(struct perf_counter *counter,
-                               struct perf_counter_context *ctx)
+static void add_event_to_ctx(struct perf_event *event,
+                             struct perf_event_context *ctx)
 {
-        list_add_counter(counter, ctx);
-        counter->tstamp_enabled = ctx->time;
-        counter->tstamp_running = ctx->time;
-        counter->tstamp_stopped = ctx->time;
+        list_add_event(event, ctx);
+        event->tstamp_enabled = ctx->time;
+        event->tstamp_running = ctx->time;
+        event->tstamp_stopped = ctx->time;
 }
 
 /*
- * Cross CPU call to install and enable a performance counter
+ * Cross CPU call to install and enable a performance event
  *
  * Must be called with ctx->mutex held
  */
 static void __perf_install_in_context(void *info)
 {
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-        struct perf_counter *counter = info;
-        struct perf_counter_context *ctx = counter->ctx;
-        struct perf_counter *leader = counter->group_leader;
+        struct perf_event *event = info;
+        struct perf_event_context *ctx = event->ctx;
+        struct perf_event *leader = event->group_leader;
         int cpu = smp_processor_id();
         int err;
 
@@ -743,7 +743,7 @@ static void __perf_install_in_context(void *info)
          * the current task context of this cpu. If not it has been
          * scheduled out before the smp call arrived.
          * Or possibly this is the right context but it isn't
-         * on this cpu because it had no counters.
+         * on this cpu because it had no events.
          */
         if (ctx->task && cpuctx->task_ctx != ctx) {
                 if (cpuctx->task_ctx || ctx->task != current)
@@ -757,41 +757,41 @@ static void __perf_install_in_context(void *info)
 
         /*
          * Protect the list operation against NMI by disabling the
-         * counters on a global level. NOP for non NMI based counters.
+         * events on a global level. NOP for non NMI based events.
          */
         perf_disable();
 
-        add_counter_to_ctx(counter, ctx);
+        add_event_to_ctx(event, ctx);
 
         /*
-         * Don't put the counter on if it is disabled or if
+         * Don't put the event on if it is disabled or if
          * it is in a group and the group isn't on.
          */
-        if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
-            (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
+        if (event->state != PERF_EVENT_STATE_INACTIVE ||
+            (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
                 goto unlock;
 
         /*
-         * An exclusive counter can't go on if there are already active
-         * hardware counters, and no hardware counter can go on if there
-         * is already an exclusive counter on.
+         * An exclusive event can't go on if there are already active
+         * hardware events, and no hardware event can go on if there
+         * is already an exclusive event on.
          */
-        if (!group_can_go_on(counter, cpuctx, 1))
+        if (!group_can_go_on(event, cpuctx, 1))
                 err = -EEXIST;
         else
-                err = counter_sched_in(counter, cpuctx, ctx, cpu);
+                err = event_sched_in(event, cpuctx, ctx, cpu);
 
         if (err) {
                 /*
-                 * This counter couldn't go on. If it is in a group
+                 * This event couldn't go on. If it is in a group
                  * then we have to pull the whole group off.
-                 * If the counter group is pinned then put it in error state.
+                 * If the event group is pinned then put it in error state.
                  */
-                if (leader != counter)
+                if (leader != event)
                         group_sched_out(leader, cpuctx, ctx);
                 if (leader->attr.pinned) {
                         update_group_times(leader);
-                        leader->state = PERF_COUNTER_STATE_ERROR;
+                        leader->state = PERF_EVENT_STATE_ERROR;
                 }
         }
 
@@ -805,92 +805,92 @@ static void __perf_install_in_context(void *info)
 }
 
 /*
- * Attach a performance counter to a context
+ * Attach a performance event to a context
  *
- * First we add the counter to the list with the hardware enable bit
- * in counter->hw_config cleared.
+ * First we add the event to the list with the hardware enable bit
+ * in event->hw_config cleared.
  *
- * If the counter is attached to a task which is on a CPU we use a smp
+ * If the event is attached to a task which is on a CPU we use a smp
  * call to enable it in the task context. The task might have been
  * scheduled away, but we check this in the smp call again.
  *
  * Must be called with ctx->mutex held.
  */
 static void
-perf_install_in_context(struct perf_counter_context *ctx,
-                        struct perf_counter *counter,
+perf_install_in_context(struct perf_event_context *ctx,
+                        struct perf_event *event,
                         int cpu)
 {
         struct task_struct *task = ctx->task;
 
         if (!task) {
                 /*
-                 * Per cpu counters are installed via an smp call and
+                 * Per cpu events are installed via an smp call and
                  * the install is always sucessful.
                  */
                 smp_call_function_single(cpu, __perf_install_in_context,
-                                         counter, 1);
+                                         event, 1);
                 return;
         }
 
 retry:
         task_oncpu_function_call(task, __perf_install_in_context,
-                                 counter);
+                                 event);
 
         spin_lock_irq(&ctx->lock);
         /*
          * we need to retry the smp call.
          */
-        if (ctx->is_active && list_empty(&counter->group_entry)) {
+        if (ctx->is_active && list_empty(&event->group_entry)) {
                 spin_unlock_irq(&ctx->lock);
                 goto retry;
         }
 
         /*
          * The lock prevents that this context is scheduled in so we
-         * can add the counter safely, if it the call above did not
+         * can add the event safely, if it the call above did not
          * succeed.
          */
-        if (list_empty(&counter->group_entry))
-                add_counter_to_ctx(counter, ctx);
+        if (list_empty(&event->group_entry))
+                add_event_to_ctx(event, ctx);
         spin_unlock_irq(&ctx->lock);
 }
 
 /*
- * Put a counter into inactive state and update time fields.
+ * Put a event into inactive state and update time fields.
  * Enabling the leader of a group effectively enables all
  * the group members that aren't explicitly disabled, so we
  * have to update their ->tstamp_enabled also.
  * Note: this works for group members as well as group leaders
  * since the non-leader members' sibling_lists will be empty.
  */
-static void __perf_counter_mark_enabled(struct perf_counter *counter,
-                                        struct perf_counter_context *ctx)
+static void __perf_event_mark_enabled(struct perf_event *event,
+                                      struct perf_event_context *ctx)
 {
-        struct perf_counter *sub;
+        struct perf_event *sub;
 
-        counter->state = PERF_COUNTER_STATE_INACTIVE;
-        counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
-        list_for_each_entry(sub, &counter->sibling_list, group_entry)
-                if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
+        event->state = PERF_EVENT_STATE_INACTIVE;
+        event->tstamp_enabled = ctx->time - event->total_time_enabled;
+        list_for_each_entry(sub, &event->sibling_list, group_entry)
+                if (sub->state >= PERF_EVENT_STATE_INACTIVE)
                         sub->tstamp_enabled =
                                 ctx->time - sub->total_time_enabled;
 }
 
 /*
- * Cross CPU call to enable a performance counter
+ * Cross CPU call to enable a performance event
  */
-static void __perf_counter_enable(void *info)
+static void __perf_event_enable(void *info)
 {
-        struct perf_counter *counter = info;
+        struct perf_event *event = info;
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-        struct perf_counter_context *ctx = counter->ctx;
-        struct perf_counter *leader = counter->group_leader;
+        struct perf_event_context *ctx = event->ctx;
+        struct perf_event *leader = event->group_leader;
         int err;
 
         /*
-         * If this is a per-task counter, need to check whether this
-         * counter's task is the current task on this cpu.
+         * If this is a per-task event, need to check whether this
+         * event's task is the current task on this cpu.
          */
         if (ctx->task && cpuctx->task_ctx != ctx) {
                 if (cpuctx->task_ctx || ctx->task != current)
@@ -902,40 +902,40 @@ static void __perf_counter_enable(void *info)
         ctx->is_active = 1;
         update_context_time(ctx);
 
-        if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+        if (event->state >= PERF_EVENT_STATE_INACTIVE)
                 goto unlock;
-        __perf_counter_mark_enabled(counter, ctx);
+        __perf_event_mark_enabled(event, ctx);
 
         /*
-         * If the counter is in a group and isn't the group leader,
+         * If the event is in a group and isn't the group leader,
          * then don't put it on unless the group is on.
          */
-        if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
+        if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
                 goto unlock;
 
-        if (!group_can_go_on(counter, cpuctx, 1)) {
+        if (!group_can_go_on(event, cpuctx, 1)) {
                 err = -EEXIST;
         } else {
                 perf_disable();
-                if (counter == leader)
-                        err = group_sched_in(counter, cpuctx, ctx,
+                if (event == leader)
+                        err = group_sched_in(event, cpuctx, ctx,
                                              smp_processor_id());
                 else
-                        err = counter_sched_in(counter, cpuctx, ctx,
+                        err = event_sched_in(event, cpuctx, ctx,
                                                smp_processor_id());
                 perf_enable();
         }
 
         if (err) {
                 /*
-                 * If this counter can't go on and it's part of a
+                 * If this event can't go on and it's part of a
                  * group, then the whole group has to come off.
                  */
-                if (leader != counter)
+                if (leader != event)
                         group_sched_out(leader, cpuctx, ctx);
                 if (leader->attr.pinned) {
                         update_group_times(leader);
-                        leader->state = PERF_COUNTER_STATE_ERROR;
+                        leader->state = PERF_EVENT_STATE_ERROR;
                 }
         }
 
@@ -944,98 +944,98 @@ static void __perf_counter_enable(void *info)
 }
 
 /*
- * Enable a counter.
+ * Enable a event.
  *
- * If counter->ctx is a cloned context, callers must make sure that
- * every task struct that counter->ctx->task could possibly point to
+ * If event->ctx is a cloned context, callers must make sure that
+ * every task struct that event->ctx->task could possibly point to
  * remains valid. This condition is satisfied when called through
- * perf_counter_for_each_child or perf_counter_for_each as described
- * for perf_counter_disable.
+ * perf_event_for_each_child or perf_event_for_each as described
+ * for perf_event_disable.
  */
-static void perf_counter_enable(struct perf_counter *counter)
+static void perf_event_enable(struct perf_event *event)
 {
-        struct perf_counter_context *ctx = counter->ctx;
+        struct perf_event_context *ctx = event->ctx;
         struct task_struct *task = ctx->task;
 
         if (!task) {
                 /*
-                 * Enable the counter on the cpu that it's on
+                 * Enable the event on the cpu that it's on
                  */
-                smp_call_function_single(counter->cpu, __perf_counter_enable,
-                                         counter, 1);
+                smp_call_function_single(event->cpu, __perf_event_enable,
+                                         event, 1);
                 return;
         }
 
         spin_lock_irq(&ctx->lock);
-        if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+        if (event->state >= PERF_EVENT_STATE_INACTIVE)
                 goto out;
 
         /*
-         * If the counter is in error state, clear that first.
-         * That way, if we see the counter in error state below, we
+         * If the event is in error state, clear that first.
+         * That way, if we see the event in error state below, we
          * know that it has gone back into error state, as distinct
          * from the task having been scheduled away before the
          * cross-call arrived.
          */
-        if (counter->state == PERF_COUNTER_STATE_ERROR)
-                counter->state = PERF_COUNTER_STATE_OFF;
+        if (event->state == PERF_EVENT_STATE_ERROR)
+                event->state = PERF_EVENT_STATE_OFF;
 
  retry:
         spin_unlock_irq(&ctx->lock);
-        task_oncpu_function_call(task, __perf_counter_enable, counter);
+        task_oncpu_function_call(task, __perf_event_enable, event);
 
         spin_lock_irq(&ctx->lock);
 
         /*
-         * If the context is active and the counter is still off,
+         * If the context is active and the event is still off,
          * we need to retry the cross-call.
          */
-        if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
+        if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
                 goto retry;
 
         /*
          * Since we have the lock this context can't be scheduled
          * in, so we can change the state safely.
          */
-        if (counter->state == PERF_COUNTER_STATE_OFF)
-                __perf_counter_mark_enabled(counter, ctx);
+        if (event->state == PERF_EVENT_STATE_OFF)
+                __perf_event_mark_enabled(event, ctx);
 
  out:
         spin_unlock_irq(&ctx->lock);
 }
 
-static int perf_counter_refresh(struct perf_counter *counter, int refresh)
+static int perf_event_refresh(struct perf_event *event, int refresh)
 {
         /*
1010 * not supported on inherited counters 1010 * not supported on inherited events
1011 */ 1011 */
1012 if (counter->attr.inherit) 1012 if (event->attr.inherit)
1013 return -EINVAL; 1013 return -EINVAL;
1014 1014
1015 atomic_add(refresh, &counter->event_limit); 1015 atomic_add(refresh, &event->event_limit);
1016 perf_counter_enable(counter); 1016 perf_event_enable(event);
1017 1017
1018 return 0; 1018 return 0;
1019} 1019}
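For orientation, a sketch of how this refresh path is driven from user space (hypothetical setup; assumes the __NR_perf_event_open syscall name and the PERF_EVENT_IOC_REFRESH ioctl that this rename introduces). Each counter overflow consumes one unit of event_limit, so the event disables itself after the requested number of overflows:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Arm a cycles event for exactly three overflow notifications.
 * Must not be an inherited event, per the -EINVAL check above. */
static int arm_three_overflows(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;
	/* Enables the event; it goes back off after 3 overflows. */
	ioctl(fd, PERF_EVENT_IOC_REFRESH, 3);
	return fd;
}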
1020 1020
1021void __perf_counter_sched_out(struct perf_counter_context *ctx, 1021void __perf_event_sched_out(struct perf_event_context *ctx,
1022 struct perf_cpu_context *cpuctx) 1022 struct perf_cpu_context *cpuctx)
1023{ 1023{
1024 struct perf_counter *counter; 1024 struct perf_event *event;
1025 1025
1026 spin_lock(&ctx->lock); 1026 spin_lock(&ctx->lock);
1027 ctx->is_active = 0; 1027 ctx->is_active = 0;
1028 if (likely(!ctx->nr_counters)) 1028 if (likely(!ctx->nr_events))
1029 goto out; 1029 goto out;
1030 update_context_time(ctx); 1030 update_context_time(ctx);
1031 1031
1032 perf_disable(); 1032 perf_disable();
1033 if (ctx->nr_active) { 1033 if (ctx->nr_active) {
1034 list_for_each_entry(counter, &ctx->group_list, group_entry) { 1034 list_for_each_entry(event, &ctx->group_list, group_entry) {
1035 if (counter != counter->group_leader) 1035 if (event != event->group_leader)
1036 counter_sched_out(counter, cpuctx, ctx); 1036 event_sched_out(event, cpuctx, ctx);
1037 else 1037 else
1038 group_sched_out(counter, cpuctx, ctx); 1038 group_sched_out(event, cpuctx, ctx);
1039 } 1039 }
1040 } 1040 }
1041 perf_enable(); 1041 perf_enable();
@@ -1046,46 +1046,46 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
1046/* 1046/*
1047 * Test whether two contexts are equivalent, i.e. whether they 1047 * Test whether two contexts are equivalent, i.e. whether they
1048 * have both been cloned from the same version of the same context 1048 * have both been cloned from the same version of the same context
1049 * and they both have the same number of enabled counters. 1049 * and they both have the same number of enabled events.
1050 * If the number of enabled counters is the same, then the set 1050 * If the number of enabled events is the same, then the set
1051 * of enabled counters should be the same, because these are both 1051 * of enabled events should be the same, because these are both
1052 * inherited contexts, therefore we can't access individual counters 1052 * inherited contexts, therefore we can't access individual events
1053 * in them directly with an fd; we can only enable/disable all 1053 * in them directly with an fd; we can only enable/disable all
1054 * counters via prctl, or enable/disable all counters in a family 1054 * events via prctl, or enable/disable all events in a family
1055 * via ioctl, which will have the same effect on both contexts. 1055 * via ioctl, which will have the same effect on both contexts.
1056 */ 1056 */
1057static int context_equiv(struct perf_counter_context *ctx1, 1057static int context_equiv(struct perf_event_context *ctx1,
1058 struct perf_counter_context *ctx2) 1058 struct perf_event_context *ctx2)
1059{ 1059{
1060 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx 1060 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1061 && ctx1->parent_gen == ctx2->parent_gen 1061 && ctx1->parent_gen == ctx2->parent_gen
1062 && !ctx1->pin_count && !ctx2->pin_count; 1062 && !ctx1->pin_count && !ctx2->pin_count;
1063} 1063}
1064 1064
1065static void __perf_counter_read(void *counter); 1065static void __perf_event_read(void *event);
1066 1066
1067static void __perf_counter_sync_stat(struct perf_counter *counter, 1067static void __perf_event_sync_stat(struct perf_event *event,
1068 struct perf_counter *next_counter) 1068 struct perf_event *next_event)
1069{ 1069{
1070 u64 value; 1070 u64 value;
1071 1071
1072 if (!counter->attr.inherit_stat) 1072 if (!event->attr.inherit_stat)
1073 return; 1073 return;
1074 1074
1075 /* 1075 /*
1076 * Update the counter value, we cannot use perf_counter_read() 1076 * Update the event value; we cannot use perf_event_read()
1077 * because we're in the middle of a context switch and have IRQs 1077 * because we're in the middle of a context switch and have IRQs
1078 * disabled, which upsets smp_call_function_single(), however 1078 * disabled, which upsets smp_call_function_single(), however
1079 * we know the counter must be on the current CPU, therefore we 1079 * we know the event must be on the current CPU, therefore we
1080 * don't need to use it. 1080 * don't need to use it.
1081 */ 1081 */
1082 switch (counter->state) { 1082 switch (event->state) {
1083 case PERF_COUNTER_STATE_ACTIVE: 1083 case PERF_EVENT_STATE_ACTIVE:
1084 __perf_counter_read(counter); 1084 __perf_event_read(event);
1085 break; 1085 break;
1086 1086
1087 case PERF_COUNTER_STATE_INACTIVE: 1087 case PERF_EVENT_STATE_INACTIVE:
1088 update_counter_times(counter); 1088 update_event_times(event);
1089 break; 1089 break;
1090 1090
1091 default: 1091 default:
@@ -1093,73 +1093,73 @@ static void __perf_counter_sync_stat(struct perf_counter *counter,
1093 } 1093 }
1094 1094
1095 /* 1095 /*
1096 * In order to keep per-task stats reliable we need to flip the counter 1096 * In order to keep per-task stats reliable we need to flip the event
1097 * values when we flip the contexts. 1097 * values when we flip the contexts.
1098 */ 1098 */
1099 value = atomic64_read(&next_counter->count); 1099 value = atomic64_read(&next_event->count);
1100 value = atomic64_xchg(&counter->count, value); 1100 value = atomic64_xchg(&event->count, value);
1101 atomic64_set(&next_counter->count, value); 1101 atomic64_set(&next_event->count, value);
1102 1102
1103 swap(counter->total_time_enabled, next_counter->total_time_enabled); 1103 swap(event->total_time_enabled, next_event->total_time_enabled);
1104 swap(counter->total_time_running, next_counter->total_time_running); 1104 swap(event->total_time_running, next_event->total_time_running);
1105 1105
1106 /* 1106 /*
1107 * Since we swizzled the values, update the user visible data too. 1107 * Since we swizzled the values, update the user visible data too.
1108 */ 1108 */
1109 perf_counter_update_userpage(counter); 1109 perf_event_update_userpage(event);
1110 perf_counter_update_userpage(next_counter); 1110 perf_event_update_userpage(next_event);
1111} 1111}
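The flip above relies on atomic64_xchg() so each individual update stays atomic against concurrent increments (e.g. from NMI); it does not make the pair-swap atomic as a whole — the IRQs-off context-switch path provides that exclusion. A user-space analog of the same three-step exchange, using C11 atomics purely for illustration:

#include <stdatomic.h>
#include <stdint.h>

struct evt {
	_Atomic uint64_t count;
};

/* Same shape as the swap in __perf_event_sync_stat(): read one side,
 * exchange it into the other (recovering the old value), store back. */
static void swap_counts(struct evt *a, struct evt *b)
{
	uint64_t value = atomic_load(&b->count);

	value = atomic_exchange(&a->count, value);	/* a <- b, old a out */
	atomic_store(&b->count, value);			/* b <- old a */
}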
1112 1112
1113#define list_next_entry(pos, member) \ 1113#define list_next_entry(pos, member) \
1114 list_entry(pos->member.next, typeof(*pos), member) 1114 list_entry(pos->member.next, typeof(*pos), member)
1115 1115
1116static void perf_counter_sync_stat(struct perf_counter_context *ctx, 1116static void perf_event_sync_stat(struct perf_event_context *ctx,
1117 struct perf_counter_context *next_ctx) 1117 struct perf_event_context *next_ctx)
1118{ 1118{
1119 struct perf_counter *counter, *next_counter; 1119 struct perf_event *event, *next_event;
1120 1120
1121 if (!ctx->nr_stat) 1121 if (!ctx->nr_stat)
1122 return; 1122 return;
1123 1123
1124 counter = list_first_entry(&ctx->event_list, 1124 event = list_first_entry(&ctx->event_list,
1125 struct perf_counter, event_entry); 1125 struct perf_event, event_entry);
1126 1126
1127 next_counter = list_first_entry(&next_ctx->event_list, 1127 next_event = list_first_entry(&next_ctx->event_list,
1128 struct perf_counter, event_entry); 1128 struct perf_event, event_entry);
1129 1129
1130 while (&counter->event_entry != &ctx->event_list && 1130 while (&event->event_entry != &ctx->event_list &&
1131 &next_counter->event_entry != &next_ctx->event_list) { 1131 &next_event->event_entry != &next_ctx->event_list) {
1132 1132
1133 __perf_counter_sync_stat(counter, next_counter); 1133 __perf_event_sync_stat(event, next_event);
1134 1134
1135 counter = list_next_entry(counter, event_entry); 1135 event = list_next_entry(event, event_entry);
1136 next_counter = list_next_entry(next_counter, event_entry); 1136 next_event = list_next_entry(next_event, event_entry);
1137 } 1137 }
1138} 1138}
1139 1139
1140/* 1140/*
1141 * Called from scheduler to remove the counters of the current task, 1141 * Called from scheduler to remove the events of the current task,
1142 * with interrupts disabled. 1142 * with interrupts disabled.
1143 * 1143 *
1144 * We stop each counter and update the counter value in counter->count. 1144 * We stop each event and update the event value in event->count.
1145 * 1145 *
1146 * This does not protect us against NMI, but disable() 1146 * This does not protect us against NMI, but disable()
1147 * sets the disabled bit in the control field of counter _before_ 1147 * sets the disabled bit in the control field of event _before_
1148 * accessing the counter control register. If a NMI hits, then it will 1148 * accessing the event control register. If a NMI hits, then it will
1149 * not restart the counter. 1149 * not restart the event.
1150 */ 1150 */
1151void perf_counter_task_sched_out(struct task_struct *task, 1151void perf_event_task_sched_out(struct task_struct *task,
1152 struct task_struct *next, int cpu) 1152 struct task_struct *next, int cpu)
1153{ 1153{
1154 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1154 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1155 struct perf_counter_context *ctx = task->perf_counter_ctxp; 1155 struct perf_event_context *ctx = task->perf_event_ctxp;
1156 struct perf_counter_context *next_ctx; 1156 struct perf_event_context *next_ctx;
1157 struct perf_counter_context *parent; 1157 struct perf_event_context *parent;
1158 struct pt_regs *regs; 1158 struct pt_regs *regs;
1159 int do_switch = 1; 1159 int do_switch = 1;
1160 1160
1161 regs = task_pt_regs(task); 1161 regs = task_pt_regs(task);
1162 perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); 1162 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1163 1163
1164 if (likely(!ctx || !cpuctx->task_ctx)) 1164 if (likely(!ctx || !cpuctx->task_ctx))
1165 return; 1165 return;
@@ -1168,7 +1168,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1168 1168
1169 rcu_read_lock(); 1169 rcu_read_lock();
1170 parent = rcu_dereference(ctx->parent_ctx); 1170 parent = rcu_dereference(ctx->parent_ctx);
1171 next_ctx = next->perf_counter_ctxp; 1171 next_ctx = next->perf_event_ctxp;
1172 if (parent && next_ctx && 1172 if (parent && next_ctx &&
1173 rcu_dereference(next_ctx->parent_ctx) == parent) { 1173 rcu_dereference(next_ctx->parent_ctx) == parent) {
1174 /* 1174 /*
@@ -1185,15 +1185,15 @@ void perf_counter_task_sched_out(struct task_struct *task,
1185 if (context_equiv(ctx, next_ctx)) { 1185 if (context_equiv(ctx, next_ctx)) {
1186 /* 1186 /*
1187 * XXX do we need a memory barrier of sorts 1187 * XXX do we need a memory barrier of sorts
1188 * wrt to rcu_dereference() of perf_counter_ctxp 1188 * wrt rcu_dereference() of perf_event_ctxp
1189 */ 1189 */
1190 task->perf_counter_ctxp = next_ctx; 1190 task->perf_event_ctxp = next_ctx;
1191 next->perf_counter_ctxp = ctx; 1191 next->perf_event_ctxp = ctx;
1192 ctx->task = next; 1192 ctx->task = next;
1193 next_ctx->task = task; 1193 next_ctx->task = task;
1194 do_switch = 0; 1194 do_switch = 0;
1195 1195
1196 perf_counter_sync_stat(ctx, next_ctx); 1196 perf_event_sync_stat(ctx, next_ctx);
1197 } 1197 }
1198 spin_unlock(&next_ctx->lock); 1198 spin_unlock(&next_ctx->lock);
1199 spin_unlock(&ctx->lock); 1199 spin_unlock(&ctx->lock);
@@ -1201,7 +1201,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1201 rcu_read_unlock(); 1201 rcu_read_unlock();
1202 1202
1203 if (do_switch) { 1203 if (do_switch) {
1204 __perf_counter_sched_out(ctx, cpuctx); 1204 __perf_event_sched_out(ctx, cpuctx);
1205 cpuctx->task_ctx = NULL; 1205 cpuctx->task_ctx = NULL;
1206 } 1206 }
1207} 1207}
@@ -1209,7 +1209,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1209/* 1209/*
1210 * Called with IRQs disabled 1210 * Called with IRQs disabled
1211 */ 1211 */
1212static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) 1212static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1213{ 1213{
1214 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1214 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1215 1215
@@ -1219,28 +1219,28 @@ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1219 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 1219 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1220 return; 1220 return;
1221 1221
1222 __perf_counter_sched_out(ctx, cpuctx); 1222 __perf_event_sched_out(ctx, cpuctx);
1223 cpuctx->task_ctx = NULL; 1223 cpuctx->task_ctx = NULL;
1224} 1224}
1225 1225
1226/* 1226/*
1227 * Called with IRQs disabled 1227 * Called with IRQs disabled
1228 */ 1228 */
1229static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) 1229static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
1230{ 1230{
1231 __perf_counter_sched_out(&cpuctx->ctx, cpuctx); 1231 __perf_event_sched_out(&cpuctx->ctx, cpuctx);
1232} 1232}
1233 1233
1234static void 1234static void
1235__perf_counter_sched_in(struct perf_counter_context *ctx, 1235__perf_event_sched_in(struct perf_event_context *ctx,
1236 struct perf_cpu_context *cpuctx, int cpu) 1236 struct perf_cpu_context *cpuctx, int cpu)
1237{ 1237{
1238 struct perf_counter *counter; 1238 struct perf_event *event;
1239 int can_add_hw = 1; 1239 int can_add_hw = 1;
1240 1240
1241 spin_lock(&ctx->lock); 1241 spin_lock(&ctx->lock);
1242 ctx->is_active = 1; 1242 ctx->is_active = 1;
1243 if (likely(!ctx->nr_counters)) 1243 if (likely(!ctx->nr_events))
1244 goto out; 1244 goto out;
1245 1245
1246 ctx->timestamp = perf_clock(); 1246 ctx->timestamp = perf_clock();
@@ -1251,52 +1251,52 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
1251 * First go through the list and put on any pinned groups 1251 * First go through the list and put on any pinned groups
1252 * in order to give them the best chance of going on. 1252 * in order to give them the best chance of going on.
1253 */ 1253 */
1254 list_for_each_entry(counter, &ctx->group_list, group_entry) { 1254 list_for_each_entry(event, &ctx->group_list, group_entry) {
1255 if (counter->state <= PERF_COUNTER_STATE_OFF || 1255 if (event->state <= PERF_EVENT_STATE_OFF ||
1256 !counter->attr.pinned) 1256 !event->attr.pinned)
1257 continue; 1257 continue;
1258 if (counter->cpu != -1 && counter->cpu != cpu) 1258 if (event->cpu != -1 && event->cpu != cpu)
1259 continue; 1259 continue;
1260 1260
1261 if (counter != counter->group_leader) 1261 if (event != event->group_leader)
1262 counter_sched_in(counter, cpuctx, ctx, cpu); 1262 event_sched_in(event, cpuctx, ctx, cpu);
1263 else { 1263 else {
1264 if (group_can_go_on(counter, cpuctx, 1)) 1264 if (group_can_go_on(event, cpuctx, 1))
1265 group_sched_in(counter, cpuctx, ctx, cpu); 1265 group_sched_in(event, cpuctx, ctx, cpu);
1266 } 1266 }
1267 1267
1268 /* 1268 /*
1269 * If this pinned group hasn't been scheduled, 1269 * If this pinned group hasn't been scheduled,
1270 * put it in error state. 1270 * put it in error state.
1271 */ 1271 */
1272 if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1272 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1273 update_group_times(counter); 1273 update_group_times(event);
1274 counter->state = PERF_COUNTER_STATE_ERROR; 1274 event->state = PERF_EVENT_STATE_ERROR;
1275 } 1275 }
1276 } 1276 }
1277 1277
1278 list_for_each_entry(counter, &ctx->group_list, group_entry) { 1278 list_for_each_entry(event, &ctx->group_list, group_entry) {
1279 /* 1279 /*
1280 * Ignore counters in OFF or ERROR state, and 1280 * Ignore events in OFF or ERROR state, and
1281 * ignore pinned counters since we did them already. 1281 * ignore pinned events since we did them already.
1282 */ 1282 */
1283 if (counter->state <= PERF_COUNTER_STATE_OFF || 1283 if (event->state <= PERF_EVENT_STATE_OFF ||
1284 counter->attr.pinned) 1284 event->attr.pinned)
1285 continue; 1285 continue;
1286 1286
1287 /* 1287 /*
1288 * Listen to the 'cpu' scheduling filter constraint 1288 * Listen to the 'cpu' scheduling filter constraint
1289 * of counters: 1289 * of events:
1290 */ 1290 */
1291 if (counter->cpu != -1 && counter->cpu != cpu) 1291 if (event->cpu != -1 && event->cpu != cpu)
1292 continue; 1292 continue;
1293 1293
1294 if (counter != counter->group_leader) { 1294 if (event != event->group_leader) {
1295 if (counter_sched_in(counter, cpuctx, ctx, cpu)) 1295 if (event_sched_in(event, cpuctx, ctx, cpu))
1296 can_add_hw = 0; 1296 can_add_hw = 0;
1297 } else { 1297 } else {
1298 if (group_can_go_on(counter, cpuctx, can_add_hw)) { 1298 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1299 if (group_sched_in(counter, cpuctx, ctx, cpu)) 1299 if (group_sched_in(event, cpuctx, ctx, cpu))
1300 can_add_hw = 0; 1300 can_add_hw = 0;
1301 } 1301 }
1302 } 1302 }
@@ -1307,48 +1307,48 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
1307} 1307}
1308 1308
1309/* 1309/*
1310 * Called from scheduler to add the counters of the current task 1310 * Called from scheduler to add the events of the current task
1311 * with interrupts disabled. 1311 * with interrupts disabled.
1312 * 1312 *
1313 * We restore the counter value and then enable it. 1313 * We restore the event value and then enable it.
1314 * 1314 *
1315 * This does not protect us against NMI, but enable() 1315 * This does not protect us against NMI, but enable()
1316 * sets the enabled bit in the control field of counter _before_ 1316 * sets the enabled bit in the control field of event _before_
1317 * accessing the counter control register. If a NMI hits, then it will 1317 * accessing the event control register. If a NMI hits, then it will
1318 * keep the counter running. 1318 * keep the event running.
1319 */ 1319 */
1320void perf_counter_task_sched_in(struct task_struct *task, int cpu) 1320void perf_event_task_sched_in(struct task_struct *task, int cpu)
1321{ 1321{
1322 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1322 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1323 struct perf_counter_context *ctx = task->perf_counter_ctxp; 1323 struct perf_event_context *ctx = task->perf_event_ctxp;
1324 1324
1325 if (likely(!ctx)) 1325 if (likely(!ctx))
1326 return; 1326 return;
1327 if (cpuctx->task_ctx == ctx) 1327 if (cpuctx->task_ctx == ctx)
1328 return; 1328 return;
1329 __perf_counter_sched_in(ctx, cpuctx, cpu); 1329 __perf_event_sched_in(ctx, cpuctx, cpu);
1330 cpuctx->task_ctx = ctx; 1330 cpuctx->task_ctx = ctx;
1331} 1331}
1332 1332
1333static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) 1333static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1334{ 1334{
1335 struct perf_counter_context *ctx = &cpuctx->ctx; 1335 struct perf_event_context *ctx = &cpuctx->ctx;
1336 1336
1337 __perf_counter_sched_in(ctx, cpuctx, cpu); 1337 __perf_event_sched_in(ctx, cpuctx, cpu);
1338} 1338}
1339 1339
1340#define MAX_INTERRUPTS (~0ULL) 1340#define MAX_INTERRUPTS (~0ULL)
1341 1341
1342static void perf_log_throttle(struct perf_counter *counter, int enable); 1342static void perf_log_throttle(struct perf_event *event, int enable);
1343 1343
1344static void perf_adjust_period(struct perf_counter *counter, u64 events) 1344static void perf_adjust_period(struct perf_event *event, u64 events)
1345{ 1345{
1346 struct hw_perf_counter *hwc = &counter->hw; 1346 struct hw_perf_event *hwc = &event->hw;
1347 u64 period, sample_period; 1347 u64 period, sample_period;
1348 s64 delta; 1348 s64 delta;
1349 1349
1350 events *= hwc->sample_period; 1350 events *= hwc->sample_period;
1351 period = div64_u64(events, counter->attr.sample_freq); 1351 period = div64_u64(events, event->attr.sample_freq);
1352 1352
1353 delta = (s64)(period - hwc->sample_period); 1353 delta = (s64)(period - hwc->sample_period);
1354 delta = (delta + 7) / 8; /* low pass filter */ 1354 delta = (delta + 7) / 8; /* low pass filter */
@@ -1361,39 +1361,39 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
1361 hwc->sample_period = sample_period; 1361 hwc->sample_period = sample_period;
1362} 1362}
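Concrete numbers make the adjustment easier to follow. With sample_period = 10000, a target sample_freq of 1000 Hz and an observed rate of 1500 interrupts per second, the ideal period is 1500 * 10000 / 1000 = 15000; the (delta + 7) / 8 low-pass step moves only an eighth of the way there, to 10625. A stand-alone mirror of the arithmetic (the elided hunk lines presumably clamp runaway periods before storing hwc->sample_period):

#include <stdint.h>
#include <stdio.h>

/* Mirror of perf_adjust_period(): events is the observed interrupt
 * rate, so events * sample_period / sample_freq is the period that
 * would have produced the requested frequency. */
static uint64_t adjust_period(uint64_t sample_period, uint64_t sample_freq,
			      uint64_t events)
{
	uint64_t period = events * sample_period / sample_freq;
	int64_t delta = (int64_t)(period - sample_period);

	delta = (delta + 7) / 8;	/* low pass filter */
	return sample_period + delta;
}

int main(void)
{
	/* Prints 10625: one eighth of the way from 10000 toward 15000. */
	printf("%llu\n",
	       (unsigned long long)adjust_period(10000, 1000, 1500));
	return 0;
}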
1363 1363
1364static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) 1364static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1365{ 1365{
1366 struct perf_counter *counter; 1366 struct perf_event *event;
1367 struct hw_perf_counter *hwc; 1367 struct hw_perf_event *hwc;
1368 u64 interrupts, freq; 1368 u64 interrupts, freq;
1369 1369
1370 spin_lock(&ctx->lock); 1370 spin_lock(&ctx->lock);
1371 list_for_each_entry(counter, &ctx->group_list, group_entry) { 1371 list_for_each_entry(event, &ctx->group_list, group_entry) {
1372 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 1372 if (event->state != PERF_EVENT_STATE_ACTIVE)
1373 continue; 1373 continue;
1374 1374
1375 hwc = &counter->hw; 1375 hwc = &event->hw;
1376 1376
1377 interrupts = hwc->interrupts; 1377 interrupts = hwc->interrupts;
1378 hwc->interrupts = 0; 1378 hwc->interrupts = 0;
1379 1379
1380 /* 1380 /*
1381 * unthrottle counters on the tick 1381 * unthrottle events on the tick
1382 */ 1382 */
1383 if (interrupts == MAX_INTERRUPTS) { 1383 if (interrupts == MAX_INTERRUPTS) {
1384 perf_log_throttle(counter, 1); 1384 perf_log_throttle(event, 1);
1385 counter->pmu->unthrottle(counter); 1385 event->pmu->unthrottle(event);
1386 interrupts = 2*sysctl_perf_counter_sample_rate/HZ; 1386 interrupts = 2*sysctl_perf_event_sample_rate/HZ;
1387 } 1387 }
1388 1388
1389 if (!counter->attr.freq || !counter->attr.sample_freq) 1389 if (!event->attr.freq || !event->attr.sample_freq)
1390 continue; 1390 continue;
1391 1391
1392 /* 1392 /*
1393 * if the specified freq < HZ then we need to skip ticks 1393 * if the specified freq < HZ then we need to skip ticks
1394 */ 1394 */
1395 if (counter->attr.sample_freq < HZ) { 1395 if (event->attr.sample_freq < HZ) {
1396 freq = counter->attr.sample_freq; 1396 freq = event->attr.sample_freq;
1397 1397
1398 hwc->freq_count += freq; 1398 hwc->freq_count += freq;
1399 hwc->freq_interrupts += interrupts; 1399 hwc->freq_interrupts += interrupts;
@@ -1407,7 +1407,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1407 } else 1407 } else
1408 freq = HZ; 1408 freq = HZ;
1409 1409
1410 perf_adjust_period(counter, freq * interrupts); 1410 perf_adjust_period(event, freq * interrupts);
1411 1411
1412 /* 1412 /*
1413 * In order to avoid being stalled by an (accidental) huge 1413 * In order to avoid being stalled by an (accidental) huge
@@ -1416,9 +1416,9 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1416 */ 1416 */
1417 if (!interrupts) { 1417 if (!interrupts) {
1418 perf_disable(); 1418 perf_disable();
1419 counter->pmu->disable(counter); 1419 event->pmu->disable(event);
1420 atomic64_set(&hwc->period_left, 0); 1420 atomic64_set(&hwc->period_left, 0);
1421 counter->pmu->enable(counter); 1421 event->pmu->enable(event);
1422 perf_enable(); 1422 perf_enable();
1423 } 1423 }
1424 } 1424 }
@@ -1426,22 +1426,22 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1426} 1426}
1427 1427
1428/* 1428/*
1429 * Round-robin a context's counters: 1429 * Round-robin a context's events:
1430 */ 1430 */
1431static void rotate_ctx(struct perf_counter_context *ctx) 1431static void rotate_ctx(struct perf_event_context *ctx)
1432{ 1432{
1433 struct perf_counter *counter; 1433 struct perf_event *event;
1434 1434
1435 if (!ctx->nr_counters) 1435 if (!ctx->nr_events)
1436 return; 1436 return;
1437 1437
1438 spin_lock(&ctx->lock); 1438 spin_lock(&ctx->lock);
1439 /* 1439 /*
1440 * Rotate the first entry last (works just fine for group counters too): 1440 * Rotate the first entry last (works just fine for group events too):
1441 */ 1441 */
1442 perf_disable(); 1442 perf_disable();
1443 list_for_each_entry(counter, &ctx->group_list, group_entry) { 1443 list_for_each_entry(event, &ctx->group_list, group_entry) {
1444 list_move_tail(&counter->group_entry, &ctx->group_list); 1444 list_move_tail(&event->group_entry, &ctx->group_list);
1445 break; 1445 break;
1446 } 1446 }
1447 perf_enable(); 1447 perf_enable();
@@ -1449,93 +1449,93 @@ static void rotate_ctx(struct perf_counter_context *ctx)
1449 spin_unlock(&ctx->lock); 1449 spin_unlock(&ctx->lock);
1450} 1450}
1451 1451
1452void perf_counter_task_tick(struct task_struct *curr, int cpu) 1452void perf_event_task_tick(struct task_struct *curr, int cpu)
1453{ 1453{
1454 struct perf_cpu_context *cpuctx; 1454 struct perf_cpu_context *cpuctx;
1455 struct perf_counter_context *ctx; 1455 struct perf_event_context *ctx;
1456 1456
1457 if (!atomic_read(&nr_counters)) 1457 if (!atomic_read(&nr_events))
1458 return; 1458 return;
1459 1459
1460 cpuctx = &per_cpu(perf_cpu_context, cpu); 1460 cpuctx = &per_cpu(perf_cpu_context, cpu);
1461 ctx = curr->perf_counter_ctxp; 1461 ctx = curr->perf_event_ctxp;
1462 1462
1463 perf_ctx_adjust_freq(&cpuctx->ctx); 1463 perf_ctx_adjust_freq(&cpuctx->ctx);
1464 if (ctx) 1464 if (ctx)
1465 perf_ctx_adjust_freq(ctx); 1465 perf_ctx_adjust_freq(ctx);
1466 1466
1467 perf_counter_cpu_sched_out(cpuctx); 1467 perf_event_cpu_sched_out(cpuctx);
1468 if (ctx) 1468 if (ctx)
1469 __perf_counter_task_sched_out(ctx); 1469 __perf_event_task_sched_out(ctx);
1470 1470
1471 rotate_ctx(&cpuctx->ctx); 1471 rotate_ctx(&cpuctx->ctx);
1472 if (ctx) 1472 if (ctx)
1473 rotate_ctx(ctx); 1473 rotate_ctx(ctx);
1474 1474
1475 perf_counter_cpu_sched_in(cpuctx, cpu); 1475 perf_event_cpu_sched_in(cpuctx, cpu);
1476 if (ctx) 1476 if (ctx)
1477 perf_counter_task_sched_in(curr, cpu); 1477 perf_event_task_sched_in(curr, cpu);
1478} 1478}
1479 1479
1480/* 1480/*
1481 * Enable all of a task's counters that have been marked enable-on-exec. 1481 * Enable all of a task's events that have been marked enable-on-exec.
1482 * This expects task == current. 1482 * This expects task == current.
1483 */ 1483 */
1484static void perf_counter_enable_on_exec(struct task_struct *task) 1484static void perf_event_enable_on_exec(struct task_struct *task)
1485{ 1485{
1486 struct perf_counter_context *ctx; 1486 struct perf_event_context *ctx;
1487 struct perf_counter *counter; 1487 struct perf_event *event;
1488 unsigned long flags; 1488 unsigned long flags;
1489 int enabled = 0; 1489 int enabled = 0;
1490 1490
1491 local_irq_save(flags); 1491 local_irq_save(flags);
1492 ctx = task->perf_counter_ctxp; 1492 ctx = task->perf_event_ctxp;
1493 if (!ctx || !ctx->nr_counters) 1493 if (!ctx || !ctx->nr_events)
1494 goto out; 1494 goto out;
1495 1495
1496 __perf_counter_task_sched_out(ctx); 1496 __perf_event_task_sched_out(ctx);
1497 1497
1498 spin_lock(&ctx->lock); 1498 spin_lock(&ctx->lock);
1499 1499
1500 list_for_each_entry(counter, &ctx->group_list, group_entry) { 1500 list_for_each_entry(event, &ctx->group_list, group_entry) {
1501 if (!counter->attr.enable_on_exec) 1501 if (!event->attr.enable_on_exec)
1502 continue; 1502 continue;
1503 counter->attr.enable_on_exec = 0; 1503 event->attr.enable_on_exec = 0;
1504 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 1504 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1505 continue; 1505 continue;
1506 __perf_counter_mark_enabled(counter, ctx); 1506 __perf_event_mark_enabled(event, ctx);
1507 enabled = 1; 1507 enabled = 1;
1508 } 1508 }
1509 1509
1510 /* 1510 /*
1511 * Unclone this context if we enabled any counter. 1511 * Unclone this context if we enabled any event.
1512 */ 1512 */
1513 if (enabled) 1513 if (enabled)
1514 unclone_ctx(ctx); 1514 unclone_ctx(ctx);
1515 1515
1516 spin_unlock(&ctx->lock); 1516 spin_unlock(&ctx->lock);
1517 1517
1518 perf_counter_task_sched_in(task, smp_processor_id()); 1518 perf_event_task_sched_in(task, smp_processor_id());
1519 out: 1519 out:
1520 local_irq_restore(flags); 1520 local_irq_restore(flags);
1521} 1521}
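This is the kernel half of the classic tool pattern: open a disabled event with attr.enable_on_exec set, then exec the workload, so counting starts exactly at the exec boundary and skips the tool's own setup. A hypothetical sketch of the user-space half:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Count instructions of a program, starting only when it execs. */
static int count_from_exec(char *const argv[])
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* off through our own setup */
	attr.enable_on_exec = 1;	/* flipped on by the code above */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */, -1, -1, 0);
	if (fd < 0)
		return -1;
	execvp(argv[0], argv);		/* counting starts here */
	return -1;			/* only reached if exec failed */
}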
1522 1522
1523/* 1523/*
1524 * Cross CPU call to read the hardware counter 1524 * Cross CPU call to read the hardware event
1525 */ 1525 */
1526static void __perf_counter_read(void *info) 1526static void __perf_event_read(void *info)
1527{ 1527{
1528 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1528 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1529 struct perf_counter *counter = info; 1529 struct perf_event *event = info;
1530 struct perf_counter_context *ctx = counter->ctx; 1530 struct perf_event_context *ctx = event->ctx;
1531 unsigned long flags; 1531 unsigned long flags;
1532 1532
1533 /* 1533 /*
1534 * If this is a task context, we need to check whether it is 1534 * If this is a task context, we need to check whether it is
1535 * the current task context of this cpu. If not it has been 1535 * the current task context of this cpu. If not it has been
1536 * scheduled out before the smp call arrived. In that case 1536 * scheduled out before the smp call arrived. In that case
1537 * counter->count would have been updated to a recent sample 1537 * event->count would have been updated to a recent sample
1538 * when the counter was scheduled out. 1538 * when the event was scheduled out.
1539 */ 1539 */
1540 if (ctx->task && cpuctx->task_ctx != ctx) 1540 if (ctx->task && cpuctx->task_ctx != ctx)
1541 return; 1541 return;
@@ -1543,32 +1543,32 @@ static void __perf_counter_read(void *info)
1543 local_irq_save(flags); 1543 local_irq_save(flags);
1544 if (ctx->is_active) 1544 if (ctx->is_active)
1545 update_context_time(ctx); 1545 update_context_time(ctx);
1546 counter->pmu->read(counter); 1546 event->pmu->read(event);
1547 update_counter_times(counter); 1547 update_event_times(event);
1548 local_irq_restore(flags); 1548 local_irq_restore(flags);
1549} 1549}
1550 1550
1551static u64 perf_counter_read(struct perf_counter *counter) 1551static u64 perf_event_read(struct perf_event *event)
1552{ 1552{
1553 /* 1553 /*
1554 * If counter is enabled and currently active on a CPU, update the 1554 * If event is enabled and currently active on a CPU, update the
1555 * value in the counter structure: 1555 * value in the event structure:
1556 */ 1556 */
1557 if (counter->state == PERF_COUNTER_STATE_ACTIVE) { 1557 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1558 smp_call_function_single(counter->oncpu, 1558 smp_call_function_single(event->oncpu,
1559 __perf_counter_read, counter, 1); 1559 __perf_event_read, event, 1);
1560 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1560 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1561 update_counter_times(counter); 1561 update_event_times(event);
1562 } 1562 }
1563 1563
1564 return atomic64_read(&counter->count); 1564 return atomic64_read(&event->count);
1565} 1565}
1566 1566
1567/* 1567/*
1568 * Initialize the perf_counter context in a task_struct: 1568 * Initialize the perf_event context in a task_struct:
1569 */ 1569 */
1570static void 1570static void
1571__perf_counter_init_context(struct perf_counter_context *ctx, 1571__perf_event_init_context(struct perf_event_context *ctx,
1572 struct task_struct *task) 1572 struct task_struct *task)
1573{ 1573{
1574 memset(ctx, 0, sizeof(*ctx)); 1574 memset(ctx, 0, sizeof(*ctx));
@@ -1580,19 +1580,19 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
1580 ctx->task = task; 1580 ctx->task = task;
1581} 1581}
1582 1582
1583static struct perf_counter_context *find_get_context(pid_t pid, int cpu) 1583static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1584{ 1584{
1585 struct perf_counter_context *ctx; 1585 struct perf_event_context *ctx;
1586 struct perf_cpu_context *cpuctx; 1586 struct perf_cpu_context *cpuctx;
1587 struct task_struct *task; 1587 struct task_struct *task;
1588 unsigned long flags; 1588 unsigned long flags;
1589 int err; 1589 int err;
1590 1590
1591 /* 1591 /*
1592 * If cpu is not a wildcard then this is a percpu counter: 1592 * If cpu is not a wildcard then this is a percpu event:
1593 */ 1593 */
1594 if (cpu != -1) { 1594 if (cpu != -1) {
1595 /* Must be root to operate on a CPU counter: */ 1595 /* Must be root to operate on a CPU event: */
1596 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 1596 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1597 return ERR_PTR(-EACCES); 1597 return ERR_PTR(-EACCES);
1598 1598
@@ -1600,7 +1600,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1600 return ERR_PTR(-EINVAL); 1600 return ERR_PTR(-EINVAL);
1601 1601
1602 /* 1602 /*
1603 * We could be clever and allow to attach a counter to an 1603 * We could be clever and allow attaching an event to an
1604 * offline CPU and activate it when the CPU comes up, but 1604 * offline CPU and activate it when the CPU comes up, but
1605 * that's for later. 1605 * that's for later.
1606 */ 1606 */
@@ -1627,7 +1627,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1627 return ERR_PTR(-ESRCH); 1627 return ERR_PTR(-ESRCH);
1628 1628
1629 /* 1629 /*
1630 * Can't attach counters to a dying task. 1630 * Can't attach events to a dying task.
1631 */ 1631 */
1632 err = -ESRCH; 1632 err = -ESRCH;
1633 if (task->flags & PF_EXITING) 1633 if (task->flags & PF_EXITING)
@@ -1646,13 +1646,13 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1646 } 1646 }
1647 1647
1648 if (!ctx) { 1648 if (!ctx) {
1649 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 1649 ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
1650 err = -ENOMEM; 1650 err = -ENOMEM;
1651 if (!ctx) 1651 if (!ctx)
1652 goto errout; 1652 goto errout;
1653 __perf_counter_init_context(ctx, task); 1653 __perf_event_init_context(ctx, task);
1654 get_ctx(ctx); 1654 get_ctx(ctx);
1655 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { 1655 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1656 /* 1656 /*
1657 * We raced with some other task; use 1657 * We raced with some other task; use
1658 * the context they set. 1658 * the context they set.
@@ -1671,42 +1671,42 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1671 return ERR_PTR(err); 1671 return ERR_PTR(err);
1672} 1672}
1673 1673
1674static void free_counter_rcu(struct rcu_head *head) 1674static void free_event_rcu(struct rcu_head *head)
1675{ 1675{
1676 struct perf_counter *counter; 1676 struct perf_event *event;
1677 1677
1678 counter = container_of(head, struct perf_counter, rcu_head); 1678 event = container_of(head, struct perf_event, rcu_head);
1679 if (counter->ns) 1679 if (event->ns)
1680 put_pid_ns(counter->ns); 1680 put_pid_ns(event->ns);
1681 kfree(counter); 1681 kfree(event);
1682} 1682}
1683 1683
1684static void perf_pending_sync(struct perf_counter *counter); 1684static void perf_pending_sync(struct perf_event *event);
1685 1685
1686static void free_counter(struct perf_counter *counter) 1686static void free_event(struct perf_event *event)
1687{ 1687{
1688 perf_pending_sync(counter); 1688 perf_pending_sync(event);
1689 1689
1690 if (!counter->parent) { 1690 if (!event->parent) {
1691 atomic_dec(&nr_counters); 1691 atomic_dec(&nr_events);
1692 if (counter->attr.mmap) 1692 if (event->attr.mmap)
1693 atomic_dec(&nr_mmap_counters); 1693 atomic_dec(&nr_mmap_events);
1694 if (counter->attr.comm) 1694 if (event->attr.comm)
1695 atomic_dec(&nr_comm_counters); 1695 atomic_dec(&nr_comm_events);
1696 if (counter->attr.task) 1696 if (event->attr.task)
1697 atomic_dec(&nr_task_counters); 1697 atomic_dec(&nr_task_events);
1698 } 1698 }
1699 1699
1700 if (counter->output) { 1700 if (event->output) {
1701 fput(counter->output->filp); 1701 fput(event->output->filp);
1702 counter->output = NULL; 1702 event->output = NULL;
1703 } 1703 }
1704 1704
1705 if (counter->destroy) 1705 if (event->destroy)
1706 counter->destroy(counter); 1706 event->destroy(event);
1707 1707
1708 put_ctx(counter->ctx); 1708 put_ctx(event->ctx);
1709 call_rcu(&counter->rcu_head, free_counter_rcu); 1709 call_rcu(&event->rcu_head, free_event_rcu);
1710} 1710}
1711 1711
1712/* 1712/*
@@ -1714,43 +1714,43 @@ static void free_counter(struct perf_counter *counter)
1714 */ 1714 */
1715static int perf_release(struct inode *inode, struct file *file) 1715static int perf_release(struct inode *inode, struct file *file)
1716{ 1716{
1717 struct perf_counter *counter = file->private_data; 1717 struct perf_event *event = file->private_data;
1718 struct perf_counter_context *ctx = counter->ctx; 1718 struct perf_event_context *ctx = event->ctx;
1719 1719
1720 file->private_data = NULL; 1720 file->private_data = NULL;
1721 1721
1722 WARN_ON_ONCE(ctx->parent_ctx); 1722 WARN_ON_ONCE(ctx->parent_ctx);
1723 mutex_lock(&ctx->mutex); 1723 mutex_lock(&ctx->mutex);
1724 perf_counter_remove_from_context(counter); 1724 perf_event_remove_from_context(event);
1725 mutex_unlock(&ctx->mutex); 1725 mutex_unlock(&ctx->mutex);
1726 1726
1727 mutex_lock(&counter->owner->perf_counter_mutex); 1727 mutex_lock(&event->owner->perf_event_mutex);
1728 list_del_init(&counter->owner_entry); 1728 list_del_init(&event->owner_entry);
1729 mutex_unlock(&counter->owner->perf_counter_mutex); 1729 mutex_unlock(&event->owner->perf_event_mutex);
1730 put_task_struct(counter->owner); 1730 put_task_struct(event->owner);
1731 1731
1732 free_counter(counter); 1732 free_event(event);
1733 1733
1734 return 0; 1734 return 0;
1735} 1735}
1736 1736
1737static int perf_counter_read_size(struct perf_counter *counter) 1737static int perf_event_read_size(struct perf_event *event)
1738{ 1738{
1739 int entry = sizeof(u64); /* value */ 1739 int entry = sizeof(u64); /* value */
1740 int size = 0; 1740 int size = 0;
1741 int nr = 1; 1741 int nr = 1;
1742 1742
1743 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1743 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1744 size += sizeof(u64); 1744 size += sizeof(u64);
1745 1745
1746 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1746 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1747 size += sizeof(u64); 1747 size += sizeof(u64);
1748 1748
1749 if (counter->attr.read_format & PERF_FORMAT_ID) 1749 if (event->attr.read_format & PERF_FORMAT_ID)
1750 entry += sizeof(u64); 1750 entry += sizeof(u64);
1751 1751
1752 if (counter->attr.read_format & PERF_FORMAT_GROUP) { 1752 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1753 nr += counter->group_leader->nr_siblings; 1753 nr += event->group_leader->nr_siblings;
1754 size += sizeof(u64); 1754 size += sizeof(u64);
1755 } 1755 }
1756 1756
@@ -1759,27 +1759,27 @@ static int perf_counter_read_size(struct perf_counter *counter)
1759 return size; 1759 return size;
1760} 1760}
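A user-space caller can mirror this rule to size its read() buffer. The hunk elides the final accumulation, which by all appearances adds entry * nr to size before returning; the sketch below assumes exactly that. For example, PERF_FORMAT_GROUP | PERF_FORMAT_ID with three siblings gives 8 + 4 * 16 = 72 bytes:

#include <linux/perf_event.h>
#include <stdint.h>

/* Mirror of perf_event_read_size(); nr_siblings is the number of
 * group members besides the leader. */
static int read_size(uint64_t read_format, int nr_siblings)
{
	int entry = sizeof(uint64_t);	/* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);	/* the leading nr field */
	}
	size += nr * entry;	/* assumed from the elided lines */
	return size;
}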
1761 1761
1762static u64 perf_counter_read_value(struct perf_counter *counter) 1762static u64 perf_event_read_value(struct perf_event *event)
1763{ 1763{
1764 struct perf_counter *child; 1764 struct perf_event *child;
1765 u64 total = 0; 1765 u64 total = 0;
1766 1766
1767 total += perf_counter_read(counter); 1767 total += perf_event_read(event);
1768 list_for_each_entry(child, &counter->child_list, child_list) 1768 list_for_each_entry(child, &event->child_list, child_list)
1769 total += perf_counter_read(child); 1769 total += perf_event_read(child);
1770 1770
1771 return total; 1771 return total;
1772} 1772}
1773 1773
1774static int perf_counter_read_entry(struct perf_counter *counter, 1774static int perf_event_read_entry(struct perf_event *event,
1775 u64 read_format, char __user *buf) 1775 u64 read_format, char __user *buf)
1776{ 1776{
1777 int n = 0, count = 0; 1777 int n = 0, count = 0;
1778 u64 values[2]; 1778 u64 values[2];
1779 1779
1780 values[n++] = perf_counter_read_value(counter); 1780 values[n++] = perf_event_read_value(event);
1781 if (read_format & PERF_FORMAT_ID) 1781 if (read_format & PERF_FORMAT_ID)
1782 values[n++] = primary_counter_id(counter); 1782 values[n++] = primary_event_id(event);
1783 1783
1784 count = n * sizeof(u64); 1784 count = n * sizeof(u64);
1785 1785
@@ -1789,10 +1789,10 @@ static int perf_counter_read_entry(struct perf_counter *counter,
1789 return count; 1789 return count;
1790} 1790}
1791 1791
1792static int perf_counter_read_group(struct perf_counter *counter, 1792static int perf_event_read_group(struct perf_event *event,
1793 u64 read_format, char __user *buf) 1793 u64 read_format, char __user *buf)
1794{ 1794{
1795 struct perf_counter *leader = counter->group_leader, *sub; 1795 struct perf_event *leader = event->group_leader, *sub;
1796 int n = 0, size = 0, err = -EFAULT; 1796 int n = 0, size = 0, err = -EFAULT;
1797 u64 values[3]; 1797 u64 values[3];
1798 1798
@@ -1811,14 +1811,14 @@ static int perf_counter_read_group(struct perf_counter *counter,
1811 if (copy_to_user(buf, values, size)) 1811 if (copy_to_user(buf, values, size))
1812 return -EFAULT; 1812 return -EFAULT;
1813 1813
1814 err = perf_counter_read_entry(leader, read_format, buf + size); 1814 err = perf_event_read_entry(leader, read_format, buf + size);
1815 if (err < 0) 1815 if (err < 0)
1816 return err; 1816 return err;
1817 1817
1818 size += err; 1818 size += err;
1819 1819
1820 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 1820 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1821 err = perf_counter_read_entry(sub, read_format, 1821 err = perf_event_read_entry(sub, read_format,
1822 buf + size); 1822 buf + size);
1823 if (err < 0) 1823 if (err < 0)
1824 return err; 1824 return err;
@@ -1829,23 +1829,23 @@ static int perf_counter_read_group(struct perf_counter *counter,
1829 return size; 1829 return size;
1830} 1830}
1831 1831
1832static int perf_counter_read_one(struct perf_counter *counter, 1832static int perf_event_read_one(struct perf_event *event,
1833 u64 read_format, char __user *buf) 1833 u64 read_format, char __user *buf)
1834{ 1834{
1835 u64 values[4]; 1835 u64 values[4];
1836 int n = 0; 1836 int n = 0;
1837 1837
1838 values[n++] = perf_counter_read_value(counter); 1838 values[n++] = perf_event_read_value(event);
1839 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1839 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1840 values[n++] = counter->total_time_enabled + 1840 values[n++] = event->total_time_enabled +
1841 atomic64_read(&counter->child_total_time_enabled); 1841 atomic64_read(&event->child_total_time_enabled);
1842 } 1842 }
1843 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1843 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1844 values[n++] = counter->total_time_running + 1844 values[n++] = event->total_time_running +
1845 atomic64_read(&counter->child_total_time_running); 1845 atomic64_read(&event->child_total_time_running);
1846 } 1846 }
1847 if (read_format & PERF_FORMAT_ID) 1847 if (read_format & PERF_FORMAT_ID)
1848 values[n++] = primary_counter_id(counter); 1848 values[n++] = primary_event_id(event);
1849 1849
1850 if (copy_to_user(buf, values, n * sizeof(u64))) 1850 if (copy_to_user(buf, values, n * sizeof(u64)))
1851 return -EFAULT; 1851 return -EFAULT;
@@ -1854,32 +1854,32 @@ static int perf_counter_read_one(struct perf_counter *counter,
1854} 1854}
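From user space the single-event layout is just a struct of u64s in the order written above. A sketch, assuming the common read_format of both time fields plus PERF_FORMAT_ID, that scales the count for time the event spent multiplexed off the PMU:

#include <stdint.h>
#include <unistd.h>

/* Matches perf_event_read_one() with read_format =
 * TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING | ID. */
struct read_one {
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
	uint64_t id;
};

static uint64_t read_scaled(int fd)
{
	struct read_one r;

	if (read(fd, &r, sizeof(r)) != sizeof(r) || !r.time_running)
		return 0;
	/* Extrapolate for the fraction of time the event was counting. */
	return (uint64_t)((double)r.value * r.time_enabled / r.time_running);
}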
1855 1855
1856/* 1856/*
1857 * Read the performance counter - simple non blocking version for now 1857 * Read the performance event - simple non-blocking version for now
1858 */ 1858 */
1859static ssize_t 1859static ssize_t
1860perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) 1860perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
1861{ 1861{
1862 u64 read_format = counter->attr.read_format; 1862 u64 read_format = event->attr.read_format;
1863 int ret; 1863 int ret;
1864 1864
1865 /* 1865 /*
1866 * Return end-of-file for a read on a counter that is in 1866 * Return end-of-file for a read on an event that is in
1867 * error state (i.e. because it was pinned but it couldn't be 1867 * error state (i.e. because it was pinned but it couldn't be
1868 * scheduled on to the CPU at some point). 1868 * scheduled on to the CPU at some point).
1869 */ 1869 */
1870 if (counter->state == PERF_COUNTER_STATE_ERROR) 1870 if (event->state == PERF_EVENT_STATE_ERROR)
1871 return 0; 1871 return 0;
1872 1872
1873 if (count < perf_counter_read_size(counter)) 1873 if (count < perf_event_read_size(event))
1874 return -ENOSPC; 1874 return -ENOSPC;
1875 1875
1876 WARN_ON_ONCE(counter->ctx->parent_ctx); 1876 WARN_ON_ONCE(event->ctx->parent_ctx);
1877 mutex_lock(&counter->child_mutex); 1877 mutex_lock(&event->child_mutex);
1878 if (read_format & PERF_FORMAT_GROUP) 1878 if (read_format & PERF_FORMAT_GROUP)
1879 ret = perf_counter_read_group(counter, read_format, buf); 1879 ret = perf_event_read_group(event, read_format, buf);
1880 else 1880 else
1881 ret = perf_counter_read_one(counter, read_format, buf); 1881 ret = perf_event_read_one(event, read_format, buf);
1882 mutex_unlock(&counter->child_mutex); 1882 mutex_unlock(&event->child_mutex);
1883 1883
1884 return ret; 1884 return ret;
1885} 1885}
@@ -1887,79 +1887,79 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1887static ssize_t 1887static ssize_t
1888perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 1888perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1889{ 1889{
1890 struct perf_counter *counter = file->private_data; 1890 struct perf_event *event = file->private_data;
1891 1891
1892 return perf_read_hw(counter, buf, count); 1892 return perf_read_hw(event, buf, count);
1893} 1893}
1894 1894
1895static unsigned int perf_poll(struct file *file, poll_table *wait) 1895static unsigned int perf_poll(struct file *file, poll_table *wait)
1896{ 1896{
1897 struct perf_counter *counter = file->private_data; 1897 struct perf_event *event = file->private_data;
1898 struct perf_mmap_data *data; 1898 struct perf_mmap_data *data;
1899 unsigned int events = POLL_HUP; 1899 unsigned int events = POLL_HUP;
1900 1900
1901 rcu_read_lock(); 1901 rcu_read_lock();
1902 data = rcu_dereference(counter->data); 1902 data = rcu_dereference(event->data);
1903 if (data) 1903 if (data)
1904 events = atomic_xchg(&data->poll, 0); 1904 events = atomic_xchg(&data->poll, 0);
1905 rcu_read_unlock(); 1905 rcu_read_unlock();
1906 1906
1907 poll_wait(file, &counter->waitq, wait); 1907 poll_wait(file, &event->waitq, wait);
1908 1908
1909 return events; 1909 return events;
1910} 1910}
1911 1911
1912static void perf_counter_reset(struct perf_counter *counter) 1912static void perf_event_reset(struct perf_event *event)
1913{ 1913{
1914 (void)perf_counter_read(counter); 1914 (void)perf_event_read(event);
1915 atomic64_set(&counter->count, 0); 1915 atomic64_set(&event->count, 0);
1916 perf_counter_update_userpage(counter); 1916 perf_event_update_userpage(event);
1917} 1917}
1918 1918
1919/* 1919/*
1920 * Holding the top-level counter's child_mutex means that any 1920 * Holding the top-level event's child_mutex means that any
1921 * descendant process that has inherited this counter will block 1921 * descendant process that has inherited this event will block
1922 * in sync_child_counter if it goes to exit, thus satisfying the 1922 * in sync_child_event if it goes to exit, thus satisfying the
1923 * task existence requirements of perf_counter_enable/disable. 1923 * task existence requirements of perf_event_enable/disable.
1924 */ 1924 */
1925static void perf_counter_for_each_child(struct perf_counter *counter, 1925static void perf_event_for_each_child(struct perf_event *event,
1926 void (*func)(struct perf_counter *)) 1926 void (*func)(struct perf_event *))
1927{ 1927{
1928 struct perf_counter *child; 1928 struct perf_event *child;
1929 1929
1930 WARN_ON_ONCE(counter->ctx->parent_ctx); 1930 WARN_ON_ONCE(event->ctx->parent_ctx);
1931 mutex_lock(&counter->child_mutex); 1931 mutex_lock(&event->child_mutex);
1932 func(counter); 1932 func(event);
1933 list_for_each_entry(child, &counter->child_list, child_list) 1933 list_for_each_entry(child, &event->child_list, child_list)
1934 func(child); 1934 func(child);
1935 mutex_unlock(&counter->child_mutex); 1935 mutex_unlock(&event->child_mutex);
1936} 1936}
1937 1937
1938static void perf_counter_for_each(struct perf_counter *counter, 1938static void perf_event_for_each(struct perf_event *event,
1939 void (*func)(struct perf_counter *)) 1939 void (*func)(struct perf_event *))
1940{ 1940{
1941 struct perf_counter_context *ctx = counter->ctx; 1941 struct perf_event_context *ctx = event->ctx;
1942 struct perf_counter *sibling; 1942 struct perf_event *sibling;
1943 1943
1944 WARN_ON_ONCE(ctx->parent_ctx); 1944 WARN_ON_ONCE(ctx->parent_ctx);
1945 mutex_lock(&ctx->mutex); 1945 mutex_lock(&ctx->mutex);
1946 counter = counter->group_leader; 1946 event = event->group_leader;
1947 1947
1948 perf_counter_for_each_child(counter, func); 1948 perf_event_for_each_child(event, func);
1949 func(counter); 1949 func(event);
1950 list_for_each_entry(sibling, &counter->sibling_list, group_entry) 1950 list_for_each_entry(sibling, &event->sibling_list, group_entry)
1951 perf_counter_for_each_child(sibling, func); 1951 perf_event_for_each_child(sibling, func);
1952 mutex_unlock(&ctx->mutex); 1952 mutex_unlock(&ctx->mutex);
1953} 1953}
1954 1954
1955static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) 1955static int perf_event_period(struct perf_event *event, u64 __user *arg)
1956{ 1956{
1957 struct perf_counter_context *ctx = counter->ctx; 1957 struct perf_event_context *ctx = event->ctx;
1958 unsigned long size; 1958 unsigned long size;
1959 int ret = 0; 1959 int ret = 0;
1960 u64 value; 1960 u64 value;
1961 1961
1962 if (!counter->attr.sample_period) 1962 if (!event->attr.sample_period)
1963 return -EINVAL; 1963 return -EINVAL;
1964 1964
1965 size = copy_from_user(&value, arg, sizeof(value)); 1965 size = copy_from_user(&value, arg, sizeof(value));
@@ -1970,16 +1970,16 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1970 return -EINVAL; 1970 return -EINVAL;
1971 1971
1972 spin_lock_irq(&ctx->lock); 1972 spin_lock_irq(&ctx->lock);
1973 if (counter->attr.freq) { 1973 if (event->attr.freq) {
1974 if (value > sysctl_perf_counter_sample_rate) { 1974 if (value > sysctl_perf_event_sample_rate) {
1975 ret = -EINVAL; 1975 ret = -EINVAL;
1976 goto unlock; 1976 goto unlock;
1977 } 1977 }
1978 1978
1979 counter->attr.sample_freq = value; 1979 event->attr.sample_freq = value;
1980 } else { 1980 } else {
1981 counter->attr.sample_period = value; 1981 event->attr.sample_period = value;
1982 counter->hw.sample_period = value; 1982 event->hw.sample_period = value;
1983 } 1983 }
1984unlock: 1984unlock:
1985 spin_unlock_irq(&ctx->lock); 1985 spin_unlock_irq(&ctx->lock);
@@ -1987,80 +1987,80 @@ unlock:
1987 return ret; 1987 return ret;
1988} 1988}
1989 1989
1990int perf_counter_set_output(struct perf_counter *counter, int output_fd); 1990int perf_event_set_output(struct perf_event *event, int output_fd);
1991 1991
1992static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1992static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1993{ 1993{
1994 struct perf_counter *counter = file->private_data; 1994 struct perf_event *event = file->private_data;
1995 void (*func)(struct perf_counter *); 1995 void (*func)(struct perf_event *);
1996 u32 flags = arg; 1996 u32 flags = arg;
1997 1997
1998 switch (cmd) { 1998 switch (cmd) {
1999 case PERF_COUNTER_IOC_ENABLE: 1999 case PERF_EVENT_IOC_ENABLE:
2000 func = perf_counter_enable; 2000 func = perf_event_enable;
2001 break; 2001 break;
2002 case PERF_COUNTER_IOC_DISABLE: 2002 case PERF_EVENT_IOC_DISABLE:
2003 func = perf_counter_disable; 2003 func = perf_event_disable;
2004 break; 2004 break;
2005 case PERF_COUNTER_IOC_RESET: 2005 case PERF_EVENT_IOC_RESET:
2006 func = perf_counter_reset; 2006 func = perf_event_reset;
2007 break; 2007 break;
2008 2008
2009 case PERF_COUNTER_IOC_REFRESH: 2009 case PERF_EVENT_IOC_REFRESH:
2010 return perf_counter_refresh(counter, arg); 2010 return perf_event_refresh(event, arg);
2011 2011
2012 case PERF_COUNTER_IOC_PERIOD: 2012 case PERF_EVENT_IOC_PERIOD:
2013 return perf_counter_period(counter, (u64 __user *)arg); 2013 return perf_event_period(event, (u64 __user *)arg);
2014 2014
2015 case PERF_COUNTER_IOC_SET_OUTPUT: 2015 case PERF_EVENT_IOC_SET_OUTPUT:
2016 return perf_counter_set_output(counter, arg); 2016 return perf_event_set_output(event, arg);
2017 2017
2018 default: 2018 default:
2019 return -ENOTTY; 2019 return -ENOTTY;
2020 } 2020 }
2021 2021
2022 if (flags & PERF_IOC_FLAG_GROUP) 2022 if (flags & PERF_IOC_FLAG_GROUP)
2023 perf_counter_for_each(counter, func); 2023 perf_event_for_each(event, func);
2024 else 2024 else
2025 perf_counter_for_each_child(counter, func); 2025 perf_event_for_each_child(event, func);
2026 2026
2027 return 0; 2027 return 0;
2028} 2028}
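PERF_IOC_FLAG_GROUP is what selects perf_event_for_each() over the per-event child walk, fanning one command out across the whole group. A hypothetical call site:

#include <linux/perf_event.h>
#include <sys/ioctl.h>

/* group_fd is a group leader from perf_event_open().  With the GROUP
 * flag the disable reaches every sibling and their inherited children;
 * without it, only this event and its own children. */
static void stop_group(int group_fd)
{
	ioctl(group_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
}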
2029 2029
2030int perf_counter_task_enable(void) 2030int perf_event_task_enable(void)
2031{ 2031{
2032 struct perf_counter *counter; 2032 struct perf_event *event;
2033 2033
2034 mutex_lock(&current->perf_counter_mutex); 2034 mutex_lock(&current->perf_event_mutex);
2035 list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2035 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2036 perf_counter_for_each_child(counter, perf_counter_enable); 2036 perf_event_for_each_child(event, perf_event_enable);
2037 mutex_unlock(&current->perf_counter_mutex); 2037 mutex_unlock(&current->perf_event_mutex);
2038 2038
2039 return 0; 2039 return 0;
2040} 2040}
2041 2041
2042int perf_counter_task_disable(void) 2042int perf_event_task_disable(void)
2043{ 2043{
2044 struct perf_counter *counter; 2044 struct perf_event *event;
2045 2045
2046 mutex_lock(&current->perf_counter_mutex); 2046 mutex_lock(&current->perf_event_mutex);
2047 list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2047 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2048 perf_counter_for_each_child(counter, perf_counter_disable); 2048 perf_event_for_each_child(event, perf_event_disable);
2049 mutex_unlock(&current->perf_counter_mutex); 2049 mutex_unlock(&current->perf_event_mutex);
2050 2050
2051 return 0; 2051 return 0;
2052} 2052}
2053 2053
2054#ifndef PERF_COUNTER_INDEX_OFFSET 2054#ifndef PERF_EVENT_INDEX_OFFSET
2055# define PERF_COUNTER_INDEX_OFFSET 0 2055# define PERF_EVENT_INDEX_OFFSET 0
2056#endif 2056#endif
2057 2057
2058static int perf_counter_index(struct perf_counter *counter) 2058static int perf_event_index(struct perf_event *event)
2059{ 2059{
2060 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 2060 if (event->state != PERF_EVENT_STATE_ACTIVE)
2061 return 0; 2061 return 0;
2062 2062
2063 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; 2063 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2064} 2064}
2065 2065
2066/* 2066/*
@@ -2068,13 +2068,13 @@ static int perf_counter_index(struct perf_counter *counter)
2068 * the seqlock logic goes bad. We can not serialize this because the arch 2068 * the seqlock logic goes bad. We can not serialize this because the arch
2069 * code calls this from NMI context. 2069 * code calls this from NMI context.
2070 */ 2070 */
2071void perf_counter_update_userpage(struct perf_counter *counter) 2071void perf_event_update_userpage(struct perf_event *event)
2072{ 2072{
2073 struct perf_counter_mmap_page *userpg; 2073 struct perf_event_mmap_page *userpg;
2074 struct perf_mmap_data *data; 2074 struct perf_mmap_data *data;
2075 2075
2076 rcu_read_lock(); 2076 rcu_read_lock();
2077 data = rcu_dereference(counter->data); 2077 data = rcu_dereference(event->data);
2078 if (!data) 2078 if (!data)
2079 goto unlock; 2079 goto unlock;
2080 2080
@@ -2087,16 +2087,16 @@ void perf_counter_update_userpage(struct perf_counter *counter)
2087 preempt_disable(); 2087 preempt_disable();
2088 ++userpg->lock; 2088 ++userpg->lock;
2089 barrier(); 2089 barrier();
2090 userpg->index = perf_counter_index(counter); 2090 userpg->index = perf_event_index(event);
2091 userpg->offset = atomic64_read(&counter->count); 2091 userpg->offset = atomic64_read(&event->count);
2092 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 2092 if (event->state == PERF_EVENT_STATE_ACTIVE)
2093 userpg->offset -= atomic64_read(&counter->hw.prev_count); 2093 userpg->offset -= atomic64_read(&event->hw.prev_count);
2094 2094
2095 userpg->time_enabled = counter->total_time_enabled + 2095 userpg->time_enabled = event->total_time_enabled +
2096 atomic64_read(&counter->child_total_time_enabled); 2096 atomic64_read(&event->child_total_time_enabled);
2097 2097
2098 userpg->time_running = counter->total_time_running + 2098 userpg->time_running = event->total_time_running +
2099 atomic64_read(&counter->child_total_time_running); 2099 atomic64_read(&event->child_total_time_running);
2100 2100
2101 barrier(); 2101 barrier();
2102 ++userpg->lock; 2102 ++userpg->lock;
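
( NOTE: the paired ++userpg->lock increments with barrier() in between form a writer-side sequence count - the count is odd while an update is in flight. A user-space reader of the mmap'ed page retries until it observes a stable sequence. A minimal sketch, assuming read_user_offset() is our own illustrative helper; in real user code barrier() must be a compiler barrier:

	#include <linux/perf_event.h>

	#define barrier()	asm volatile("" ::: "memory")	/* compiler barrier */

	static __u64 read_user_offset(volatile struct perf_event_mmap_page *pg)
	{
		__u32 seq;
		__u64 offset;

		do {
			seq = pg->lock;
			barrier();		/* read fields only inside a stable sequence */
			offset = pg->offset;	/* likewise pg->index, pg->time_enabled, ... */
			barrier();
		} while (pg->lock != seq);	/* writer raced with us: retry */

		return offset;
	}
)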
@@ -2107,7 +2107,7 @@ unlock:
2107 2107
2108static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2108static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2109{ 2109{
2110 struct perf_counter *counter = vma->vm_file->private_data; 2110 struct perf_event *event = vma->vm_file->private_data;
2111 struct perf_mmap_data *data; 2111 struct perf_mmap_data *data;
2112 int ret = VM_FAULT_SIGBUS; 2112 int ret = VM_FAULT_SIGBUS;
2113 2113
@@ -2118,7 +2118,7 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2118 } 2118 }
2119 2119
2120 rcu_read_lock(); 2120 rcu_read_lock();
2121 data = rcu_dereference(counter->data); 2121 data = rcu_dereference(event->data);
2122 if (!data) 2122 if (!data)
2123 goto unlock; 2123 goto unlock;
2124 2124
@@ -2147,13 +2147,13 @@ unlock:
2147 return ret; 2147 return ret;
2148} 2148}
2149 2149
2150static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) 2150static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2151{ 2151{
2152 struct perf_mmap_data *data; 2152 struct perf_mmap_data *data;
2153 unsigned long size; 2153 unsigned long size;
2154 int i; 2154 int i;
2155 2155
2156 WARN_ON(atomic_read(&counter->mmap_count)); 2156 WARN_ON(atomic_read(&event->mmap_count));
2157 2157
2158 size = sizeof(struct perf_mmap_data); 2158 size = sizeof(struct perf_mmap_data);
2159 size += nr_pages * sizeof(void *); 2159 size += nr_pages * sizeof(void *);
@@ -2175,14 +2175,14 @@ static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
2175 data->nr_pages = nr_pages; 2175 data->nr_pages = nr_pages;
2176 atomic_set(&data->lock, -1); 2176 atomic_set(&data->lock, -1);
2177 2177
2178 if (counter->attr.watermark) { 2178 if (event->attr.watermark) {
2179 data->watermark = min_t(long, PAGE_SIZE * nr_pages, 2179 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2180 counter->attr.wakeup_watermark); 2180 event->attr.wakeup_watermark);
2181 } 2181 }
2182 if (!data->watermark) 2182 if (!data->watermark)
2183 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4); 2183 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2184 2184
2185 rcu_assign_pointer(counter->data, data); 2185 rcu_assign_pointer(event->data, data);
2186 2186
2187 return 0; 2187 return 0;
2188 2188
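
( NOTE: a quick worked example of the watermark default above: with 4 KiB pages and nr_pages = 16, the fallback max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4) is 16 KiB - a wakeup once a quarter of the 64 KiB buffer has filled. attr.wakeup_watermark can lower that, and the min_t() clamps it so it can never exceed the buffer itself. )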
@@ -2221,35 +2221,35 @@ static void __perf_mmap_data_free(struct rcu_head *rcu_head)
2221 kfree(data); 2221 kfree(data);
2222} 2222}
2223 2223
2224static void perf_mmap_data_free(struct perf_counter *counter) 2224static void perf_mmap_data_free(struct perf_event *event)
2225{ 2225{
2226 struct perf_mmap_data *data = counter->data; 2226 struct perf_mmap_data *data = event->data;
2227 2227
2228 WARN_ON(atomic_read(&counter->mmap_count)); 2228 WARN_ON(atomic_read(&event->mmap_count));
2229 2229
2230 rcu_assign_pointer(counter->data, NULL); 2230 rcu_assign_pointer(event->data, NULL);
2231 call_rcu(&data->rcu_head, __perf_mmap_data_free); 2231 call_rcu(&data->rcu_head, __perf_mmap_data_free);
2232} 2232}
2233 2233
2234static void perf_mmap_open(struct vm_area_struct *vma) 2234static void perf_mmap_open(struct vm_area_struct *vma)
2235{ 2235{
2236 struct perf_counter *counter = vma->vm_file->private_data; 2236 struct perf_event *event = vma->vm_file->private_data;
2237 2237
2238 atomic_inc(&counter->mmap_count); 2238 atomic_inc(&event->mmap_count);
2239} 2239}
2240 2240
2241static void perf_mmap_close(struct vm_area_struct *vma) 2241static void perf_mmap_close(struct vm_area_struct *vma)
2242{ 2242{
2243 struct perf_counter *counter = vma->vm_file->private_data; 2243 struct perf_event *event = vma->vm_file->private_data;
2244 2244
2245 WARN_ON_ONCE(counter->ctx->parent_ctx); 2245 WARN_ON_ONCE(event->ctx->parent_ctx);
2246 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { 2246 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2247 struct user_struct *user = current_user(); 2247 struct user_struct *user = current_user();
2248 2248
2249 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); 2249 atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm);
2250 vma->vm_mm->locked_vm -= counter->data->nr_locked; 2250 vma->vm_mm->locked_vm -= event->data->nr_locked;
2251 perf_mmap_data_free(counter); 2251 perf_mmap_data_free(event);
2252 mutex_unlock(&counter->mmap_mutex); 2252 mutex_unlock(&event->mmap_mutex);
2253 } 2253 }
2254} 2254}
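
( NOTE: atomic_dec_and_mutex_lock() is the classic last-reference idiom - it acquires mmap_mutex only when the count actually drops to zero, so every unmap except the final one stays lock-free, and the last one tears the buffer down under the mutex. )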
2255 2255
@@ -2262,7 +2262,7 @@ static struct vm_operations_struct perf_mmap_vmops = {
2262 2262
2263static int perf_mmap(struct file *file, struct vm_area_struct *vma) 2263static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2264{ 2264{
2265 struct perf_counter *counter = file->private_data; 2265 struct perf_event *event = file->private_data;
2266 unsigned long user_locked, user_lock_limit; 2266 unsigned long user_locked, user_lock_limit;
2267 struct user_struct *user = current_user(); 2267 struct user_struct *user = current_user();
2268 unsigned long locked, lock_limit; 2268 unsigned long locked, lock_limit;
@@ -2290,21 +2290,21 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2290 if (vma->vm_pgoff != 0) 2290 if (vma->vm_pgoff != 0)
2291 return -EINVAL; 2291 return -EINVAL;
2292 2292
2293 WARN_ON_ONCE(counter->ctx->parent_ctx); 2293 WARN_ON_ONCE(event->ctx->parent_ctx);
2294 mutex_lock(&counter->mmap_mutex); 2294 mutex_lock(&event->mmap_mutex);
2295 if (counter->output) { 2295 if (event->output) {
2296 ret = -EINVAL; 2296 ret = -EINVAL;
2297 goto unlock; 2297 goto unlock;
2298 } 2298 }
2299 2299
2300 if (atomic_inc_not_zero(&counter->mmap_count)) { 2300 if (atomic_inc_not_zero(&event->mmap_count)) {
2301 if (nr_pages != counter->data->nr_pages) 2301 if (nr_pages != event->data->nr_pages)
2302 ret = -EINVAL; 2302 ret = -EINVAL;
2303 goto unlock; 2303 goto unlock;
2304 } 2304 }
2305 2305
2306 user_extra = nr_pages + 1; 2306 user_extra = nr_pages + 1;
2307 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); 2307 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2308 2308
2309 /* 2309 /*
2310 * Increase the limit linearly with more CPUs: 2310 * Increase the limit linearly with more CPUs:
@@ -2327,20 +2327,20 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2327 goto unlock; 2327 goto unlock;
2328 } 2328 }
2329 2329
2330 WARN_ON(counter->data); 2330 WARN_ON(event->data);
2331 ret = perf_mmap_data_alloc(counter, nr_pages); 2331 ret = perf_mmap_data_alloc(event, nr_pages);
2332 if (ret) 2332 if (ret)
2333 goto unlock; 2333 goto unlock;
2334 2334
2335 atomic_set(&counter->mmap_count, 1); 2335 atomic_set(&event->mmap_count, 1);
2336 atomic_long_add(user_extra, &user->locked_vm); 2336 atomic_long_add(user_extra, &user->locked_vm);
2337 vma->vm_mm->locked_vm += extra; 2337 vma->vm_mm->locked_vm += extra;
2338 counter->data->nr_locked = extra; 2338 event->data->nr_locked = extra;
2339 if (vma->vm_flags & VM_WRITE) 2339 if (vma->vm_flags & VM_WRITE)
2340 counter->data->writable = 1; 2340 event->data->writable = 1;
2341 2341
2342unlock: 2342unlock:
2343 mutex_unlock(&counter->mmap_mutex); 2343 mutex_unlock(&event->mmap_mutex);
2344 2344
2345 vma->vm_flags |= VM_RESERVED; 2345 vma->vm_flags |= VM_RESERVED;
2346 vma->vm_ops = &perf_mmap_vmops; 2346 vma->vm_ops = &perf_mmap_vmops;
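
( NOTE: the unit conversion above is easy to miss: sysctl_perf_event_mlock is in KiB, so >> (PAGE_SHIFT - 10) turns KiB into pages. With 4 KiB pages (PAGE_SHIFT = 12), a setting of 512 gives 512 >> 2 = 128 lockable pages per unprivileged user, scaled up with the CPU count by the code in the elided part of this function. )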
@@ -2351,11 +2351,11 @@ unlock:
2351static int perf_fasync(int fd, struct file *filp, int on) 2351static int perf_fasync(int fd, struct file *filp, int on)
2352{ 2352{
2353 struct inode *inode = filp->f_path.dentry->d_inode; 2353 struct inode *inode = filp->f_path.dentry->d_inode;
2354 struct perf_counter *counter = filp->private_data; 2354 struct perf_event *event = filp->private_data;
2355 int retval; 2355 int retval;
2356 2356
2357 mutex_lock(&inode->i_mutex); 2357 mutex_lock(&inode->i_mutex);
2358 retval = fasync_helper(fd, filp, on, &counter->fasync); 2358 retval = fasync_helper(fd, filp, on, &event->fasync);
2359 mutex_unlock(&inode->i_mutex); 2359 mutex_unlock(&inode->i_mutex);
2360 2360
2361 if (retval < 0) 2361 if (retval < 0)
@@ -2375,19 +2375,19 @@ static const struct file_operations perf_fops = {
2375}; 2375};
2376 2376
2377/* 2377/*
2378 * Perf counter wakeup 2378 * Perf event wakeup
2379 * 2379 *
2380 * If there's data, ensure we set the poll() state and publish everything 2380 * If there's data, ensure we set the poll() state and publish everything
2381 * to user-space before waking everybody up. 2381 * to user-space before waking everybody up.
2382 */ 2382 */
2383 2383
2384void perf_counter_wakeup(struct perf_counter *counter) 2384void perf_event_wakeup(struct perf_event *event)
2385{ 2385{
2386 wake_up_all(&counter->waitq); 2386 wake_up_all(&event->waitq);
2387 2387
2388 if (counter->pending_kill) { 2388 if (event->pending_kill) {
2389 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill); 2389 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2390 counter->pending_kill = 0; 2390 event->pending_kill = 0;
2391 } 2391 }
2392} 2392}
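
( NOTE: kill_fasync() above is what makes the traditional O_ASYNC pattern work on a perf fd. A hedged user-space sketch - on_sigio and arm_async_io are hypothetical names, perf_fd comes from perf_event_open(2):

	#include <fcntl.h>
	#include <signal.h>
	#include <unistd.h>

	static void on_sigio(int sig)
	{
		/* data is ready - or POLL_HUP when the event limit was hit */
	}

	static void arm_async_io(int perf_fd)
	{
		signal(SIGIO, on_sigio);
		fcntl(perf_fd, F_SETOWN, getpid());		/* route SIGIO to us */
		fcntl(perf_fd, F_SETFL,
		      fcntl(perf_fd, F_GETFL) | O_ASYNC);	/* enables perf_fasync() */
	}
)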
2393 2393
@@ -2400,19 +2400,19 @@ void perf_counter_wakeup(struct perf_counter *counter)
2400 * single linked list and use cmpxchg() to add entries lockless. 2400 * single linked list and use cmpxchg() to add entries lockless.
2401 */ 2401 */
2402 2402
2403static void perf_pending_counter(struct perf_pending_entry *entry) 2403static void perf_pending_event(struct perf_pending_entry *entry)
2404{ 2404{
2405 struct perf_counter *counter = container_of(entry, 2405 struct perf_event *event = container_of(entry,
2406 struct perf_counter, pending); 2406 struct perf_event, pending);
2407 2407
2408 if (counter->pending_disable) { 2408 if (event->pending_disable) {
2409 counter->pending_disable = 0; 2409 event->pending_disable = 0;
2410 __perf_counter_disable(counter); 2410 __perf_event_disable(event);
2411 } 2411 }
2412 2412
2413 if (counter->pending_wakeup) { 2413 if (event->pending_wakeup) {
2414 counter->pending_wakeup = 0; 2414 event->pending_wakeup = 0;
2415 perf_counter_wakeup(counter); 2415 perf_event_wakeup(event);
2416 } 2416 }
2417} 2417}
2418 2418
@@ -2438,7 +2438,7 @@ static void perf_pending_queue(struct perf_pending_entry *entry,
2438 entry->next = *head; 2438 entry->next = *head;
2439 } while (cmpxchg(head, entry->next, entry) != entry->next); 2439 } while (cmpxchg(head, entry->next, entry) != entry->next);
2440 2440
2441 set_perf_counter_pending(); 2441 set_perf_event_pending();
2442 2442
2443 put_cpu_var(perf_pending_head); 2443 put_cpu_var(perf_pending_head);
2444} 2444}
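
( NOTE: the cmpxchg() loop above is the standard lock-free single-linked push; it is NMI-safe because no context ever holds a lock that an interrupting context must wait for - a lost race just retries. The same idiom in portable C11 atomics, purely for comparison (a sketch, not kernel code):

	#include <stdatomic.h>

	struct entry { struct entry *next; };

	static void push(struct entry *_Atomic *head, struct entry *e)
	{
		struct entry *old = atomic_load(head);
		do {
			e->next = old;	/* link behind the head we observed */
		} while (!atomic_compare_exchange_weak(head, &old, e));
	}
)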
@@ -2471,7 +2471,7 @@ static int __perf_pending_run(void)
2471 return nr; 2471 return nr;
2472} 2472}
2473 2473
2474static inline int perf_not_pending(struct perf_counter *counter) 2474static inline int perf_not_pending(struct perf_event *event)
2475{ 2475{
2476 /* 2476 /*
2477 * If we flush on whatever cpu we run, there is a chance we don't 2477 * If we flush on whatever cpu we run, there is a chance we don't
@@ -2486,15 +2486,15 @@ static inline int perf_not_pending(struct perf_counter *counter)
2486 * so that we do not miss the wakeup. -- see perf_pending_handle() 2486 * so that we do not miss the wakeup. -- see perf_pending_handle()
2487 */ 2487 */
2488 smp_rmb(); 2488 smp_rmb();
2489 return counter->pending.next == NULL; 2489 return event->pending.next == NULL;
2490} 2490}
2491 2491
2492static void perf_pending_sync(struct perf_counter *counter) 2492static void perf_pending_sync(struct perf_event *event)
2493{ 2493{
2494 wait_event(counter->waitq, perf_not_pending(counter)); 2494 wait_event(event->waitq, perf_not_pending(event));
2495} 2495}
2496 2496
2497void perf_counter_do_pending(void) 2497void perf_event_do_pending(void)
2498{ 2498{
2499 __perf_pending_run(); 2499 __perf_pending_run();
2500} 2500}
@@ -2535,25 +2535,25 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
2535 atomic_set(&handle->data->poll, POLL_IN); 2535 atomic_set(&handle->data->poll, POLL_IN);
2536 2536
2537 if (handle->nmi) { 2537 if (handle->nmi) {
2538 handle->counter->pending_wakeup = 1; 2538 handle->event->pending_wakeup = 1;
2539 perf_pending_queue(&handle->counter->pending, 2539 perf_pending_queue(&handle->event->pending,
2540 perf_pending_counter); 2540 perf_pending_event);
2541 } else 2541 } else
2542 perf_counter_wakeup(handle->counter); 2542 perf_event_wakeup(handle->event);
2543} 2543}
2544 2544
2545/* 2545/*
2546 * Curious locking construct. 2546 * Curious locking construct.
2547 * 2547 *
2548 * We need to ensure a later event doesn't publish a head when a former 2548 * We need to ensure a later event_id doesn't publish a head when a former
2549 * event isn't done writing. However since we need to deal with NMIs we 2549 * event_id isn't done writing. However since we need to deal with NMIs we
2550 * cannot fully serialize things. 2550 * cannot fully serialize things.
2551 * 2551 *
2552 * What we do is serialize between CPUs so we only have to deal with NMI 2552 * What we do is serialize between CPUs so we only have to deal with NMI
2553 * nesting on a single CPU. 2553 * nesting on a single CPU.
2554 * 2554 *
2555 * We only publish the head (and generate a wakeup) when the outer-most 2555 * We only publish the head (and generate a wakeup) when the outer-most
2556 * event completes. 2556 * event_id completes.
2557 */ 2557 */
2558static void perf_output_lock(struct perf_output_handle *handle) 2558static void perf_output_lock(struct perf_output_handle *handle)
2559{ 2559{
@@ -2657,10 +2657,10 @@ void perf_output_copy(struct perf_output_handle *handle,
2657} 2657}
2658 2658
2659int perf_output_begin(struct perf_output_handle *handle, 2659int perf_output_begin(struct perf_output_handle *handle,
2660 struct perf_counter *counter, unsigned int size, 2660 struct perf_event *event, unsigned int size,
2661 int nmi, int sample) 2661 int nmi, int sample)
2662{ 2662{
2663 struct perf_counter *output_counter; 2663 struct perf_event *output_event;
2664 struct perf_mmap_data *data; 2664 struct perf_mmap_data *data;
2665 unsigned long tail, offset, head; 2665 unsigned long tail, offset, head;
2666 int have_lost; 2666 int have_lost;
@@ -2672,21 +2672,21 @@ int perf_output_begin(struct perf_output_handle *handle,
2672 2672
2673 rcu_read_lock(); 2673 rcu_read_lock();
2674 /* 2674 /*
2675 * For inherited counters we send all the output towards the parent. 2675 * For inherited events we send all the output towards the parent.
2676 */ 2676 */
2677 if (counter->parent) 2677 if (event->parent)
2678 counter = counter->parent; 2678 event = event->parent;
2679 2679
2680 output_counter = rcu_dereference(counter->output); 2680 output_event = rcu_dereference(event->output);
2681 if (output_counter) 2681 if (output_event)
2682 counter = output_counter; 2682 event = output_event;
2683 2683
2684 data = rcu_dereference(counter->data); 2684 data = rcu_dereference(event->data);
2685 if (!data) 2685 if (!data)
2686 goto out; 2686 goto out;
2687 2687
2688 handle->data = data; 2688 handle->data = data;
2689 handle->counter = counter; 2689 handle->event = event;
2690 handle->nmi = nmi; 2690 handle->nmi = nmi;
2691 handle->sample = sample; 2691 handle->sample = sample;
2692 2692
@@ -2720,10 +2720,10 @@ int perf_output_begin(struct perf_output_handle *handle,
2720 atomic_set(&data->wakeup, 1); 2720 atomic_set(&data->wakeup, 1);
2721 2721
2722 if (have_lost) { 2722 if (have_lost) {
2723 lost_event.header.type = PERF_EVENT_LOST; 2723 lost_event.header.type = PERF_RECORD_LOST;
2724 lost_event.header.misc = 0; 2724 lost_event.header.misc = 0;
2725 lost_event.header.size = sizeof(lost_event); 2725 lost_event.header.size = sizeof(lost_event);
2726 lost_event.id = counter->id; 2726 lost_event.id = event->id;
2727 lost_event.lost = atomic_xchg(&data->lost, 0); 2727 lost_event.lost = atomic_xchg(&data->lost, 0);
2728 2728
2729 perf_output_put(handle, lost_event); 2729 perf_output_put(handle, lost_event);
@@ -2742,10 +2742,10 @@ out:
2742 2742
2743void perf_output_end(struct perf_output_handle *handle) 2743void perf_output_end(struct perf_output_handle *handle)
2744{ 2744{
2745 struct perf_counter *counter = handle->counter; 2745 struct perf_event *event = handle->event;
2746 struct perf_mmap_data *data = handle->data; 2746 struct perf_mmap_data *data = handle->data;
2747 2747
2748 int wakeup_events = counter->attr.wakeup_events; 2748 int wakeup_events = event->attr.wakeup_events;
2749 2749
2750 if (handle->sample && wakeup_events) { 2750 if (handle->sample && wakeup_events) {
2751 int events = atomic_inc_return(&data->events); 2751 int events = atomic_inc_return(&data->events);
@@ -2759,58 +2759,58 @@ void perf_output_end(struct perf_output_handle *handle)
2759 rcu_read_unlock(); 2759 rcu_read_unlock();
2760} 2760}
2761 2761
2762static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) 2762static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
2763{ 2763{
2764 /* 2764 /*
2765 * only top level counters have the pid namespace they were created in 2765 * only top level events have the pid namespace they were created in
2766 */ 2766 */
2767 if (counter->parent) 2767 if (event->parent)
2768 counter = counter->parent; 2768 event = event->parent;
2769 2769
2770 return task_tgid_nr_ns(p, counter->ns); 2770 return task_tgid_nr_ns(p, event->ns);
2771} 2771}
2772 2772
2773static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) 2773static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
2774{ 2774{
2775 /* 2775 /*
2776 * only top level counters have the pid namespace they were created in 2776 * only top level events have the pid namespace they were created in
2777 */ 2777 */
2778 if (counter->parent) 2778 if (event->parent)
2779 counter = counter->parent; 2779 event = event->parent;
2780 2780
2781 return task_pid_nr_ns(p, counter->ns); 2781 return task_pid_nr_ns(p, event->ns);
2782} 2782}
2783 2783
2784static void perf_output_read_one(struct perf_output_handle *handle, 2784static void perf_output_read_one(struct perf_output_handle *handle,
2785 struct perf_counter *counter) 2785 struct perf_event *event)
2786{ 2786{
2787 u64 read_format = counter->attr.read_format; 2787 u64 read_format = event->attr.read_format;
2788 u64 values[4]; 2788 u64 values[4];
2789 int n = 0; 2789 int n = 0;
2790 2790
2791 values[n++] = atomic64_read(&counter->count); 2791 values[n++] = atomic64_read(&event->count);
2792 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2792 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2793 values[n++] = counter->total_time_enabled + 2793 values[n++] = event->total_time_enabled +
2794 atomic64_read(&counter->child_total_time_enabled); 2794 atomic64_read(&event->child_total_time_enabled);
2795 } 2795 }
2796 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2796 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2797 values[n++] = counter->total_time_running + 2797 values[n++] = event->total_time_running +
2798 atomic64_read(&counter->child_total_time_running); 2798 atomic64_read(&event->child_total_time_running);
2799 } 2799 }
2800 if (read_format & PERF_FORMAT_ID) 2800 if (read_format & PERF_FORMAT_ID)
2801 values[n++] = primary_counter_id(counter); 2801 values[n++] = primary_event_id(event);
2802 2802
2803 perf_output_copy(handle, values, n * sizeof(u64)); 2803 perf_output_copy(handle, values, n * sizeof(u64));
2804} 2804}
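
( NOTE: the values[] filled above land in the ring buffer in exactly this order, so a consumer that requested every read_format bit decodes them as follows - a sketch of the user-visible layout, each field present only when its PERF_FORMAT_* bit was set:

	#include <linux/types.h>

	/* Layout written by perf_output_read_one(). */
	struct read_format_one {
		__u64 value;		/* event count */
		__u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
		__u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
		__u64 id;		/* PERF_FORMAT_ID */
	};
)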
2805 2805
2806/* 2806/*
2807 * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. 2807 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
2808 */ 2808 */
2809static void perf_output_read_group(struct perf_output_handle *handle, 2809static void perf_output_read_group(struct perf_output_handle *handle,
2810 struct perf_counter *counter) 2810 struct perf_event *event)
2811{ 2811{
2812 struct perf_counter *leader = counter->group_leader, *sub; 2812 struct perf_event *leader = event->group_leader, *sub;
2813 u64 read_format = counter->attr.read_format; 2813 u64 read_format = event->attr.read_format;
2814 u64 values[5]; 2814 u64 values[5];
2815 int n = 0; 2815 int n = 0;
2816 2816
@@ -2822,42 +2822,42 @@ static void perf_output_read_group(struct perf_output_handle *handle,
2822 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 2822 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2823 values[n++] = leader->total_time_running; 2823 values[n++] = leader->total_time_running;
2824 2824
2825 if (leader != counter) 2825 if (leader != event)
2826 leader->pmu->read(leader); 2826 leader->pmu->read(leader);
2827 2827
2828 values[n++] = atomic64_read(&leader->count); 2828 values[n++] = atomic64_read(&leader->count);
2829 if (read_format & PERF_FORMAT_ID) 2829 if (read_format & PERF_FORMAT_ID)
2830 values[n++] = primary_counter_id(leader); 2830 values[n++] = primary_event_id(leader);
2831 2831
2832 perf_output_copy(handle, values, n * sizeof(u64)); 2832 perf_output_copy(handle, values, n * sizeof(u64));
2833 2833
2834 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 2834 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2835 n = 0; 2835 n = 0;
2836 2836
2837 if (sub != counter) 2837 if (sub != event)
2838 sub->pmu->read(sub); 2838 sub->pmu->read(sub);
2839 2839
2840 values[n++] = atomic64_read(&sub->count); 2840 values[n++] = atomic64_read(&sub->count);
2841 if (read_format & PERF_FORMAT_ID) 2841 if (read_format & PERF_FORMAT_ID)
2842 values[n++] = primary_counter_id(sub); 2842 values[n++] = primary_event_id(sub);
2843 2843
2844 perf_output_copy(handle, values, n * sizeof(u64)); 2844 perf_output_copy(handle, values, n * sizeof(u64));
2845 } 2845 }
2846} 2846}
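
( NOTE: for PERF_FORMAT_GROUP the shape is one header block followed by a {value, id} pair per group member; the leading nr field is written in the part of this function elided above. Sketch of the full layout:

	#include <linux/types.h>

	struct read_format_group {
		__u64 nr;		/* group size, written before this hunk */
		__u64 time_enabled;	/* if requested; the leader's times cover the group */
		__u64 time_running;
		struct {
			__u64 value;
			__u64 id;	/* if PERF_FORMAT_ID */
		} cnt[];		/* leader first, then each sibling */
	};
)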
2847 2847
2848static void perf_output_read(struct perf_output_handle *handle, 2848static void perf_output_read(struct perf_output_handle *handle,
2849 struct perf_counter *counter) 2849 struct perf_event *event)
2850{ 2850{
2851 if (counter->attr.read_format & PERF_FORMAT_GROUP) 2851 if (event->attr.read_format & PERF_FORMAT_GROUP)
2852 perf_output_read_group(handle, counter); 2852 perf_output_read_group(handle, event);
2853 else 2853 else
2854 perf_output_read_one(handle, counter); 2854 perf_output_read_one(handle, event);
2855} 2855}
2856 2856
2857void perf_output_sample(struct perf_output_handle *handle, 2857void perf_output_sample(struct perf_output_handle *handle,
2858 struct perf_event_header *header, 2858 struct perf_event_header *header,
2859 struct perf_sample_data *data, 2859 struct perf_sample_data *data,
2860 struct perf_counter *counter) 2860 struct perf_event *event)
2861{ 2861{
2862 u64 sample_type = data->type; 2862 u64 sample_type = data->type;
2863 2863
@@ -2888,7 +2888,7 @@ void perf_output_sample(struct perf_output_handle *handle,
2888 perf_output_put(handle, data->period); 2888 perf_output_put(handle, data->period);
2889 2889
2890 if (sample_type & PERF_SAMPLE_READ) 2890 if (sample_type & PERF_SAMPLE_READ)
2891 perf_output_read(handle, counter); 2891 perf_output_read(handle, event);
2892 2892
2893 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2893 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2894 if (data->callchain) { 2894 if (data->callchain) {
@@ -2926,14 +2926,14 @@ void perf_output_sample(struct perf_output_handle *handle,
2926 2926
2927void perf_prepare_sample(struct perf_event_header *header, 2927void perf_prepare_sample(struct perf_event_header *header,
2928 struct perf_sample_data *data, 2928 struct perf_sample_data *data,
2929 struct perf_counter *counter, 2929 struct perf_event *event,
2930 struct pt_regs *regs) 2930 struct pt_regs *regs)
2931{ 2931{
2932 u64 sample_type = counter->attr.sample_type; 2932 u64 sample_type = event->attr.sample_type;
2933 2933
2934 data->type = sample_type; 2934 data->type = sample_type;
2935 2935
2936 header->type = PERF_EVENT_SAMPLE; 2936 header->type = PERF_RECORD_SAMPLE;
2937 header->size = sizeof(*header); 2937 header->size = sizeof(*header);
2938 2938
2939 header->misc = 0; 2939 header->misc = 0;
@@ -2947,8 +2947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
2947 2947
2948 if (sample_type & PERF_SAMPLE_TID) { 2948 if (sample_type & PERF_SAMPLE_TID) {
2949 /* namespace issues */ 2949 /* namespace issues */
2950 data->tid_entry.pid = perf_counter_pid(counter, current); 2950 data->tid_entry.pid = perf_event_pid(event, current);
2951 data->tid_entry.tid = perf_counter_tid(counter, current); 2951 data->tid_entry.tid = perf_event_tid(event, current);
2952 2952
2953 header->size += sizeof(data->tid_entry); 2953 header->size += sizeof(data->tid_entry);
2954 } 2954 }
@@ -2963,13 +2963,13 @@ void perf_prepare_sample(struct perf_event_header *header,
2963 header->size += sizeof(data->addr); 2963 header->size += sizeof(data->addr);
2964 2964
2965 if (sample_type & PERF_SAMPLE_ID) { 2965 if (sample_type & PERF_SAMPLE_ID) {
2966 data->id = primary_counter_id(counter); 2966 data->id = primary_event_id(event);
2967 2967
2968 header->size += sizeof(data->id); 2968 header->size += sizeof(data->id);
2969 } 2969 }
2970 2970
2971 if (sample_type & PERF_SAMPLE_STREAM_ID) { 2971 if (sample_type & PERF_SAMPLE_STREAM_ID) {
2972 data->stream_id = counter->id; 2972 data->stream_id = event->id;
2973 2973
2974 header->size += sizeof(data->stream_id); 2974 header->size += sizeof(data->stream_id);
2975 } 2975 }
@@ -2985,7 +2985,7 @@ void perf_prepare_sample(struct perf_event_header *header,
2985 header->size += sizeof(data->period); 2985 header->size += sizeof(data->period);
2986 2986
2987 if (sample_type & PERF_SAMPLE_READ) 2987 if (sample_type & PERF_SAMPLE_READ)
2988 header->size += perf_counter_read_size(counter); 2988 header->size += perf_event_read_size(event);
2989 2989
2990 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2990 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2991 int size = 1; 2991 int size = 1;
@@ -3011,25 +3011,25 @@ void perf_prepare_sample(struct perf_event_header *header,
3011 } 3011 }
3012} 3012}
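
( NOTE: because header->size is accumulated here to the exact on-wire record length, a consumer can walk the ring buffer by size alone, skipping record types it does not understand. A hedged reader sketch - walk_ring(), base/end and handle_sample() are hypothetical:

	#include <linux/perf_event.h>

	void handle_sample(struct perf_event_header *hdr);	/* hypothetical */

	static void walk_ring(void *base, void *end)
	{
		struct perf_event_header *hdr = base;

		while ((void *)hdr < end) {
			if (hdr->type == PERF_RECORD_SAMPLE)
				handle_sample(hdr);
			hdr = (void *)hdr + hdr->size;	/* size covers every optional field */
		}
	}
)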
3013 3013
3014static void perf_counter_output(struct perf_counter *counter, int nmi, 3014static void perf_event_output(struct perf_event *event, int nmi,
3015 struct perf_sample_data *data, 3015 struct perf_sample_data *data,
3016 struct pt_regs *regs) 3016 struct pt_regs *regs)
3017{ 3017{
3018 struct perf_output_handle handle; 3018 struct perf_output_handle handle;
3019 struct perf_event_header header; 3019 struct perf_event_header header;
3020 3020
3021 perf_prepare_sample(&header, data, counter, regs); 3021 perf_prepare_sample(&header, data, event, regs);
3022 3022
3023 if (perf_output_begin(&handle, counter, header.size, nmi, 1)) 3023 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3024 return; 3024 return;
3025 3025
3026 perf_output_sample(&handle, &header, data, counter); 3026 perf_output_sample(&handle, &header, data, event);
3027 3027
3028 perf_output_end(&handle); 3028 perf_output_end(&handle);
3029} 3029}
3030 3030
3031/* 3031/*
3032 * read event 3032 * read event_id
3033 */ 3033 */
3034 3034
3035struct perf_read_event { 3035struct perf_read_event {
@@ -3040,27 +3040,27 @@ struct perf_read_event {
3040}; 3040};
3041 3041
3042static void 3042static void
3043perf_counter_read_event(struct perf_counter *counter, 3043perf_event_read_event(struct perf_event *event,
3044 struct task_struct *task) 3044 struct task_struct *task)
3045{ 3045{
3046 struct perf_output_handle handle; 3046 struct perf_output_handle handle;
3047 struct perf_read_event read_event = { 3047 struct perf_read_event read_event = {
3048 .header = { 3048 .header = {
3049 .type = PERF_EVENT_READ, 3049 .type = PERF_RECORD_READ,
3050 .misc = 0, 3050 .misc = 0,
3051 .size = sizeof(read_event) + perf_counter_read_size(counter), 3051 .size = sizeof(read_event) + perf_event_read_size(event),
3052 }, 3052 },
3053 .pid = perf_counter_pid(counter, task), 3053 .pid = perf_event_pid(event, task),
3054 .tid = perf_counter_tid(counter, task), 3054 .tid = perf_event_tid(event, task),
3055 }; 3055 };
3056 int ret; 3056 int ret;
3057 3057
3058 ret = perf_output_begin(&handle, counter, read_event.header.size, 0, 0); 3058 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3059 if (ret) 3059 if (ret)
3060 return; 3060 return;
3061 3061
3062 perf_output_put(&handle, read_event); 3062 perf_output_put(&handle, read_event);
3063 perf_output_read(&handle, counter); 3063 perf_output_read(&handle, event);
3064 3064
3065 perf_output_end(&handle); 3065 perf_output_end(&handle);
3066} 3066}
@@ -3073,7 +3073,7 @@ perf_counter_read_event(struct perf_counter *counter,
3073 3073
3074struct perf_task_event { 3074struct perf_task_event {
3075 struct task_struct *task; 3075 struct task_struct *task;
3076 struct perf_counter_context *task_ctx; 3076 struct perf_event_context *task_ctx;
3077 3077
3078 struct { 3078 struct {
3079 struct perf_event_header header; 3079 struct perf_event_header header;
@@ -3083,10 +3083,10 @@ struct perf_task_event {
3083 u32 tid; 3083 u32 tid;
3084 u32 ptid; 3084 u32 ptid;
3085 u64 time; 3085 u64 time;
3086 } event; 3086 } event_id;
3087}; 3087};
3088 3088
3089static void perf_counter_task_output(struct perf_counter *counter, 3089static void perf_event_task_output(struct perf_event *event,
3090 struct perf_task_event *task_event) 3090 struct perf_task_event *task_event)
3091{ 3091{
3092 struct perf_output_handle handle; 3092 struct perf_output_handle handle;
@@ -3094,85 +3094,85 @@ static void perf_counter_task_output(struct perf_counter *counter,
3094 struct task_struct *task = task_event->task; 3094 struct task_struct *task = task_event->task;
3095 int ret; 3095 int ret;
3096 3096
3097 size = task_event->event.header.size; 3097 size = task_event->event_id.header.size;
3098 ret = perf_output_begin(&handle, counter, size, 0, 0); 3098 ret = perf_output_begin(&handle, event, size, 0, 0);
3099 3099
3100 if (ret) 3100 if (ret)
3101 return; 3101 return;
3102 3102
3103 task_event->event.pid = perf_counter_pid(counter, task); 3103 task_event->event_id.pid = perf_event_pid(event, task);
3104 task_event->event.ppid = perf_counter_pid(counter, current); 3104 task_event->event_id.ppid = perf_event_pid(event, current);
3105 3105
3106 task_event->event.tid = perf_counter_tid(counter, task); 3106 task_event->event_id.tid = perf_event_tid(event, task);
3107 task_event->event.ptid = perf_counter_tid(counter, current); 3107 task_event->event_id.ptid = perf_event_tid(event, current);
3108 3108
3109 task_event->event.time = perf_clock(); 3109 task_event->event_id.time = perf_clock();
3110 3110
3111 perf_output_put(&handle, task_event->event); 3111 perf_output_put(&handle, task_event->event_id);
3112 3112
3113 perf_output_end(&handle); 3113 perf_output_end(&handle);
3114} 3114}
3115 3115
3116static int perf_counter_task_match(struct perf_counter *counter) 3116static int perf_event_task_match(struct perf_event *event)
3117{ 3117{
3118 if (counter->attr.comm || counter->attr.mmap || counter->attr.task) 3118 if (event->attr.comm || event->attr.mmap || event->attr.task)
3119 return 1; 3119 return 1;
3120 3120
3121 return 0; 3121 return 0;
3122} 3122}
3123 3123
3124static void perf_counter_task_ctx(struct perf_counter_context *ctx, 3124static void perf_event_task_ctx(struct perf_event_context *ctx,
3125 struct perf_task_event *task_event) 3125 struct perf_task_event *task_event)
3126{ 3126{
3127 struct perf_counter *counter; 3127 struct perf_event *event;
3128 3128
3129 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3129 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3130 return; 3130 return;
3131 3131
3132 rcu_read_lock(); 3132 rcu_read_lock();
3133 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3133 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3134 if (perf_counter_task_match(counter)) 3134 if (perf_event_task_match(event))
3135 perf_counter_task_output(counter, task_event); 3135 perf_event_task_output(event, task_event);
3136 } 3136 }
3137 rcu_read_unlock(); 3137 rcu_read_unlock();
3138} 3138}
3139 3139
3140static void perf_counter_task_event(struct perf_task_event *task_event) 3140static void perf_event_task_event(struct perf_task_event *task_event)
3141{ 3141{
3142 struct perf_cpu_context *cpuctx; 3142 struct perf_cpu_context *cpuctx;
3143 struct perf_counter_context *ctx = task_event->task_ctx; 3143 struct perf_event_context *ctx = task_event->task_ctx;
3144 3144
3145 cpuctx = &get_cpu_var(perf_cpu_context); 3145 cpuctx = &get_cpu_var(perf_cpu_context);
3146 perf_counter_task_ctx(&cpuctx->ctx, task_event); 3146 perf_event_task_ctx(&cpuctx->ctx, task_event);
3147 put_cpu_var(perf_cpu_context); 3147 put_cpu_var(perf_cpu_context);
3148 3148
3149 rcu_read_lock(); 3149 rcu_read_lock();
3150 if (!ctx) 3150 if (!ctx)
3151 ctx = rcu_dereference(task_event->task->perf_counter_ctxp); 3151 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
3152 if (ctx) 3152 if (ctx)
3153 perf_counter_task_ctx(ctx, task_event); 3153 perf_event_task_ctx(ctx, task_event);
3154 rcu_read_unlock(); 3154 rcu_read_unlock();
3155} 3155}
3156 3156
3157static void perf_counter_task(struct task_struct *task, 3157static void perf_event_task(struct task_struct *task,
3158 struct perf_counter_context *task_ctx, 3158 struct perf_event_context *task_ctx,
3159 int new) 3159 int new)
3160{ 3160{
3161 struct perf_task_event task_event; 3161 struct perf_task_event task_event;
3162 3162
3163 if (!atomic_read(&nr_comm_counters) && 3163 if (!atomic_read(&nr_comm_events) &&
3164 !atomic_read(&nr_mmap_counters) && 3164 !atomic_read(&nr_mmap_events) &&
3165 !atomic_read(&nr_task_counters)) 3165 !atomic_read(&nr_task_events))
3166 return; 3166 return;
3167 3167
3168 task_event = (struct perf_task_event){ 3168 task_event = (struct perf_task_event){
3169 .task = task, 3169 .task = task,
3170 .task_ctx = task_ctx, 3170 .task_ctx = task_ctx,
3171 .event = { 3171 .event_id = {
3172 .header = { 3172 .header = {
3173 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, 3173 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3174 .misc = 0, 3174 .misc = 0,
3175 .size = sizeof(task_event.event), 3175 .size = sizeof(task_event.event_id),
3176 }, 3176 },
3177 /* .pid */ 3177 /* .pid */
3178 /* .ppid */ 3178 /* .ppid */
@@ -3181,12 +3181,12 @@ static void perf_counter_task(struct task_struct *task,
3181 }, 3181 },
3182 }; 3182 };
3183 3183
3184 perf_counter_task_event(&task_event); 3184 perf_event_task_event(&task_event);
3185} 3185}
3186 3186
3187void perf_counter_fork(struct task_struct *task) 3187void perf_event_fork(struct task_struct *task)
3188{ 3188{
3189 perf_counter_task(task, NULL, 1); 3189 perf_event_task(task, NULL, 1);
3190} 3190}
3191 3191
3192/* 3192/*
@@ -3203,56 +3203,56 @@ struct perf_comm_event {
3203 3203
3204 u32 pid; 3204 u32 pid;
3205 u32 tid; 3205 u32 tid;
3206 } event; 3206 } event_id;
3207}; 3207};
3208 3208
3209static void perf_counter_comm_output(struct perf_counter *counter, 3209static void perf_event_comm_output(struct perf_event *event,
3210 struct perf_comm_event *comm_event) 3210 struct perf_comm_event *comm_event)
3211{ 3211{
3212 struct perf_output_handle handle; 3212 struct perf_output_handle handle;
3213 int size = comm_event->event.header.size; 3213 int size = comm_event->event_id.header.size;
3214 int ret = perf_output_begin(&handle, counter, size, 0, 0); 3214 int ret = perf_output_begin(&handle, event, size, 0, 0);
3215 3215
3216 if (ret) 3216 if (ret)
3217 return; 3217 return;
3218 3218
3219 comm_event->event.pid = perf_counter_pid(counter, comm_event->task); 3219 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3220 comm_event->event.tid = perf_counter_tid(counter, comm_event->task); 3220 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3221 3221
3222 perf_output_put(&handle, comm_event->event); 3222 perf_output_put(&handle, comm_event->event_id);
3223 perf_output_copy(&handle, comm_event->comm, 3223 perf_output_copy(&handle, comm_event->comm,
3224 comm_event->comm_size); 3224 comm_event->comm_size);
3225 perf_output_end(&handle); 3225 perf_output_end(&handle);
3226} 3226}
3227 3227
3228static int perf_counter_comm_match(struct perf_counter *counter) 3228static int perf_event_comm_match(struct perf_event *event)
3229{ 3229{
3230 if (counter->attr.comm) 3230 if (event->attr.comm)
3231 return 1; 3231 return 1;
3232 3232
3233 return 0; 3233 return 0;
3234} 3234}
3235 3235
3236static void perf_counter_comm_ctx(struct perf_counter_context *ctx, 3236static void perf_event_comm_ctx(struct perf_event_context *ctx,
3237 struct perf_comm_event *comm_event) 3237 struct perf_comm_event *comm_event)
3238{ 3238{
3239 struct perf_counter *counter; 3239 struct perf_event *event;
3240 3240
3241 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3241 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3242 return; 3242 return;
3243 3243
3244 rcu_read_lock(); 3244 rcu_read_lock();
3245 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3245 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3246 if (perf_counter_comm_match(counter)) 3246 if (perf_event_comm_match(event))
3247 perf_counter_comm_output(counter, comm_event); 3247 perf_event_comm_output(event, comm_event);
3248 } 3248 }
3249 rcu_read_unlock(); 3249 rcu_read_unlock();
3250} 3250}
3251 3251
3252static void perf_counter_comm_event(struct perf_comm_event *comm_event) 3252static void perf_event_comm_event(struct perf_comm_event *comm_event)
3253{ 3253{
3254 struct perf_cpu_context *cpuctx; 3254 struct perf_cpu_context *cpuctx;
3255 struct perf_counter_context *ctx; 3255 struct perf_event_context *ctx;
3256 unsigned int size; 3256 unsigned int size;
3257 char comm[TASK_COMM_LEN]; 3257 char comm[TASK_COMM_LEN];
3258 3258
@@ -3263,10 +3263,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3263 comm_event->comm = comm; 3263 comm_event->comm = comm;
3264 comm_event->comm_size = size; 3264 comm_event->comm_size = size;
3265 3265
3266 comm_event->event.header.size = sizeof(comm_event->event) + size; 3266 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3267 3267
3268 cpuctx = &get_cpu_var(perf_cpu_context); 3268 cpuctx = &get_cpu_var(perf_cpu_context);
3269 perf_counter_comm_ctx(&cpuctx->ctx, comm_event); 3269 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3270 put_cpu_var(perf_cpu_context); 3270 put_cpu_var(perf_cpu_context);
3271 3271
3272 rcu_read_lock(); 3272 rcu_read_lock();
@@ -3274,29 +3274,29 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3274 * doesn't really matter which of the child contexts the 3274 * doesn't really matter which of the child contexts the
3275 * events ends up in. 3275 * events ends up in.
3276 */ 3276 */
3277 ctx = rcu_dereference(current->perf_counter_ctxp); 3277 ctx = rcu_dereference(current->perf_event_ctxp);
3278 if (ctx) 3278 if (ctx)
3279 perf_counter_comm_ctx(ctx, comm_event); 3279 perf_event_comm_ctx(ctx, comm_event);
3280 rcu_read_unlock(); 3280 rcu_read_unlock();
3281} 3281}
3282 3282
3283void perf_counter_comm(struct task_struct *task) 3283void perf_event_comm(struct task_struct *task)
3284{ 3284{
3285 struct perf_comm_event comm_event; 3285 struct perf_comm_event comm_event;
3286 3286
3287 if (task->perf_counter_ctxp) 3287 if (task->perf_event_ctxp)
3288 perf_counter_enable_on_exec(task); 3288 perf_event_enable_on_exec(task);
3289 3289
3290 if (!atomic_read(&nr_comm_counters)) 3290 if (!atomic_read(&nr_comm_events))
3291 return; 3291 return;
3292 3292
3293 comm_event = (struct perf_comm_event){ 3293 comm_event = (struct perf_comm_event){
3294 .task = task, 3294 .task = task,
3295 /* .comm */ 3295 /* .comm */
3296 /* .comm_size */ 3296 /* .comm_size */
3297 .event = { 3297 .event_id = {
3298 .header = { 3298 .header = {
3299 .type = PERF_EVENT_COMM, 3299 .type = PERF_RECORD_COMM,
3300 .misc = 0, 3300 .misc = 0,
3301 /* .size */ 3301 /* .size */
3302 }, 3302 },
@@ -3305,7 +3305,7 @@ void perf_counter_comm(struct task_struct *task)
3305 }, 3305 },
3306 }; 3306 };
3307 3307
3308 perf_counter_comm_event(&comm_event); 3308 perf_event_comm_event(&comm_event);
3309} 3309}
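
( NOTE: on the consumer side this becomes a PERF_RECORD_COMM record - the fixed fields shown in the struct above, followed by the comm string, which the elided size computation pads to a u64 boundary. Decoded view:

	#include <linux/perf_event.h>

	struct comm_record {
		struct perf_event_header header;	/* type == PERF_RECORD_COMM */
		__u32 pid, tid;
		char  comm[];				/* NUL-terminated, u64-padded */
	};
)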
3310 3310
3311/* 3311/*
@@ -3326,57 +3326,57 @@ struct perf_mmap_event {
3326 u64 start; 3326 u64 start;
3327 u64 len; 3327 u64 len;
3328 u64 pgoff; 3328 u64 pgoff;
3329 } event; 3329 } event_id;
3330}; 3330};
3331 3331
3332static void perf_counter_mmap_output(struct perf_counter *counter, 3332static void perf_event_mmap_output(struct perf_event *event,
3333 struct perf_mmap_event *mmap_event) 3333 struct perf_mmap_event *mmap_event)
3334{ 3334{
3335 struct perf_output_handle handle; 3335 struct perf_output_handle handle;
3336 int size = mmap_event->event.header.size; 3336 int size = mmap_event->event_id.header.size;
3337 int ret = perf_output_begin(&handle, counter, size, 0, 0); 3337 int ret = perf_output_begin(&handle, event, size, 0, 0);
3338 3338
3339 if (ret) 3339 if (ret)
3340 return; 3340 return;
3341 3341
3342 mmap_event->event.pid = perf_counter_pid(counter, current); 3342 mmap_event->event_id.pid = perf_event_pid(event, current);
3343 mmap_event->event.tid = perf_counter_tid(counter, current); 3343 mmap_event->event_id.tid = perf_event_tid(event, current);
3344 3344
3345 perf_output_put(&handle, mmap_event->event); 3345 perf_output_put(&handle, mmap_event->event_id);
3346 perf_output_copy(&handle, mmap_event->file_name, 3346 perf_output_copy(&handle, mmap_event->file_name,
3347 mmap_event->file_size); 3347 mmap_event->file_size);
3348 perf_output_end(&handle); 3348 perf_output_end(&handle);
3349} 3349}
3350 3350
3351static int perf_counter_mmap_match(struct perf_counter *counter, 3351static int perf_event_mmap_match(struct perf_event *event,
3352 struct perf_mmap_event *mmap_event) 3352 struct perf_mmap_event *mmap_event)
3353{ 3353{
3354 if (counter->attr.mmap) 3354 if (event->attr.mmap)
3355 return 1; 3355 return 1;
3356 3356
3357 return 0; 3357 return 0;
3358} 3358}
3359 3359
3360static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, 3360static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3361 struct perf_mmap_event *mmap_event) 3361 struct perf_mmap_event *mmap_event)
3362{ 3362{
3363 struct perf_counter *counter; 3363 struct perf_event *event;
3364 3364
3365 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3365 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3366 return; 3366 return;
3367 3367
3368 rcu_read_lock(); 3368 rcu_read_lock();
3369 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3369 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3370 if (perf_counter_mmap_match(counter, mmap_event)) 3370 if (perf_event_mmap_match(event, mmap_event))
3371 perf_counter_mmap_output(counter, mmap_event); 3371 perf_event_mmap_output(event, mmap_event);
3372 } 3372 }
3373 rcu_read_unlock(); 3373 rcu_read_unlock();
3374} 3374}
3375 3375
3376static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) 3376static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3377{ 3377{
3378 struct perf_cpu_context *cpuctx; 3378 struct perf_cpu_context *cpuctx;
3379 struct perf_counter_context *ctx; 3379 struct perf_event_context *ctx;
3380 struct vm_area_struct *vma = mmap_event->vma; 3380 struct vm_area_struct *vma = mmap_event->vma;
3381 struct file *file = vma->vm_file; 3381 struct file *file = vma->vm_file;
3382 unsigned int size; 3382 unsigned int size;
@@ -3424,10 +3424,10 @@ got_name:
3424 mmap_event->file_name = name; 3424 mmap_event->file_name = name;
3425 mmap_event->file_size = size; 3425 mmap_event->file_size = size;
3426 3426
3427 mmap_event->event.header.size = sizeof(mmap_event->event) + size; 3427 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3428 3428
3429 cpuctx = &get_cpu_var(perf_cpu_context); 3429 cpuctx = &get_cpu_var(perf_cpu_context);
3430 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); 3430 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3431 put_cpu_var(perf_cpu_context); 3431 put_cpu_var(perf_cpu_context);
3432 3432
3433 rcu_read_lock(); 3433 rcu_read_lock();
@@ -3435,28 +3435,28 @@ got_name:
3435 * doesn't really matter which of the child contexts the 3435 * doesn't really matter which of the child contexts the
3436 * events ends up in. 3436 * events ends up in.
3437 */ 3437 */
3438 ctx = rcu_dereference(current->perf_counter_ctxp); 3438 ctx = rcu_dereference(current->perf_event_ctxp);
3439 if (ctx) 3439 if (ctx)
3440 perf_counter_mmap_ctx(ctx, mmap_event); 3440 perf_event_mmap_ctx(ctx, mmap_event);
3441 rcu_read_unlock(); 3441 rcu_read_unlock();
3442 3442
3443 kfree(buf); 3443 kfree(buf);
3444} 3444}
3445 3445
3446void __perf_counter_mmap(struct vm_area_struct *vma) 3446void __perf_event_mmap(struct vm_area_struct *vma)
3447{ 3447{
3448 struct perf_mmap_event mmap_event; 3448 struct perf_mmap_event mmap_event;
3449 3449
3450 if (!atomic_read(&nr_mmap_counters)) 3450 if (!atomic_read(&nr_mmap_events))
3451 return; 3451 return;
3452 3452
3453 mmap_event = (struct perf_mmap_event){ 3453 mmap_event = (struct perf_mmap_event){
3454 .vma = vma, 3454 .vma = vma,
3455 /* .file_name */ 3455 /* .file_name */
3456 /* .file_size */ 3456 /* .file_size */
3457 .event = { 3457 .event_id = {
3458 .header = { 3458 .header = {
3459 .type = PERF_EVENT_MMAP, 3459 .type = PERF_RECORD_MMAP,
3460 .misc = 0, 3460 .misc = 0,
3461 /* .size */ 3461 /* .size */
3462 }, 3462 },
@@ -3468,14 +3468,14 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
3468 }, 3468 },
3469 }; 3469 };
3470 3470
3471 perf_counter_mmap_event(&mmap_event); 3471 perf_event_mmap_event(&mmap_event);
3472} 3472}
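
( NOTE: the matching user-visible record; addr/len/pgoff are filled from the vma in the initializer above, pid/tid later in perf_event_mmap_output(). Decoded view:

	#include <linux/perf_event.h>

	struct mmap_record {
		struct perf_event_header header;	/* type == PERF_RECORD_MMAP */
		__u32 pid, tid;
		__u64 addr, len, pgoff;
		char  filename[];			/* u64-padded */
	};
)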
3473 3473
3474/* 3474/*
3475 * IRQ throttle logging 3475 * IRQ throttle logging
3476 */ 3476 */
3477 3477
3478static void perf_log_throttle(struct perf_counter *counter, int enable) 3478static void perf_log_throttle(struct perf_event *event, int enable)
3479{ 3479{
3480 struct perf_output_handle handle; 3480 struct perf_output_handle handle;
3481 int ret; 3481 int ret;
@@ -3487,19 +3487,19 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3487 u64 stream_id; 3487 u64 stream_id;
3488 } throttle_event = { 3488 } throttle_event = {
3489 .header = { 3489 .header = {
3490 .type = PERF_EVENT_THROTTLE, 3490 .type = PERF_RECORD_THROTTLE,
3491 .misc = 0, 3491 .misc = 0,
3492 .size = sizeof(throttle_event), 3492 .size = sizeof(throttle_event),
3493 }, 3493 },
3494 .time = perf_clock(), 3494 .time = perf_clock(),
3495 .id = primary_counter_id(counter), 3495 .id = primary_event_id(event),
3496 .stream_id = counter->id, 3496 .stream_id = event->id,
3497 }; 3497 };
3498 3498
3499 if (enable) 3499 if (enable)
3500 throttle_event.header.type = PERF_EVENT_UNTHROTTLE; 3500 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3501 3501
3502 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); 3502 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3503 if (ret) 3503 if (ret)
3504 return; 3504 return;
3505 3505
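
( NOTE: mind the two IDs in throttle_event: .id goes through primary_event_id(), so an inherited child reports its parent's ID - the one user space actually knows about - while .stream_id identifies the specific event instance that was throttled. )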
@@ -3508,18 +3508,18 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3508} 3508}
3509 3509
3510/* 3510/*
3511 * Generic counter overflow handling, sampling. 3511 * Generic event overflow handling, sampling.
3512 */ 3512 */
3513 3513
3514static int __perf_counter_overflow(struct perf_counter *counter, int nmi, 3514static int __perf_event_overflow(struct perf_event *event, int nmi,
3515 int throttle, struct perf_sample_data *data, 3515 int throttle, struct perf_sample_data *data,
3516 struct pt_regs *regs) 3516 struct pt_regs *regs)
3517{ 3517{
3518 int events = atomic_read(&counter->event_limit); 3518 int events = atomic_read(&event->event_limit);
3519 struct hw_perf_counter *hwc = &counter->hw; 3519 struct hw_perf_event *hwc = &event->hw;
3520 int ret = 0; 3520 int ret = 0;
3521 3521
3522 throttle = (throttle && counter->pmu->unthrottle != NULL); 3522 throttle = (throttle && event->pmu->unthrottle != NULL);
3523 3523
3524 if (!throttle) { 3524 if (!throttle) {
3525 hwc->interrupts++; 3525 hwc->interrupts++;
@@ -3527,73 +3527,73 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
3527 if (hwc->interrupts != MAX_INTERRUPTS) { 3527 if (hwc->interrupts != MAX_INTERRUPTS) {
3528 hwc->interrupts++; 3528 hwc->interrupts++;
3529 if (HZ * hwc->interrupts > 3529 if (HZ * hwc->interrupts >
3530 (u64)sysctl_perf_counter_sample_rate) { 3530 (u64)sysctl_perf_event_sample_rate) {
3531 hwc->interrupts = MAX_INTERRUPTS; 3531 hwc->interrupts = MAX_INTERRUPTS;
3532 perf_log_throttle(counter, 0); 3532 perf_log_throttle(event, 0);
3533 ret = 1; 3533 ret = 1;
3534 } 3534 }
3535 } else { 3535 } else {
3536 /* 3536 /*
3537 * Keep re-disabling counters even though on the previous 3537 * Keep re-disabling events even though on the previous
3538 * pass we disabled it - just in case we raced with a 3538 * pass we disabled it - just in case we raced with a
3539 * sched-in and the counter got enabled again: 3539 * sched-in and the event got enabled again:
3540 */ 3540 */
3541 ret = 1; 3541 ret = 1;
3542 } 3542 }
3543 } 3543 }
3544 3544
3545 if (counter->attr.freq) { 3545 if (event->attr.freq) {
3546 u64 now = perf_clock(); 3546 u64 now = perf_clock();
3547 s64 delta = now - hwc->freq_stamp; 3547 s64 delta = now - hwc->freq_stamp;
3548 3548
3549 hwc->freq_stamp = now; 3549 hwc->freq_stamp = now;
3550 3550
3551 if (delta > 0 && delta < TICK_NSEC) 3551 if (delta > 0 && delta < TICK_NSEC)
3552 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); 3552 perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
3553 } 3553 }
3554 3554
3555 /* 3555 /*
3556 * XXX event_limit might not quite work as expected on inherited 3556 * XXX event_limit might not quite work as expected on inherited
3557 * counters 3557 * events
3558 */ 3558 */
3559 3559
3560 counter->pending_kill = POLL_IN; 3560 event->pending_kill = POLL_IN;
3561 if (events && atomic_dec_and_test(&counter->event_limit)) { 3561 if (events && atomic_dec_and_test(&event->event_limit)) {
3562 ret = 1; 3562 ret = 1;
3563 counter->pending_kill = POLL_HUP; 3563 event->pending_kill = POLL_HUP;
3564 if (nmi) { 3564 if (nmi) {
3565 counter->pending_disable = 1; 3565 event->pending_disable = 1;
3566 perf_pending_queue(&counter->pending, 3566 perf_pending_queue(&event->pending,
3567 perf_pending_counter); 3567 perf_pending_event);
3568 } else 3568 } else
3569 perf_counter_disable(counter); 3569 perf_event_disable(event);
3570 } 3570 }
3571 3571
3572 perf_counter_output(counter, nmi, data, regs); 3572 perf_event_output(event, nmi, data, regs);
3573 return ret; 3573 return ret;
3574} 3574}
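
( NOTE: a worked example of the throttle test: HZ * hwc->interrupts > sysctl_perf_event_sample_rate cuts an event off once its per-tick interrupt count exceeds rate/HZ. With HZ = 1000 and the default rate of 100000 in this era of the code, that is 100 overflows within a single tick before PERF_RECORD_THROTTLE is logged and interrupts are pinned at MAX_INTERRUPTS until the unthrottle. )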
3575 3575
3576int perf_counter_overflow(struct perf_counter *counter, int nmi, 3576int perf_event_overflow(struct perf_event *event, int nmi,
3577 struct perf_sample_data *data, 3577 struct perf_sample_data *data,
3578 struct pt_regs *regs) 3578 struct pt_regs *regs)
3579{ 3579{
3580 return __perf_counter_overflow(counter, nmi, 1, data, regs); 3580 return __perf_event_overflow(event, nmi, 1, data, regs);
3581} 3581}
3582 3582
3583/* 3583/*
3584 * Generic software counter infrastructure 3584 * Generic software event infrastructure
3585 */ 3585 */
3586 3586
3587/* 3587/*
3588 * We directly increment counter->count and keep a second value in 3588 * We directly increment event->count and keep a second value in
3589 * counter->hw.period_left to count intervals. This period counter 3589 * event->hw.period_left to count intervals. This period event
3590 * is kept in the range [-sample_period, 0] so that we can use the 3590 * is kept in the range [-sample_period, 0] so that we can use the
3591 * sign as trigger. 3591 * sign as trigger.
3592 */ 3592 */
3593 3593
3594static u64 perf_swcounter_set_period(struct perf_counter *counter) 3594static u64 perf_swevent_set_period(struct perf_event *event)
3595{ 3595{
3596 struct hw_perf_counter *hwc = &counter->hw; 3596 struct hw_perf_event *hwc = &event->hw;
3597 u64 period = hwc->last_period; 3597 u64 period = hwc->last_period;
3598 u64 nr, offset; 3598 u64 nr, offset;
3599 s64 old, val; 3599 s64 old, val;
@@ -3614,22 +3614,22 @@ again:
3614 return nr; 3614 return nr;
3615} 3615}
3616 3616
3617static void perf_swcounter_overflow(struct perf_counter *counter, 3617static void perf_swevent_overflow(struct perf_event *event,
3618 int nmi, struct perf_sample_data *data, 3618 int nmi, struct perf_sample_data *data,
3619 struct pt_regs *regs) 3619 struct pt_regs *regs)
3620{ 3620{
3621 struct hw_perf_counter *hwc = &counter->hw; 3621 struct hw_perf_event *hwc = &event->hw;
3622 int throttle = 0; 3622 int throttle = 0;
3623 u64 overflow; 3623 u64 overflow;
3624 3624
3625 data->period = counter->hw.last_period; 3625 data->period = event->hw.last_period;
3626 overflow = perf_swcounter_set_period(counter); 3626 overflow = perf_swevent_set_period(event);
3627 3627
3628 if (hwc->interrupts == MAX_INTERRUPTS) 3628 if (hwc->interrupts == MAX_INTERRUPTS)
3629 return; 3629 return;
3630 3630
3631 for (; overflow; overflow--) { 3631 for (; overflow; overflow--) {
3632 if (__perf_counter_overflow(counter, nmi, throttle, 3632 if (__perf_event_overflow(event, nmi, throttle,
3633 data, regs)) { 3633 data, regs)) {
3634 /* 3634 /*
3635 * We inhibit the overflow from happening when 3635 * We inhibit the overflow from happening when
@@ -3641,20 +3641,20 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
3641 } 3641 }
3642} 3642}
3643 3643
3644static void perf_swcounter_unthrottle(struct perf_counter *counter) 3644static void perf_swevent_unthrottle(struct perf_event *event)
3645{ 3645{
3646 /* 3646 /*
3647 * Nothing to do, we already reset hwc->interrupts. 3647 * Nothing to do, we already reset hwc->interrupts.
3648 */ 3648 */
3649} 3649}
3650 3650
3651static void perf_swcounter_add(struct perf_counter *counter, u64 nr, 3651static void perf_swevent_add(struct perf_event *event, u64 nr,
3652 int nmi, struct perf_sample_data *data, 3652 int nmi, struct perf_sample_data *data,
3653 struct pt_regs *regs) 3653 struct pt_regs *regs)
3654{ 3654{
3655 struct hw_perf_counter *hwc = &counter->hw; 3655 struct hw_perf_event *hwc = &event->hw;
3656 3656
3657 atomic64_add(nr, &counter->count); 3657 atomic64_add(nr, &event->count);
3658 3658
3659 if (!hwc->sample_period) 3659 if (!hwc->sample_period)
3660 return; 3660 return;
@@ -3663,29 +3663,29 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3663 return; 3663 return;
3664 3664
3665 if (!atomic64_add_negative(nr, &hwc->period_left)) 3665 if (!atomic64_add_negative(nr, &hwc->period_left))
3666 perf_swcounter_overflow(counter, nmi, data, regs); 3666 perf_swevent_overflow(event, nmi, data, regs);
3667} 3667}
3668 3668
3669static int perf_swcounter_is_counting(struct perf_counter *counter) 3669static int perf_swevent_is_counting(struct perf_event *event)
3670{ 3670{
3671 /* 3671 /*
3672 * The counter is active, we're good! 3672 * The event is active, we're good!
3673 */ 3673 */
3674 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 3674 if (event->state == PERF_EVENT_STATE_ACTIVE)
3675 return 1; 3675 return 1;
3676 3676
3677 /* 3677 /*
3678 * The counter is off/error, not counting. 3678 * The event is off/error, not counting.
3679 */ 3679 */
3680 if (counter->state != PERF_COUNTER_STATE_INACTIVE) 3680 if (event->state != PERF_EVENT_STATE_INACTIVE)
3681 return 0; 3681 return 0;
3682 3682
3683 /* 3683 /*
3684 * The counter is inactive; if the context is active 3684 * The event is inactive; if the context is active
3685 * we're part of a group that didn't make it onto the 'pmu', 3685 * we're part of a group that didn't make it onto the 'pmu',
3686 * not counting. 3686 * not counting.
3687 */ 3687 */
3688 if (counter->ctx->is_active) 3688 if (event->ctx->is_active)
3689 return 0; 3689 return 0;
3690 3690
3691 /* 3691 /*
@@ -3696,49 +3696,49 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
3696 return 1; 3696 return 1;
3697} 3697}
3698 3698
3699static int perf_swcounter_match(struct perf_counter *counter, 3699static int perf_swevent_match(struct perf_event *event,
3700 enum perf_type_id type, 3700 enum perf_type_id type,
3701 u32 event_id, struct pt_regs *regs) 3701 u32 event_id, struct pt_regs *regs)
3702{ 3702{
3703 if (!perf_swcounter_is_counting(counter)) 3703 if (!perf_swevent_is_counting(event))
3704 return 0; 3704 return 0;
3705 3705
3706 if (counter->attr.type != type) 3706 if (event->attr.type != type)
3707 return 0; 3707 return 0;
3708 if (counter->attr.config != event_id) 3708 if (event->attr.config != event_id)
3709 return 0; 3709 return 0;
3710 3710
3711 if (regs) { 3711 if (regs) {
3712 if (counter->attr.exclude_user && user_mode(regs)) 3712 if (event->attr.exclude_user && user_mode(regs))
3713 return 0; 3713 return 0;
3714 3714
3715 if (counter->attr.exclude_kernel && !user_mode(regs)) 3715 if (event->attr.exclude_kernel && !user_mode(regs))
3716 return 0; 3716 return 0;
3717 } 3717 }
3718 3718
3719 return 1; 3719 return 1;
3720} 3720}
3721 3721
3722static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, 3722static void perf_swevent_ctx_event(struct perf_event_context *ctx,
3723 enum perf_type_id type, 3723 enum perf_type_id type,
3724 u32 event_id, u64 nr, int nmi, 3724 u32 event_id, u64 nr, int nmi,
3725 struct perf_sample_data *data, 3725 struct perf_sample_data *data,
3726 struct pt_regs *regs) 3726 struct pt_regs *regs)
3727{ 3727{
3728 struct perf_counter *counter; 3728 struct perf_event *event;
3729 3729
3730 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3730 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3731 return; 3731 return;
3732 3732
3733 rcu_read_lock(); 3733 rcu_read_lock();
3734 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3734 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3735 if (perf_swcounter_match(counter, type, event_id, regs)) 3735 if (perf_swevent_match(event, type, event_id, regs))
3736 perf_swcounter_add(counter, nr, nmi, data, regs); 3736 perf_swevent_add(event, nr, nmi, data, regs);
3737 } 3737 }
3738 rcu_read_unlock(); 3738 rcu_read_unlock();
3739} 3739}
3740 3740
3741static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) 3741static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
3742{ 3742{
3743 if (in_nmi()) 3743 if (in_nmi())
3744 return &cpuctx->recursion[3]; 3744 return &cpuctx->recursion[3];
@@ -3752,14 +3752,14 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3752 return &cpuctx->recursion[0]; 3752 return &cpuctx->recursion[0];
3753} 3753}
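Only the NMI slot and the task-context fallback of the recursion guard are visible in this hunk. A hedged sketch of the usual four-level pattern -- the two middle cases are assumptions marked as such, the sketch_ name is hypothetical, and in_nmi()/in_irq()/in_softirq() come from <linux/hardirq.h>:

	static int *sketch_recursion_slot(int recursion[4])
	{
		if (in_nmi())			/* visible above: slot 3 */
			return &recursion[3];
		if (in_irq())			/* assumed: hard-irq slot */
			return &recursion[2];
		if (in_softirq())		/* assumed: soft-irq slot */
			return &recursion[1];
		return &recursion[0];		/* visible above: task slot */
	}

One flag per execution context lets an event fire from an NMI that interrupted an IRQ that interrupted a task without any level falsely detecting recursion.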
3754 3754
3755static void do_perf_swcounter_event(enum perf_type_id type, u32 event, 3755static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
3756 u64 nr, int nmi, 3756 u64 nr, int nmi,
3757 struct perf_sample_data *data, 3757 struct perf_sample_data *data,
3758 struct pt_regs *regs) 3758 struct pt_regs *regs)
3759{ 3759{
3760 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); 3760 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3761 int *recursion = perf_swcounter_recursion_context(cpuctx); 3761 int *recursion = perf_swevent_recursion_context(cpuctx);
3762 struct perf_counter_context *ctx; 3762 struct perf_event_context *ctx;
3763 3763
3764 if (*recursion) 3764 if (*recursion)
3765 goto out; 3765 goto out;
@@ -3767,16 +3767,16 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3767 (*recursion)++; 3767 (*recursion)++;
3768 barrier(); 3768 barrier();
3769 3769
3770 perf_swcounter_ctx_event(&cpuctx->ctx, type, event, 3770 perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
3771 nr, nmi, data, regs); 3771 nr, nmi, data, regs);
3772 rcu_read_lock(); 3772 rcu_read_lock();
3773 /* 3773 /*
3774 * doesn't really matter which of the child contexts the 3774 * doesn't really matter which of the child contexts the
3775 * event ends up in. 3775 * event ends up in.
3776 */ 3776 */
3777 ctx = rcu_dereference(current->perf_counter_ctxp); 3777 ctx = rcu_dereference(current->perf_event_ctxp);
3778 if (ctx) 3778 if (ctx)
3779 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs); 3779 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
3780 rcu_read_unlock(); 3780 rcu_read_unlock();
3781 3781
3782 barrier(); 3782 barrier();
@@ -3786,57 +3786,57 @@ out:
3786 put_cpu_var(perf_cpu_context); 3786 put_cpu_var(perf_cpu_context);
3787} 3787}
3788 3788
3789void __perf_swcounter_event(u32 event, u64 nr, int nmi, 3789void __perf_sw_event(u32 event_id, u64 nr, int nmi,
3790 struct pt_regs *regs, u64 addr) 3790 struct pt_regs *regs, u64 addr)
3791{ 3791{
3792 struct perf_sample_data data = { 3792 struct perf_sample_data data = {
3793 .addr = addr, 3793 .addr = addr,
3794 }; 3794 };
3795 3795
3796 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, 3796 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
3797 &data, regs); 3797 &data, regs);
3798} 3798}
3799 3799
3800static void perf_swcounter_read(struct perf_counter *counter) 3800static void perf_swevent_read(struct perf_event *event)
3801{ 3801{
3802} 3802}
3803 3803
3804static int perf_swcounter_enable(struct perf_counter *counter) 3804static int perf_swevent_enable(struct perf_event *event)
3805{ 3805{
3806 struct hw_perf_counter *hwc = &counter->hw; 3806 struct hw_perf_event *hwc = &event->hw;
3807 3807
3808 if (hwc->sample_period) { 3808 if (hwc->sample_period) {
3809 hwc->last_period = hwc->sample_period; 3809 hwc->last_period = hwc->sample_period;
3810 perf_swcounter_set_period(counter); 3810 perf_swevent_set_period(event);
3811 } 3811 }
3812 return 0; 3812 return 0;
3813} 3813}
3814 3814
3815static void perf_swcounter_disable(struct perf_counter *counter) 3815static void perf_swevent_disable(struct perf_event *event)
3816{ 3816{
3817} 3817}
3818 3818
3819static const struct pmu perf_ops_generic = { 3819static const struct pmu perf_ops_generic = {
3820 .enable = perf_swcounter_enable, 3820 .enable = perf_swevent_enable,
3821 .disable = perf_swcounter_disable, 3821 .disable = perf_swevent_disable,
3822 .read = perf_swcounter_read, 3822 .read = perf_swevent_read,
3823 .unthrottle = perf_swcounter_unthrottle, 3823 .unthrottle = perf_swevent_unthrottle,
3824}; 3824};
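These const struct pmu tables are the only interface the generic layer needs: ->enable starts the event, ->read folds backend state into event->count, ->disable stops it. A hedged sketch of the calling pattern -- the real call sites live elsewhere in this file, and sketch_drive is hypothetical:

	static void sketch_drive(struct perf_event *event)
	{
		const struct pmu *pmu = event->pmu;

		if (pmu->enable(event) == 0) {	/* start counting */
			pmu->read(event);	/* fold state into event->count */
			pmu->disable(event);	/* stop; final update */
		}
	}

This is why perf_swevent_read() and perf_swevent_disable() can be empty below: software events update event->count eagerly on every perf_swevent_add(), so there is no hardware state left to fold in.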
3825 3825
3826/* 3826/*
3827 * hrtimer based swcounter callback 3827 * hrtimer based swevent callback
3828 */ 3828 */
3829 3829
3830static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) 3830static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3831{ 3831{
3832 enum hrtimer_restart ret = HRTIMER_RESTART; 3832 enum hrtimer_restart ret = HRTIMER_RESTART;
3833 struct perf_sample_data data; 3833 struct perf_sample_data data;
3834 struct pt_regs *regs; 3834 struct pt_regs *regs;
3835 struct perf_counter *counter; 3835 struct perf_event *event;
3836 u64 period; 3836 u64 period;
3837 3837
3838 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); 3838 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
3839 counter->pmu->read(counter); 3839 event->pmu->read(event);
3840 3840
3841 data.addr = 0; 3841 data.addr = 0;
3842 regs = get_irq_regs(); 3842 regs = get_irq_regs();
@@ -3844,45 +3844,45 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3844 * In case we exclude kernel IPs or are somehow not in interrupt 3844 * In case we exclude kernel IPs or are somehow not in interrupt
3845 * context, provide the next best thing, the user IP. 3845 * context, provide the next best thing, the user IP.
3846 */ 3846 */
3847 if ((counter->attr.exclude_kernel || !regs) && 3847 if ((event->attr.exclude_kernel || !regs) &&
3848 !counter->attr.exclude_user) 3848 !event->attr.exclude_user)
3849 regs = task_pt_regs(current); 3849 regs = task_pt_regs(current);
3850 3850
3851 if (regs) { 3851 if (regs) {
3852 if (perf_counter_overflow(counter, 0, &data, regs)) 3852 if (perf_event_overflow(event, 0, &data, regs))
3853 ret = HRTIMER_NORESTART; 3853 ret = HRTIMER_NORESTART;
3854 } 3854 }
3855 3855
3856 period = max_t(u64, 10000, counter->hw.sample_period); 3856 period = max_t(u64, 10000, event->hw.sample_period);
3857 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 3857 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3858 3858
3859 return ret; 3859 return ret;
3860} 3860}
3861 3861
3862/* 3862/*
3863 * Software counter: cpu wall time clock 3863 * Software event: cpu wall time clock
3864 */ 3864 */
3865 3865
3866static void cpu_clock_perf_counter_update(struct perf_counter *counter) 3866static void cpu_clock_perf_event_update(struct perf_event *event)
3867{ 3867{
3868 int cpu = raw_smp_processor_id(); 3868 int cpu = raw_smp_processor_id();
3869 s64 prev; 3869 s64 prev;
3870 u64 now; 3870 u64 now;
3871 3871
3872 now = cpu_clock(cpu); 3872 now = cpu_clock(cpu);
3873 prev = atomic64_read(&counter->hw.prev_count); 3873 prev = atomic64_read(&event->hw.prev_count);
3874 atomic64_set(&counter->hw.prev_count, now); 3874 atomic64_set(&event->hw.prev_count, now);
3875 atomic64_add(now - prev, &counter->count); 3875 atomic64_add(now - prev, &event->count);
3876} 3876}
3877 3877
3878static int cpu_clock_perf_counter_enable(struct perf_counter *counter) 3878static int cpu_clock_perf_event_enable(struct perf_event *event)
3879{ 3879{
3880 struct hw_perf_counter *hwc = &counter->hw; 3880 struct hw_perf_event *hwc = &event->hw;
3881 int cpu = raw_smp_processor_id(); 3881 int cpu = raw_smp_processor_id();
3882 3882
3883 atomic64_set(&hwc->prev_count, cpu_clock(cpu)); 3883 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3884 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3884 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3885 hwc->hrtimer.function = perf_swcounter_hrtimer; 3885 hwc->hrtimer.function = perf_swevent_hrtimer;
3886 if (hwc->sample_period) { 3886 if (hwc->sample_period) {
3887 u64 period = max_t(u64, 10000, hwc->sample_period); 3887 u64 period = max_t(u64, 10000, hwc->sample_period);
3888 __hrtimer_start_range_ns(&hwc->hrtimer, 3888 __hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3893,48 +3893,48 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3893 return 0; 3893 return 0;
3894} 3894}
3895 3895
3896static void cpu_clock_perf_counter_disable(struct perf_counter *counter) 3896static void cpu_clock_perf_event_disable(struct perf_event *event)
3897{ 3897{
3898 if (counter->hw.sample_period) 3898 if (event->hw.sample_period)
3899 hrtimer_cancel(&counter->hw.hrtimer); 3899 hrtimer_cancel(&event->hw.hrtimer);
3900 cpu_clock_perf_counter_update(counter); 3900 cpu_clock_perf_event_update(event);
3901} 3901}
3902 3902
3903static void cpu_clock_perf_counter_read(struct perf_counter *counter) 3903static void cpu_clock_perf_event_read(struct perf_event *event)
3904{ 3904{
3905 cpu_clock_perf_counter_update(counter); 3905 cpu_clock_perf_event_update(event);
3906} 3906}
3907 3907
3908static const struct pmu perf_ops_cpu_clock = { 3908static const struct pmu perf_ops_cpu_clock = {
3909 .enable = cpu_clock_perf_counter_enable, 3909 .enable = cpu_clock_perf_event_enable,
3910 .disable = cpu_clock_perf_counter_disable, 3910 .disable = cpu_clock_perf_event_disable,
3911 .read = cpu_clock_perf_counter_read, 3911 .read = cpu_clock_perf_event_read,
3912}; 3912};
3913 3913
3914/* 3914/*
3915 * Software counter: task time clock 3915 * Software event: task time clock
3916 */ 3916 */
3917 3917
3918static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) 3918static void task_clock_perf_event_update(struct perf_event *event, u64 now)
3919{ 3919{
3920 u64 prev; 3920 u64 prev;
3921 s64 delta; 3921 s64 delta;
3922 3922
3923 prev = atomic64_xchg(&counter->hw.prev_count, now); 3923 prev = atomic64_xchg(&event->hw.prev_count, now);
3924 delta = now - prev; 3924 delta = now - prev;
3925 atomic64_add(delta, &counter->count); 3925 atomic64_add(delta, &event->count);
3926} 3926}
3927 3927
3928static int task_clock_perf_counter_enable(struct perf_counter *counter) 3928static int task_clock_perf_event_enable(struct perf_event *event)
3929{ 3929{
3930 struct hw_perf_counter *hwc = &counter->hw; 3930 struct hw_perf_event *hwc = &event->hw;
3931 u64 now; 3931 u64 now;
3932 3932
3933 now = counter->ctx->time; 3933 now = event->ctx->time;
3934 3934
3935 atomic64_set(&hwc->prev_count, now); 3935 atomic64_set(&hwc->prev_count, now);
3936 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3936 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3937 hwc->hrtimer.function = perf_swcounter_hrtimer; 3937 hwc->hrtimer.function = perf_swevent_hrtimer;
3938 if (hwc->sample_period) { 3938 if (hwc->sample_period) {
3939 u64 period = max_t(u64, 10000, hwc->sample_period); 3939 u64 period = max_t(u64, 10000, hwc->sample_period);
3940 __hrtimer_start_range_ns(&hwc->hrtimer, 3940 __hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3945,38 +3945,38 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
3945 return 0; 3945 return 0;
3946} 3946}
3947 3947
3948static void task_clock_perf_counter_disable(struct perf_counter *counter) 3948static void task_clock_perf_event_disable(struct perf_event *event)
3949{ 3949{
3950 if (counter->hw.sample_period) 3950 if (event->hw.sample_period)
3951 hrtimer_cancel(&counter->hw.hrtimer); 3951 hrtimer_cancel(&event->hw.hrtimer);
3952 task_clock_perf_counter_update(counter, counter->ctx->time); 3952 task_clock_perf_event_update(event, event->ctx->time);
3953 3953
3954} 3954}
3955 3955
3956static void task_clock_perf_counter_read(struct perf_counter *counter) 3956static void task_clock_perf_event_read(struct perf_event *event)
3957{ 3957{
3958 u64 time; 3958 u64 time;
3959 3959
3960 if (!in_nmi()) { 3960 if (!in_nmi()) {
3961 update_context_time(counter->ctx); 3961 update_context_time(event->ctx);
3962 time = counter->ctx->time; 3962 time = event->ctx->time;
3963 } else { 3963 } else {
3964 u64 now = perf_clock(); 3964 u64 now = perf_clock();
3965 u64 delta = now - counter->ctx->timestamp; 3965 u64 delta = now - event->ctx->timestamp;
3966 time = counter->ctx->time + delta; 3966 time = event->ctx->time + delta;
3967 } 3967 }
3968 3968
3969 task_clock_perf_counter_update(counter, time); 3969 task_clock_perf_event_update(event, time);
3970} 3970}
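The !in_nmi() branch above may refresh ctx->time through update_context_time(); the NMI branch instead extrapolates from the last published (time, timestamp) pair, avoiding any locking. A minimal sketch of that lock-free idea, with hypothetical sketch_ names:

	struct sketch_ctx_time {
		u64 time;	/* context time at the last update   */
		u64 timestamp;	/* perf_clock() value at that update */
	};

	static u64 sketch_nmi_time(const struct sketch_ctx_time *c, u64 now)
	{
		return c->time + (now - c->timestamp);	/* extrapolate, lock-free */
	}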
3971 3971
3972static const struct pmu perf_ops_task_clock = { 3972static const struct pmu perf_ops_task_clock = {
3973 .enable = task_clock_perf_counter_enable, 3973 .enable = task_clock_perf_event_enable,
3974 .disable = task_clock_perf_counter_disable, 3974 .disable = task_clock_perf_event_disable,
3975 .read = task_clock_perf_counter_read, 3975 .read = task_clock_perf_event_read,
3976}; 3976};
3977 3977
3978#ifdef CONFIG_EVENT_PROFILE 3978#ifdef CONFIG_EVENT_PROFILE
3979void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, 3979void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
3980 int entry_size) 3980 int entry_size)
3981{ 3981{
3982 struct perf_raw_record raw = { 3982 struct perf_raw_record raw = {
@@ -3994,62 +3994,62 @@ void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
3994 if (!regs) 3994 if (!regs)
3995 regs = task_pt_regs(current); 3995 regs = task_pt_regs(current);
3996 3996
3997 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 3997 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
3998 &data, regs); 3998 &data, regs);
3999} 3999}
4000EXPORT_SYMBOL_GPL(perf_tpcounter_event); 4000EXPORT_SYMBOL_GPL(perf_tp_event);
4001 4001
4002extern int ftrace_profile_enable(int); 4002extern int ftrace_profile_enable(int);
4003extern void ftrace_profile_disable(int); 4003extern void ftrace_profile_disable(int);
4004 4004
4005static void tp_perf_counter_destroy(struct perf_counter *counter) 4005static void tp_perf_event_destroy(struct perf_event *event)
4006{ 4006{
4007 ftrace_profile_disable(counter->attr.config); 4007 ftrace_profile_disable(event->attr.config);
4008} 4008}
4009 4009
4010static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4010static const struct pmu *tp_perf_event_init(struct perf_event *event)
4011{ 4011{
4012 /* 4012 /*
4013 * Raw tracepoint data is a severe data leak, only allow root to 4013 * Raw tracepoint data is a severe data leak, only allow root to
4014 * have these. 4014 * have these.
4015 */ 4015 */
4016 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && 4016 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4017 perf_paranoid_tracepoint_raw() && 4017 perf_paranoid_tracepoint_raw() &&
4018 !capable(CAP_SYS_ADMIN)) 4018 !capable(CAP_SYS_ADMIN))
4019 return ERR_PTR(-EPERM); 4019 return ERR_PTR(-EPERM);
4020 4020
4021 if (ftrace_profile_enable(counter->attr.config)) 4021 if (ftrace_profile_enable(event->attr.config))
4022 return NULL; 4022 return NULL;
4023 4023
4024 counter->destroy = tp_perf_counter_destroy; 4024 event->destroy = tp_perf_event_destroy;
4025 4025
4026 return &perf_ops_generic; 4026 return &perf_ops_generic;
4027} 4027}
4028#else 4028#else
4029static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4029static const struct pmu *tp_perf_event_init(struct perf_event *event)
4030{ 4030{
4031 return NULL; 4031 return NULL;
4032} 4032}
4033#endif 4033#endif
4034 4034
4035atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; 4035atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4036 4036
4037static void sw_perf_counter_destroy(struct perf_counter *counter) 4037static void sw_perf_event_destroy(struct perf_event *event)
4038{ 4038{
4039 u64 event_id = counter->attr.config; 4039 u64 event_id = event->attr.config;
4040 4040
4041 WARN_ON(counter->parent); 4041 WARN_ON(event->parent);
4042 4042
4043 atomic_dec(&perf_swcounter_enabled[event_id]); 4043 atomic_dec(&perf_swevent_enabled[event_id]);
4044} 4044}
4045 4045
4046static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) 4046static const struct pmu *sw_perf_event_init(struct perf_event *event)
4047{ 4047{
4048 const struct pmu *pmu = NULL; 4048 const struct pmu *pmu = NULL;
4049 u64 event_id = counter->attr.config; 4049 u64 event_id = event->attr.config;
4050 4050
4051 /* 4051 /*
4052 * Software counters (currently) can't in general distinguish 4052 * Software events (currently) can't in general distinguish
4053 * between user, kernel and hypervisor events. 4053 * between user, kernel and hypervisor events.
4054 * However, context switches and cpu migrations are considered 4054 * However, context switches and cpu migrations are considered
4055 * to be kernel events, and page faults are never hypervisor 4055 * to be kernel events, and page faults are never hypervisor
@@ -4062,10 +4062,10 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
4062 break; 4062 break;
4063 case PERF_COUNT_SW_TASK_CLOCK: 4063 case PERF_COUNT_SW_TASK_CLOCK:
4064 /* 4064 /*
4065 * If the user instantiates this as a per-cpu counter, 4065 * If the user instantiates this as a per-cpu event,
4066 * use the cpu_clock counter instead. 4066 * use the cpu_clock event instead.
4067 */ 4067 */
4068 if (counter->ctx->task) 4068 if (event->ctx->task)
4069 pmu = &perf_ops_task_clock; 4069 pmu = &perf_ops_task_clock;
4070 else 4070 else
4071 pmu = &perf_ops_cpu_clock; 4071 pmu = &perf_ops_cpu_clock;
@@ -4076,9 +4076,9 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
4076 case PERF_COUNT_SW_PAGE_FAULTS_MAJ: 4076 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4077 case PERF_COUNT_SW_CONTEXT_SWITCHES: 4077 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4078 case PERF_COUNT_SW_CPU_MIGRATIONS: 4078 case PERF_COUNT_SW_CPU_MIGRATIONS:
4079 if (!counter->parent) { 4079 if (!event->parent) {
4080 atomic_inc(&perf_swcounter_enabled[event_id]); 4080 atomic_inc(&perf_swevent_enabled[event_id]);
4081 counter->destroy = sw_perf_counter_destroy; 4081 event->destroy = sw_perf_event_destroy;
4082 } 4082 }
4083 pmu = &perf_ops_generic; 4083 pmu = &perf_ops_generic;
4084 break; 4084 break;
@@ -4088,62 +4088,62 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
4088} 4088}
4089 4089
4090/* 4090/*
4091 * Allocate and initialize a counter structure 4091 * Allocate and initialize an event structure
4092 */ 4092 */
4093static struct perf_counter * 4093static struct perf_event *
4094perf_counter_alloc(struct perf_counter_attr *attr, 4094perf_event_alloc(struct perf_event_attr *attr,
4095 int cpu, 4095 int cpu,
4096 struct perf_counter_context *ctx, 4096 struct perf_event_context *ctx,
4097 struct perf_counter *group_leader, 4097 struct perf_event *group_leader,
4098 struct perf_counter *parent_counter, 4098 struct perf_event *parent_event,
4099 gfp_t gfpflags) 4099 gfp_t gfpflags)
4100{ 4100{
4101 const struct pmu *pmu; 4101 const struct pmu *pmu;
4102 struct perf_counter *counter; 4102 struct perf_event *event;
4103 struct hw_perf_counter *hwc; 4103 struct hw_perf_event *hwc;
4104 long err; 4104 long err;
4105 4105
4106 counter = kzalloc(sizeof(*counter), gfpflags); 4106 event = kzalloc(sizeof(*event), gfpflags);
4107 if (!counter) 4107 if (!event)
4108 return ERR_PTR(-ENOMEM); 4108 return ERR_PTR(-ENOMEM);
4109 4109
4110 /* 4110 /*
4111 * Single counters are their own group leaders, with an 4111 * Single events are their own group leaders, with an
4112 * empty sibling list: 4112 * empty sibling list:
4113 */ 4113 */
4114 if (!group_leader) 4114 if (!group_leader)
4115 group_leader = counter; 4115 group_leader = event;
4116 4116
4117 mutex_init(&counter->child_mutex); 4117 mutex_init(&event->child_mutex);
4118 INIT_LIST_HEAD(&counter->child_list); 4118 INIT_LIST_HEAD(&event->child_list);
4119 4119
4120 INIT_LIST_HEAD(&counter->group_entry); 4120 INIT_LIST_HEAD(&event->group_entry);
4121 INIT_LIST_HEAD(&counter->event_entry); 4121 INIT_LIST_HEAD(&event->event_entry);
4122 INIT_LIST_HEAD(&counter->sibling_list); 4122 INIT_LIST_HEAD(&event->sibling_list);
4123 init_waitqueue_head(&counter->waitq); 4123 init_waitqueue_head(&event->waitq);
4124 4124
4125 mutex_init(&counter->mmap_mutex); 4125 mutex_init(&event->mmap_mutex);
4126 4126
4127 counter->cpu = cpu; 4127 event->cpu = cpu;
4128 counter->attr = *attr; 4128 event->attr = *attr;
4129 counter->group_leader = group_leader; 4129 event->group_leader = group_leader;
4130 counter->pmu = NULL; 4130 event->pmu = NULL;
4131 counter->ctx = ctx; 4131 event->ctx = ctx;
4132 counter->oncpu = -1; 4132 event->oncpu = -1;
4133 4133
4134 counter->parent = parent_counter; 4134 event->parent = parent_event;
4135 4135
4136 counter->ns = get_pid_ns(current->nsproxy->pid_ns); 4136 event->ns = get_pid_ns(current->nsproxy->pid_ns);
4137 counter->id = atomic64_inc_return(&perf_counter_id); 4137 event->id = atomic64_inc_return(&perf_event_id);
4138 4138
4139 counter->state = PERF_COUNTER_STATE_INACTIVE; 4139 event->state = PERF_EVENT_STATE_INACTIVE;
4140 4140
4141 if (attr->disabled) 4141 if (attr->disabled)
4142 counter->state = PERF_COUNTER_STATE_OFF; 4142 event->state = PERF_EVENT_STATE_OFF;
4143 4143
4144 pmu = NULL; 4144 pmu = NULL;
4145 4145
4146 hwc = &counter->hw; 4146 hwc = &event->hw;
4147 hwc->sample_period = attr->sample_period; 4147 hwc->sample_period = attr->sample_period;
4148 if (attr->freq && attr->sample_freq) 4148 if (attr->freq && attr->sample_freq)
4149 hwc->sample_period = 1; 4149 hwc->sample_period = 1;
@@ -4152,7 +4152,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
4152 atomic64_set(&hwc->period_left, hwc->sample_period); 4152 atomic64_set(&hwc->period_left, hwc->sample_period);
4153 4153
4154 /* 4154 /*
4155 * we currently do not support PERF_FORMAT_GROUP on inherited counters 4155 * we currently do not support PERF_FORMAT_GROUP on inherited events
4156 */ 4156 */
4157 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 4157 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4158 goto done; 4158 goto done;
@@ -4161,15 +4161,15 @@ perf_counter_alloc(struct perf_counter_attr *attr,
4161 case PERF_TYPE_RAW: 4161 case PERF_TYPE_RAW:
4162 case PERF_TYPE_HARDWARE: 4162 case PERF_TYPE_HARDWARE:
4163 case PERF_TYPE_HW_CACHE: 4163 case PERF_TYPE_HW_CACHE:
4164 pmu = hw_perf_counter_init(counter); 4164 pmu = hw_perf_event_init(event);
4165 break; 4165 break;
4166 4166
4167 case PERF_TYPE_SOFTWARE: 4167 case PERF_TYPE_SOFTWARE:
4168 pmu = sw_perf_counter_init(counter); 4168 pmu = sw_perf_event_init(event);
4169 break; 4169 break;
4170 4170
4171 case PERF_TYPE_TRACEPOINT: 4171 case PERF_TYPE_TRACEPOINT:
4172 pmu = tp_perf_counter_init(counter); 4172 pmu = tp_perf_event_init(event);
4173 break; 4173 break;
4174 4174
4175 default: 4175 default:
@@ -4183,29 +4183,29 @@ done:
4183 err = PTR_ERR(pmu); 4183 err = PTR_ERR(pmu);
4184 4184
4185 if (err) { 4185 if (err) {
4186 if (counter->ns) 4186 if (event->ns)
4187 put_pid_ns(counter->ns); 4187 put_pid_ns(event->ns);
4188 kfree(counter); 4188 kfree(event);
4189 return ERR_PTR(err); 4189 return ERR_PTR(err);
4190 } 4190 }
4191 4191
4192 counter->pmu = pmu; 4192 event->pmu = pmu;
4193 4193
4194 if (!counter->parent) { 4194 if (!event->parent) {
4195 atomic_inc(&nr_counters); 4195 atomic_inc(&nr_events);
4196 if (counter->attr.mmap) 4196 if (event->attr.mmap)
4197 atomic_inc(&nr_mmap_counters); 4197 atomic_inc(&nr_mmap_events);
4198 if (counter->attr.comm) 4198 if (event->attr.comm)
4199 atomic_inc(&nr_comm_counters); 4199 atomic_inc(&nr_comm_events);
4200 if (counter->attr.task) 4200 if (event->attr.task)
4201 atomic_inc(&nr_task_counters); 4201 atomic_inc(&nr_task_events);
4202 } 4202 }
4203 4203
4204 return counter; 4204 return event;
4205} 4205}
4206 4206
4207static int perf_copy_attr(struct perf_counter_attr __user *uattr, 4207static int perf_copy_attr(struct perf_event_attr __user *uattr,
4208 struct perf_counter_attr *attr) 4208 struct perf_event_attr *attr)
4209{ 4209{
4210 u32 size; 4210 u32 size;
4211 int ret; 4211 int ret;
@@ -4284,11 +4284,11 @@ err_size:
4284 goto out; 4284 goto out;
4285} 4285}
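perf_copy_attr() is almost entirely elided here. It copies a versioned, variable-size perf_event_attr from user space; a hedged sketch of the usual pattern for that -- the zero-fill and the tail handling are assumptions suggested by the err_size label, not the elided code, and the sketch_ name is hypothetical:

	static int sketch_copy_attr(struct perf_event_attr __user *uattr,
				    struct perf_event_attr *attr)
	{
		u32 size;

		if (get_user(size, &uattr->size))
			return -EFAULT;
		if (size == 0)
			size = sizeof(*attr);	/* assumed legacy default */
		memset(attr, 0, sizeof(*attr));	/* zero fields newer than
						   what userspace sent */
		if (size > sizeof(*attr))
			return -E2BIG;		/* assumed: or accept if the
						   extra tail is all zeroes */
		if (copy_from_user(attr, uattr, size))
			return -EFAULT;
		return 0;
	}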
4286 4286
4287int perf_counter_set_output(struct perf_counter *counter, int output_fd) 4287int perf_event_set_output(struct perf_event *event, int output_fd)
4288{ 4288{
4289 struct perf_counter *output_counter = NULL; 4289 struct perf_event *output_event = NULL;
4290 struct file *output_file = NULL; 4290 struct file *output_file = NULL;
4291 struct perf_counter *old_output; 4291 struct perf_event *old_output;
4292 int fput_needed = 0; 4292 int fput_needed = 0;
4293 int ret = -EINVAL; 4293 int ret = -EINVAL;
4294 4294
@@ -4302,28 +4302,28 @@ int perf_counter_set_output(struct perf_counter *counter, int output_fd)
4302 if (output_file->f_op != &perf_fops) 4302 if (output_file->f_op != &perf_fops)
4303 goto out; 4303 goto out;
4304 4304
4305 output_counter = output_file->private_data; 4305 output_event = output_file->private_data;
4306 4306
4307 /* Don't chain output fds */ 4307 /* Don't chain output fds */
4308 if (output_counter->output) 4308 if (output_event->output)
4309 goto out; 4309 goto out;
4310 4310
4311 /* Don't set an output fd when we already have an output channel */ 4311 /* Don't set an output fd when we already have an output channel */
4312 if (counter->data) 4312 if (event->data)
4313 goto out; 4313 goto out;
4314 4314
4315 atomic_long_inc(&output_file->f_count); 4315 atomic_long_inc(&output_file->f_count);
4316 4316
4317set: 4317set:
4318 mutex_lock(&counter->mmap_mutex); 4318 mutex_lock(&event->mmap_mutex);
4319 old_output = counter->output; 4319 old_output = event->output;
4320 rcu_assign_pointer(counter->output, output_counter); 4320 rcu_assign_pointer(event->output, output_event);
4321 mutex_unlock(&counter->mmap_mutex); 4321 mutex_unlock(&event->mmap_mutex);
4322 4322
4323 if (old_output) { 4323 if (old_output) {
4324 /* 4324 /*
4325 * we need to make sure no existing perf_output_*() 4325 * we need to make sure no existing perf_output_*()
4326 * is still referencing this counter. 4326 * is still referencing this event.
4327 */ 4327 */
4328 synchronize_rcu(); 4328 synchronize_rcu();
4329 fput(old_output->filp); 4329 fput(old_output->filp);
@@ -4336,21 +4336,21 @@ out:
4336} 4336}
4337 4337
4338/** 4338/**
4339 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu 4339 * sys_perf_event_open - open a performance event, associate it to a task/cpu
4340 * 4340 *
4341 * @attr_uptr: event type attributes for monitoring/sampling 4341 * @attr_uptr: event type attributes for monitoring/sampling
4342 * @pid: target pid 4342 * @pid: target pid
4343 * @cpu: target cpu 4343 * @cpu: target cpu
4344 * @group_fd: group leader counter fd 4344 * @group_fd: group leader event fd
4345 */ 4345 */
4346SYSCALL_DEFINE5(perf_counter_open, 4346SYSCALL_DEFINE5(perf_event_open,
4347 struct perf_counter_attr __user *, attr_uptr, 4347 struct perf_event_attr __user *, attr_uptr,
4348 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 4348 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4349{ 4349{
4350 struct perf_counter *counter, *group_leader; 4350 struct perf_event *event, *group_leader;
4351 struct perf_counter_attr attr; 4351 struct perf_event_attr attr;
4352 struct perf_counter_context *ctx; 4352 struct perf_event_context *ctx;
4353 struct file *counter_file = NULL; 4353 struct file *event_file = NULL;
4354 struct file *group_file = NULL; 4354 struct file *group_file = NULL;
4355 int fput_needed = 0; 4355 int fput_needed = 0;
4356 int fput_needed2 = 0; 4356 int fput_needed2 = 0;
@@ -4370,7 +4370,7 @@ SYSCALL_DEFINE5(perf_counter_open,
4370 } 4370 }
4371 4371
4372 if (attr.freq) { 4372 if (attr.freq) {
4373 if (attr.sample_freq > sysctl_perf_counter_sample_rate) 4373 if (attr.sample_freq > sysctl_perf_event_sample_rate)
4374 return -EINVAL; 4374 return -EINVAL;
4375 } 4375 }
4376 4376
@@ -4382,7 +4382,7 @@ SYSCALL_DEFINE5(perf_counter_open,
4382 return PTR_ERR(ctx); 4382 return PTR_ERR(ctx);
4383 4383
4384 /* 4384 /*
4385 * Look up the group leader (we will attach this counter to it): 4385 * Look up the group leader (we will attach this event to it):
4386 */ 4386 */
4387 group_leader = NULL; 4387 group_leader = NULL;
4388 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { 4388 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
@@ -4413,45 +4413,45 @@ SYSCALL_DEFINE5(perf_counter_open,
4413 goto err_put_context; 4413 goto err_put_context;
4414 } 4414 }
4415 4415
4416 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, 4416 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
4417 NULL, GFP_KERNEL); 4417 NULL, GFP_KERNEL);
4418 err = PTR_ERR(counter); 4418 err = PTR_ERR(event);
4419 if (IS_ERR(counter)) 4419 if (IS_ERR(event))
4420 goto err_put_context; 4420 goto err_put_context;
4421 4421
4422 err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); 4422 err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
4423 if (err < 0) 4423 if (err < 0)
4424 goto err_free_put_context; 4424 goto err_free_put_context;
4425 4425
4426 counter_file = fget_light(err, &fput_needed2); 4426 event_file = fget_light(err, &fput_needed2);
4427 if (!counter_file) 4427 if (!event_file)
4428 goto err_free_put_context; 4428 goto err_free_put_context;
4429 4429
4430 if (flags & PERF_FLAG_FD_OUTPUT) { 4430 if (flags & PERF_FLAG_FD_OUTPUT) {
4431 err = perf_counter_set_output(counter, group_fd); 4431 err = perf_event_set_output(event, group_fd);
4432 if (err) 4432 if (err)
4433 goto err_fput_free_put_context; 4433 goto err_fput_free_put_context;
4434 } 4434 }
4435 4435
4436 counter->filp = counter_file; 4436 event->filp = event_file;
4437 WARN_ON_ONCE(ctx->parent_ctx); 4437 WARN_ON_ONCE(ctx->parent_ctx);
4438 mutex_lock(&ctx->mutex); 4438 mutex_lock(&ctx->mutex);
4439 perf_install_in_context(ctx, counter, cpu); 4439 perf_install_in_context(ctx, event, cpu);
4440 ++ctx->generation; 4440 ++ctx->generation;
4441 mutex_unlock(&ctx->mutex); 4441 mutex_unlock(&ctx->mutex);
4442 4442
4443 counter->owner = current; 4443 event->owner = current;
4444 get_task_struct(current); 4444 get_task_struct(current);
4445 mutex_lock(&current->perf_counter_mutex); 4445 mutex_lock(&current->perf_event_mutex);
4446 list_add_tail(&counter->owner_entry, &current->perf_counter_list); 4446 list_add_tail(&event->owner_entry, &current->perf_event_list);
4447 mutex_unlock(&current->perf_counter_mutex); 4447 mutex_unlock(&current->perf_event_mutex);
4448 4448
4449err_fput_free_put_context: 4449err_fput_free_put_context:
4450 fput_light(counter_file, fput_needed2); 4450 fput_light(event_file, fput_needed2);
4451 4451
4452err_free_put_context: 4452err_free_put_context:
4453 if (err < 0) 4453 if (err < 0)
4454 kfree(counter); 4454 kfree(event);
4455 4455
4456err_put_context: 4456err_put_context:
4457 if (err < 0) 4457 if (err < 0)
@@ -4463,88 +4463,88 @@ err_put_context:
4463} 4463}
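For reference, a minimal user-space sketch of driving the renamed entry point. It assumes headers of this era that already define __NR_perf_event_open and struct perf_event_attr, and keeps error handling to the bare minimum:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_TASK_CLOCK;

		/* pid 0 = this task, cpu -1 = any cpu, no group, no flags */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}
		/* ... workload under measurement ... */
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("task clock: %lld ns\n", count);
		close(fd);
		return 0;
	}

With pid == 0 and cpu == -1 the event follows the calling task, which is exactly the case where sw_perf_event_init() above selects perf_ops_task_clock rather than perf_ops_cpu_clock.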
4464 4464
4465/* 4465/*
4466 * inherit a counter from parent task to child task: 4466 * inherit an event from parent task to child task:
4467 */ 4467 */
4468static struct perf_counter * 4468static struct perf_event *
4469inherit_counter(struct perf_counter *parent_counter, 4469inherit_event(struct perf_event *parent_event,
4470 struct task_struct *parent, 4470 struct task_struct *parent,
4471 struct perf_counter_context *parent_ctx, 4471 struct perf_event_context *parent_ctx,
4472 struct task_struct *child, 4472 struct task_struct *child,
4473 struct perf_counter *group_leader, 4473 struct perf_event *group_leader,
4474 struct perf_counter_context *child_ctx) 4474 struct perf_event_context *child_ctx)
4475{ 4475{
4476 struct perf_counter *child_counter; 4476 struct perf_event *child_event;
4477 4477
4478 /* 4478 /*
4479 * Instead of creating recursive hierarchies of counters, 4479 * Instead of creating recursive hierarchies of events,
4480 * we link inherited counters back to the original parent, 4480 * we link inherited events back to the original parent,
4481 * which has a filp for sure, which we use as the reference 4481 * which has a filp for sure, which we use as the reference
4482 * count: 4482 * count:
4483 */ 4483 */
4484 if (parent_counter->parent) 4484 if (parent_event->parent)
4485 parent_counter = parent_counter->parent; 4485 parent_event = parent_event->parent;
4486 4486
4487 child_counter = perf_counter_alloc(&parent_counter->attr, 4487 child_event = perf_event_alloc(&parent_event->attr,
4488 parent_counter->cpu, child_ctx, 4488 parent_event->cpu, child_ctx,
4489 group_leader, parent_counter, 4489 group_leader, parent_event,
4490 GFP_KERNEL); 4490 GFP_KERNEL);
4491 if (IS_ERR(child_counter)) 4491 if (IS_ERR(child_event))
4492 return child_counter; 4492 return child_event;
4493 get_ctx(child_ctx); 4493 get_ctx(child_ctx);
4494 4494
4495 /* 4495 /*
4496 * Make the child state follow the state of the parent counter, 4496 * Make the child state follow the state of the parent event,
4497 * not its attr.disabled bit. We hold the parent's mutex, 4497 * not its attr.disabled bit. We hold the parent's mutex,
4498 * so we won't race with perf_counter_{en, dis}able_family. 4498 * so we won't race with perf_event_{en, dis}able_family.
4499 */ 4499 */
4500 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) 4500 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
4501 child_counter->state = PERF_COUNTER_STATE_INACTIVE; 4501 child_event->state = PERF_EVENT_STATE_INACTIVE;
4502 else 4502 else
4503 child_counter->state = PERF_COUNTER_STATE_OFF; 4503 child_event->state = PERF_EVENT_STATE_OFF;
4504 4504
4505 if (parent_counter->attr.freq) 4505 if (parent_event->attr.freq)
4506 child_counter->hw.sample_period = parent_counter->hw.sample_period; 4506 child_event->hw.sample_period = parent_event->hw.sample_period;
4507 4507
4508 /* 4508 /*
4509 * Link it up in the child's context: 4509 * Link it up in the child's context:
4510 */ 4510 */
4511 add_counter_to_ctx(child_counter, child_ctx); 4511 add_event_to_ctx(child_event, child_ctx);
4512 4512
4513 /* 4513 /*
4514 * Get a reference to the parent filp - we will fput it 4514 * Get a reference to the parent filp - we will fput it
4515 * when the child counter exits. This is safe to do because 4515 * when the child event exits. This is safe to do because
4516 * we are in the parent and we know that the filp still 4516 * we are in the parent and we know that the filp still
4517 * exists and has a nonzero count: 4517 * exists and has a nonzero count:
4518 */ 4518 */
4519 atomic_long_inc(&parent_counter->filp->f_count); 4519 atomic_long_inc(&parent_event->filp->f_count);
4520 4520
4521 /* 4521 /*
4522 * Link this into the parent counter's child list 4522 * Link this into the parent event's child list
4523 */ 4523 */
4524 WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4524 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4525 mutex_lock(&parent_counter->child_mutex); 4525 mutex_lock(&parent_event->child_mutex);
4526 list_add_tail(&child_counter->child_list, &parent_counter->child_list); 4526 list_add_tail(&child_event->child_list, &parent_event->child_list);
4527 mutex_unlock(&parent_counter->child_mutex); 4527 mutex_unlock(&parent_event->child_mutex);
4528 4528
4529 return child_counter; 4529 return child_event;
4530} 4530}
4531 4531
4532static int inherit_group(struct perf_counter *parent_counter, 4532static int inherit_group(struct perf_event *parent_event,
4533 struct task_struct *parent, 4533 struct task_struct *parent,
4534 struct perf_counter_context *parent_ctx, 4534 struct perf_event_context *parent_ctx,
4535 struct task_struct *child, 4535 struct task_struct *child,
4536 struct perf_counter_context *child_ctx) 4536 struct perf_event_context *child_ctx)
4537{ 4537{
4538 struct perf_counter *leader; 4538 struct perf_event *leader;
4539 struct perf_counter *sub; 4539 struct perf_event *sub;
4540 struct perf_counter *child_ctr; 4540 struct perf_event *child_ctr;
4541 4541
4542 leader = inherit_counter(parent_counter, parent, parent_ctx, 4542 leader = inherit_event(parent_event, parent, parent_ctx,
4543 child, NULL, child_ctx); 4543 child, NULL, child_ctx);
4544 if (IS_ERR(leader)) 4544 if (IS_ERR(leader))
4545 return PTR_ERR(leader); 4545 return PTR_ERR(leader);
4546 list_for_each_entry(sub, &parent_counter->sibling_list, group_entry) { 4546 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
4547 child_ctr = inherit_counter(sub, parent, parent_ctx, 4547 child_ctr = inherit_event(sub, parent, parent_ctx,
4548 child, leader, child_ctx); 4548 child, leader, child_ctx);
4549 if (IS_ERR(child_ctr)) 4549 if (IS_ERR(child_ctr))
4550 return PTR_ERR(child_ctr); 4550 return PTR_ERR(child_ctr);
@@ -4552,74 +4552,74 @@ static int inherit_group(struct perf_counter *parent_counter,
4552 return 0; 4552 return 0;
4553} 4553}
4554 4554
4555static void sync_child_counter(struct perf_counter *child_counter, 4555static void sync_child_event(struct perf_event *child_event,
4556 struct task_struct *child) 4556 struct task_struct *child)
4557{ 4557{
4558 struct perf_counter *parent_counter = child_counter->parent; 4558 struct perf_event *parent_event = child_event->parent;
4559 u64 child_val; 4559 u64 child_val;
4560 4560
4561 if (child_counter->attr.inherit_stat) 4561 if (child_event->attr.inherit_stat)
4562 perf_counter_read_event(child_counter, child); 4562 perf_event_read_event(child_event, child);
4563 4563
4564 child_val = atomic64_read(&child_counter->count); 4564 child_val = atomic64_read(&child_event->count);
4565 4565
4566 /* 4566 /*
4567 * Add back the child's count to the parent's count: 4567 * Add back the child's count to the parent's count:
4568 */ 4568 */
4569 atomic64_add(child_val, &parent_counter->count); 4569 atomic64_add(child_val, &parent_event->count);
4570 atomic64_add(child_counter->total_time_enabled, 4570 atomic64_add(child_event->total_time_enabled,
4571 &parent_counter->child_total_time_enabled); 4571 &parent_event->child_total_time_enabled);
4572 atomic64_add(child_counter->total_time_running, 4572 atomic64_add(child_event->total_time_running,
4573 &parent_counter->child_total_time_running); 4573 &parent_event->child_total_time_running);
4574 4574
4575 /* 4575 /*
4576 * Remove this counter from the parent's list 4576 * Remove this event from the parent's list
4577 */ 4577 */
4578 WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4578 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4579 mutex_lock(&parent_counter->child_mutex); 4579 mutex_lock(&parent_event->child_mutex);
4580 list_del_init(&child_counter->child_list); 4580 list_del_init(&child_event->child_list);
4581 mutex_unlock(&parent_counter->child_mutex); 4581 mutex_unlock(&parent_event->child_mutex);
4582 4582
4583 /* 4583 /*
4584 * Release the parent counter, if this was the last 4584 * Release the parent event, if this was the last
4585 * reference to it. 4585 * reference to it.
4586 */ 4586 */
4587 fput(parent_counter->filp); 4587 fput(parent_event->filp);
4588} 4588}
4589 4589
4590static void 4590static void
4591__perf_counter_exit_task(struct perf_counter *child_counter, 4591__perf_event_exit_task(struct perf_event *child_event,
4592 struct perf_counter_context *child_ctx, 4592 struct perf_event_context *child_ctx,
4593 struct task_struct *child) 4593 struct task_struct *child)
4594{ 4594{
4595 struct perf_counter *parent_counter; 4595 struct perf_event *parent_event;
4596 4596
4597 update_counter_times(child_counter); 4597 update_event_times(child_event);
4598 perf_counter_remove_from_context(child_counter); 4598 perf_event_remove_from_context(child_event);
4599 4599
4600 parent_counter = child_counter->parent; 4600 parent_event = child_event->parent;
4601 /* 4601 /*
4602 * It can happen that the parent exits first, and has counters 4602 * It can happen that the parent exits first, and has events
4603 * that are still around due to the child reference. These 4603 * that are still around due to the child reference. These
4604 * counters need to be zapped - but otherwise linger. 4604 * events need to be zapped - but otherwise linger.
4605 */ 4605 */
4606 if (parent_counter) { 4606 if (parent_event) {
4607 sync_child_counter(child_counter, child); 4607 sync_child_event(child_event, child);
4608 free_counter(child_counter); 4608 free_event(child_event);
4609 } 4609 }
4610} 4610}
4611 4611
4612/* 4612/*
4613 * When a child task exits, feed back counter values to parent counters. 4613 * When a child task exits, feed back event values to parent events.
4614 */ 4614 */
4615void perf_counter_exit_task(struct task_struct *child) 4615void perf_event_exit_task(struct task_struct *child)
4616{ 4616{
4617 struct perf_counter *child_counter, *tmp; 4617 struct perf_event *child_event, *tmp;
4618 struct perf_counter_context *child_ctx; 4618 struct perf_event_context *child_ctx;
4619 unsigned long flags; 4619 unsigned long flags;
4620 4620
4621 if (likely(!child->perf_counter_ctxp)) { 4621 if (likely(!child->perf_event_ctxp)) {
4622 perf_counter_task(child, NULL, 0); 4622 perf_event_task(child, NULL, 0);
4623 return; 4623 return;
4624 } 4624 }
4625 4625
@@ -4630,37 +4630,37 @@ void perf_counter_exit_task(struct task_struct *child)
4630 * scheduled, so we are now safe from rescheduling changing 4630 * scheduled, so we are now safe from rescheduling changing
4631 * our context. 4631 * our context.
4632 */ 4632 */
4633 child_ctx = child->perf_counter_ctxp; 4633 child_ctx = child->perf_event_ctxp;
4634 __perf_counter_task_sched_out(child_ctx); 4634 __perf_event_task_sched_out(child_ctx);
4635 4635
4636 /* 4636 /*
4637 * Take the context lock here so that if find_get_context is 4637 * Take the context lock here so that if find_get_context is
4638 * reading child->perf_counter_ctxp, we wait until it has 4638 * reading child->perf_event_ctxp, we wait until it has
4639 * incremented the context's refcount before we do put_ctx below. 4639 * incremented the context's refcount before we do put_ctx below.
4640 */ 4640 */
4641 spin_lock(&child_ctx->lock); 4641 spin_lock(&child_ctx->lock);
4642 child->perf_counter_ctxp = NULL; 4642 child->perf_event_ctxp = NULL;
4643 /* 4643 /*
4644 * If this context is a clone; unclone it so it can't get 4644 * If this context is a clone; unclone it so it can't get
4645 * swapped to another process while we're removing all 4645 * swapped to another process while we're removing all
4646 * the counters from it. 4646 * the events from it.
4647 */ 4647 */
4648 unclone_ctx(child_ctx); 4648 unclone_ctx(child_ctx);
4649 spin_unlock_irqrestore(&child_ctx->lock, flags); 4649 spin_unlock_irqrestore(&child_ctx->lock, flags);
4650 4650
4651 /* 4651 /*
4652 * Report the task dead after unscheduling the counters so that we 4652 * Report the task dead after unscheduling the events so that we
4653 * won't get any samples after PERF_EVENT_EXIT. We can however still 4653 * won't get any samples after PERF_RECORD_EXIT. We can however still
4654 * get a few PERF_EVENT_READ events. 4654 * get a few PERF_RECORD_READ events.
4655 */ 4655 */
4656 perf_counter_task(child, child_ctx, 0); 4656 perf_event_task(child, child_ctx, 0);
4657 4657
4658 /* 4658 /*
4659 * We can recurse on the same lock type through: 4659 * We can recurse on the same lock type through:
4660 * 4660 *
4661 * __perf_counter_exit_task() 4661 * __perf_event_exit_task()
4662 * sync_child_counter() 4662 * sync_child_event()
4663 * fput(parent_counter->filp) 4663 * fput(parent_event->filp)
4664 * perf_release() 4664 * perf_release()
4665 * mutex_lock(&ctx->mutex) 4665 * mutex_lock(&ctx->mutex)
4666 * 4666 *
@@ -4669,12 +4669,12 @@ void perf_counter_exit_task(struct task_struct *child)
4669 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); 4669 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
4670 4670
4671again: 4671again:
4672 list_for_each_entry_safe(child_counter, tmp, &child_ctx->group_list, 4672 list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list,
4673 group_entry) 4673 group_entry)
4674 __perf_counter_exit_task(child_counter, child_ctx, child); 4674 __perf_event_exit_task(child_event, child_ctx, child);
4675 4675
4676 /* 4676 /*
4677 * If the last counter was a group counter, it will have appended all 4677 * If the last event was a group event, it will have appended all
4678 * its siblings to the list, but we obtained 'tmp' before that, which 4678 * its siblings to the list, but we obtained 'tmp' before that, which
4679 * will still point to the list head terminating the iteration. 4679 * will still point to the list head terminating the iteration.
4680 */ 4680 */
@@ -4690,30 +4690,30 @@ again:
4690 * free an unexposed, unused context as created by inheritance by 4690 * free an unexposed, unused context as created by inheritance by
4691 * init_task below, used by fork() in case of fail. 4691 * init_task below, used by fork() in case of fail.
4692 */ 4692 */
4693void perf_counter_free_task(struct task_struct *task) 4693void perf_event_free_task(struct task_struct *task)
4694{ 4694{
4695 struct perf_counter_context *ctx = task->perf_counter_ctxp; 4695 struct perf_event_context *ctx = task->perf_event_ctxp;
4696 struct perf_counter *counter, *tmp; 4696 struct perf_event *event, *tmp;
4697 4697
4698 if (!ctx) 4698 if (!ctx)
4699 return; 4699 return;
4700 4700
4701 mutex_lock(&ctx->mutex); 4701 mutex_lock(&ctx->mutex);
4702again: 4702again:
4703 list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry) { 4703 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) {
4704 struct perf_counter *parent = counter->parent; 4704 struct perf_event *parent = event->parent;
4705 4705
4706 if (WARN_ON_ONCE(!parent)) 4706 if (WARN_ON_ONCE(!parent))
4707 continue; 4707 continue;
4708 4708
4709 mutex_lock(&parent->child_mutex); 4709 mutex_lock(&parent->child_mutex);
4710 list_del_init(&counter->child_list); 4710 list_del_init(&event->child_list);
4711 mutex_unlock(&parent->child_mutex); 4711 mutex_unlock(&parent->child_mutex);
4712 4712
4713 fput(parent->filp); 4713 fput(parent->filp);
4714 4714
4715 list_del_counter(counter, ctx); 4715 list_del_event(event, ctx);
4716 free_counter(counter); 4716 free_event(event);
4717 } 4717 }
4718 4718
4719 if (!list_empty(&ctx->group_list)) 4719 if (!list_empty(&ctx->group_list))
@@ -4725,37 +4725,37 @@ again:
4725} 4725}
4726 4726
4727/* 4727/*
4728 * Initialize the perf_counter context in task_struct 4728 * Initialize the perf_event context in task_struct
4729 */ 4729 */
4730int perf_counter_init_task(struct task_struct *child) 4730int perf_event_init_task(struct task_struct *child)
4731{ 4731{
4732 struct perf_counter_context *child_ctx, *parent_ctx; 4732 struct perf_event_context *child_ctx, *parent_ctx;
4733 struct perf_counter_context *cloned_ctx; 4733 struct perf_event_context *cloned_ctx;
4734 struct perf_counter *counter; 4734 struct perf_event *event;
4735 struct task_struct *parent = current; 4735 struct task_struct *parent = current;
4736 int inherited_all = 1; 4736 int inherited_all = 1;
4737 int ret = 0; 4737 int ret = 0;
4738 4738
4739 child->perf_counter_ctxp = NULL; 4739 child->perf_event_ctxp = NULL;
4740 4740
4741 mutex_init(&child->perf_counter_mutex); 4741 mutex_init(&child->perf_event_mutex);
4742 INIT_LIST_HEAD(&child->perf_counter_list); 4742 INIT_LIST_HEAD(&child->perf_event_list);
4743 4743
4744 if (likely(!parent->perf_counter_ctxp)) 4744 if (likely(!parent->perf_event_ctxp))
4745 return 0; 4745 return 0;
4746 4746
4747 /* 4747 /*
4748 * This is executed from the parent task context, so inherit 4748 * This is executed from the parent task context, so inherit
4749 * counters that have been marked for cloning. 4749 * events that have been marked for cloning.
4750 * First allocate and initialize a context for the child. 4750 * First allocate and initialize a context for the child.
4751 */ 4751 */
4752 4752
4753 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 4753 child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4754 if (!child_ctx) 4754 if (!child_ctx)
4755 return -ENOMEM; 4755 return -ENOMEM;
4756 4756
4757 __perf_counter_init_context(child_ctx, child); 4757 __perf_event_init_context(child_ctx, child);
4758 child->perf_counter_ctxp = child_ctx; 4758 child->perf_event_ctxp = child_ctx;
4759 get_task_struct(child); 4759 get_task_struct(child);
4760 4760
4761 /* 4761 /*
@@ -4781,16 +4781,16 @@ int perf_counter_init_task(struct task_struct *child)
4781 * We don't have to disable NMIs - we are only looking at 4781 * We don't have to disable NMIs - we are only looking at
4782 * the list, not manipulating it: 4782 * the list, not manipulating it:
4783 */ 4783 */
4784 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { 4784 list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) {
4785 if (counter != counter->group_leader) 4785 if (event != event->group_leader)
4786 continue; 4786 continue;
4787 4787
4788 if (!counter->attr.inherit) { 4788 if (!event->attr.inherit) {
4789 inherited_all = 0; 4789 inherited_all = 0;
 			continue;
 		}
 
-		ret = inherit_group(counter, parent, parent_ctx,
+		ret = inherit_group(event, parent, parent_ctx,
 					     child, child_ctx);
 		if (ret) {
 			inherited_all = 0;
@@ -4804,7 +4804,7 @@ int perf_counter_init_task(struct task_struct *child)
 	 * context, or of whatever the parent is a clone of.
 	 * Note that if the parent is a clone, it could get
 	 * uncloned at any point, but that doesn't matter
-	 * because the list of counters and the generation
+	 * because the list of events and the generation
 	 * count can't have changed since we took the mutex.
 	 */
 	cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
@@ -4825,41 +4825,41 @@ int perf_counter_init_task(struct task_struct *child)
 	return ret;
 }
 
-static void __cpuinit perf_counter_init_cpu(int cpu)
+static void __cpuinit perf_event_init_cpu(int cpu)
 {
 	struct perf_cpu_context *cpuctx;
 
 	cpuctx = &per_cpu(perf_cpu_context, cpu);
-	__perf_counter_init_context(&cpuctx->ctx, NULL);
+	__perf_event_init_context(&cpuctx->ctx, NULL);
 
 	spin_lock(&perf_resource_lock);
-	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
+	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
 	spin_unlock(&perf_resource_lock);
 
-	hw_perf_counter_setup(cpu);
+	hw_perf_event_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void __perf_counter_exit_cpu(void *info)
+static void __perf_event_exit_cpu(void *info)
 {
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-	struct perf_counter_context *ctx = &cpuctx->ctx;
-	struct perf_counter *counter, *tmp;
+	struct perf_event_context *ctx = &cpuctx->ctx;
+	struct perf_event *event, *tmp;
 
-	list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry)
-		__perf_counter_remove_from_context(counter);
+	list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry)
+		__perf_event_remove_from_context(event);
 }
-static void perf_counter_exit_cpu(int cpu)
+static void perf_event_exit_cpu(int cpu)
 {
 	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-	struct perf_counter_context *ctx = &cpuctx->ctx;
+	struct perf_event_context *ctx = &cpuctx->ctx;
 
 	mutex_lock(&ctx->mutex);
-	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
+	smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
 	mutex_unlock(&ctx->mutex);
 }
 #else
-static inline void perf_counter_exit_cpu(int cpu) { }
+static inline void perf_event_exit_cpu(int cpu) { }
 #endif
 
 static int __cpuinit
@@ -4871,17 +4871,17 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		perf_counter_init_cpu(cpu);
+		perf_event_init_cpu(cpu);
 		break;
 
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		hw_perf_counter_setup_online(cpu);
+		hw_perf_event_setup_online(cpu);
 		break;
 
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
-		perf_counter_exit_cpu(cpu);
+		perf_event_exit_cpu(cpu);
 		break;
 
 	default:
@@ -4899,7 +4899,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 	.priority = 20,
 };
 
-void __init perf_counter_init(void)
+void __init perf_event_init(void)
 {
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
@@ -4925,7 +4925,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	err = strict_strtoul(buf, 10, &val);
 	if (err)
 		return err;
-	if (val > perf_max_counters)
+	if (val > perf_max_events)
 		return -EINVAL;
 
 	spin_lock(&perf_resource_lock);
@@ -4933,8 +4933,8 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	for_each_online_cpu(cpu) {
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
 		spin_lock_irq(&cpuctx->ctx.lock);
-		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
-			  perf_max_counters - perf_reserved_percpu);
+		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
+			  perf_max_events - perf_reserved_percpu);
 		cpuctx->max_pertask = mpt;
 		spin_unlock_irq(&cpuctx->ctx.lock);
 	}
@@ -4989,12 +4989,12 @@ static struct attribute *perfclass_attrs[] = {
 
 static struct attribute_group perfclass_attr_group = {
 	.attrs = perfclass_attrs,
-	.name = "perf_counters",
+	.name = "perf_events",
 };
 
-static int __init perf_counter_sysfs_init(void)
+static int __init perf_event_sysfs_init(void)
 {
 	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
 				  &perfclass_attr_group);
 }
-device_initcall(perf_counter_sysfs_init);
+device_initcall(perf_event_sysfs_init);
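
( NOTE: the attribute-group rename above is user-visible: the class directory moves from /sys/devices/system/cpu/perf_counters/ to /sys/devices/system/cpu/perf_events/. A minimal user-space sketch that reads the "reserve_percpu" attribute at its new location — the exact path and attribute name are assumptions drawn from the code above, not something this patch adds: )

/* Illustrative only: read the per-CPU reservation knob at its renamed
 * sysfs location. Assumes sysfs is mounted at /sys and the attribute
 * group created by perf_event_sysfs_init() above. */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/perf_events/reserve_percpu", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("reserve_percpu: %s", buf);
	fclose(f);
	return 0;
}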
diff --git a/kernel/sched.c b/kernel/sched.c
index faf4d463bbff..291c8d213d13 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -2059,7 +2059,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-		perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
 				     1, 1, NULL, 0);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2724,7 +2724,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_counter_task_sched_in(current, cpu_of(rq));
+	perf_event_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -5199,7 +5199,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	spin_unlock(&rq->lock);
 
-	perf_counter_task_tick(curr, cpu);
+	perf_event_task_tick(curr, cpu);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5415,7 +5415,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_counter_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
@@ -7692,7 +7692,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 /*
  * Register at high priority so that task migration (migrate_all_tasks)
  * happens before everything else. This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
@@ -9549,7 +9549,7 @@ void __init sched_init(void)
 	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
-	perf_counter_init();
+	perf_event_init();
 
 	scheduler_running = 1;
 }
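
( NOTE: the scheduler now emits its migration count through perf_sw_event() instead of perf_swcounter_event(); as the hunk above shows, the argument order — event id, count, nmi flag, regs, addr — is unchanged. A hedged sketch of the same pattern from another kernel path, mirroring how the fault-handling hooks use this API; the function below is hypothetical, not part of this patch: )

/* Hypothetical caller of the renamed software-event hook. */
#include <linux/perf_event.h>

static void report_fault(struct pt_regs *regs, unsigned long address)
{
	/* one PERF_COUNT_SW_PAGE_FAULTS event, count 1, not in NMI context */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
}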
diff --git a/kernel/sys.c b/kernel/sys.c
index b3f1097c76fa..ea5c3bcac881 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -14,7 +14,7 @@
 #include <linux/prctl.h>
 #include <linux/highuid.h>
 #include <linux/fs.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/resource.h>
 #include <linux/kernel.h>
 #include <linux/kexec.h>
@@ -1511,11 +1511,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 	case PR_SET_TSC:
 		error = SET_TSC_CTL(arg2);
 		break;
-	case PR_TASK_PERF_COUNTERS_DISABLE:
-		error = perf_counter_task_disable();
+	case PR_TASK_PERF_EVENTS_DISABLE:
+		error = perf_event_task_disable();
 		break;
-	case PR_TASK_PERF_COUNTERS_ENABLE:
-		error = perf_counter_task_enable();
+	case PR_TASK_PERF_EVENTS_ENABLE:
+		error = perf_event_task_enable();
 		break;
 	case PR_GET_TIMERSLACK:
 		error = current->timer_slack_ns;
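
( NOTE: only the option names change in this hunk; the numeric prctl values stay the same, which is what keeps existing binaries working. A user-space sketch, with fallback defines for pre-rename headers — the values 31/32 are assumed to match the old PR_TASK_PERF_COUNTERS_* constants: )

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_TASK_PERF_EVENTS_DISABLE
#define PR_TASK_PERF_EVENTS_DISABLE 31	/* same value as the old name */
#define PR_TASK_PERF_EVENTS_ENABLE  32
#endif

int main(void)
{
	/* pause event counting for this task, then resume it */
	if (prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0))
		perror("prctl(PR_TASK_PERF_EVENTS_DISABLE)");
	/* ... run the code that should not be measured ... */
	if (prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0))
		perror("prctl(PR_TASK_PERF_EVENTS_ENABLE)");
	return 0;
}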
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 68320f6b07b5..515bc230ac2a 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -177,4 +177,4 @@ cond_syscall(sys_eventfd);
 cond_syscall(sys_eventfd2);
 
 /* performance counters: */
-cond_syscall(sys_perf_counter_open);
+cond_syscall(sys_perf_event_open);
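
( NOTE: cond_syscall() routes sys_perf_event_open to a stub returning -ENOSYS when the subsystem is configured out. A sketch of how user space can probe for the renamed syscall — it assumes __NR_perf_event_open and <linux/perf_event.h> come from headers updated by this rename: )

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid 0 = self, cpu -1 = any, no group fd, no flags */
	if (syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0) {
		if (errno == ENOSYS)
			printf("kernel built without CONFIG_PERF_EVENTS\n");
		else
			perror("perf_event_open");
		return 1;
	}
	return 0;
}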
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1a631ba684a4..6ba49c7cb128 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -50,7 +50,7 @@
 #include <linux/reboot.h>
 #include <linux/ftrace.h>
 #include <linux/slow-work.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -964,28 +964,28 @@ static struct ctl_table kern_table[] = {
 		.child = slow_work_sysctls,
 	},
 #endif
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 	{
 		.ctl_name = CTL_UNNUMBERED,
-		.procname = "perf_counter_paranoid",
-		.data = &sysctl_perf_counter_paranoid,
-		.maxlen = sizeof(sysctl_perf_counter_paranoid),
+		.procname = "perf_event_paranoid",
+		.data = &sysctl_perf_event_paranoid,
+		.maxlen = sizeof(sysctl_perf_event_paranoid),
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
 	{
 		.ctl_name = CTL_UNNUMBERED,
-		.procname = "perf_counter_mlock_kb",
-		.data = &sysctl_perf_counter_mlock,
-		.maxlen = sizeof(sysctl_perf_counter_mlock),
+		.procname = "perf_event_mlock_kb",
+		.data = &sysctl_perf_event_mlock,
+		.maxlen = sizeof(sysctl_perf_event_mlock),
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
 	{
 		.ctl_name = CTL_UNNUMBERED,
-		.procname = "perf_counter_max_sample_rate",
-		.data = &sysctl_perf_counter_sample_rate,
-		.maxlen = sizeof(sysctl_perf_counter_sample_rate),
+		.procname = "perf_event_max_sample_rate",
+		.data = &sysctl_perf_event_sample_rate,
+		.maxlen = sizeof(sysctl_perf_event_sample_rate),
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
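
( NOTE: since all three entries keep their proc_dointvec handlers, the knobs simply reappear under /proc/sys/kernel/ with their new names, e.g. perf_event_paranoid. A sketch reading one of them; the path assumes the usual procfs mount: )

#include <stdio.h>

int main(void)
{
	int paranoid;
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &paranoid) == 1)
		printf("perf_event_paranoid = %d\n", paranoid);
	fclose(f);
	return 0;
}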
diff --git a/kernel/timer.c b/kernel/timer.c
index bbb51074680e..811e5c391456 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,7 +37,7 @@
 #include <linux/delay.h>
 #include <linux/tick.h>
 #include <linux/kallsyms.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/sched.h>
 
 #include <asm/uaccess.h>
@@ -1187,7 +1187,7 @@ static void run_timer_softirq(struct softirq_action *h)
 {
 	struct tvec_base *base = __get_cpu_var(tvec_bases);
 
-	perf_counter_do_pending();
+	perf_event_do_pending();
 
 	hrtimer_run_pending();
 
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8712ce3c6a0e..233f3483ac83 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -2,7 +2,7 @@
 #include <trace/events/syscalls.h>
 #include <linux/kernel.h>
 #include <linux/ftrace.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/syscall.h>
 
 #include "trace_output.h"
@@ -414,7 +414,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 		rec->nr = syscall_nr;
 		syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 				      (unsigned long *)&rec->args);
-		perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
+		perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
 	} while(0);
 }
 
@@ -476,7 +476,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	rec.nr = syscall_nr;
 	rec.ret = syscall_get_return_value(current, regs);
 
-	perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
+	perf_tp_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
 }
 
 int reg_prof_syscall_exit(char *name)
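
( NOTE: perf_tpcounter_event() becomes perf_tp_event(); as the two call sites above show, the arguments stay event id, addr, count, record pointer, record size. A hedged sketch of a hypothetical tracepoint profile handler built on the renamed helper — the record layout and event-id variable are invented for illustration: )

/* Hypothetical tracepoint profile hook, mirroring prof_syscall_exit(). */
#include <linux/perf_event.h>
#include <linux/smp.h>

struct my_trace_rec {
	unsigned long long ts;	/* fill from trace_clock() in real code */
	int cpu;
};

static int my_event_id;	/* assigned when the tracepoint is registered */

static void my_prof_handler(void)
{
	struct my_trace_rec rec = {
		.ts = 0,
		.cpu = smp_processor_id(),
	};

	/* addr = 0, count = 1, then the raw record and its size */
	perf_tp_event(my_event_id, 0, 1, &rec, sizeof(rec));
}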