author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-21 12:15:07 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-21 12:15:07 -0400
commit    43c1266ce4dc06bfd236cec31e11e9ecd69c0bef (patch)
tree      40a86739ca4c36200f447f655b01c57cfe646e26 /kernel
parent    b8c7f1dc5ca4e0d10709182233cdab932cef593d (diff)
parent    57c0c15b5244320065374ad2c54f4fbec77a6428 (diff)
Merge branch 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Tidy up after the big rename
  perf: Do the big rename: Performance Counters -> Performance Events
  perf_counter: Rename 'event' to event_id/hw_event
  perf_counter: Rename list_entry -> group_entry, counter_list -> group_list

Manually resolved some fairly trivial conflicts with the tracing tree in
include/trace/ftrace.h and kernel/trace/trace_syscalls.c.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                                              2
-rw-r--r--  kernel/exit.c                                                8
-rw-r--r--  kernel/fork.c                                                8
-rw-r--r--  kernel/perf_event.c (renamed from kernel/perf_counter.c)  2449
-rw-r--r--  kernel/sched.c                                              14
-rw-r--r--  kernel/sys.c                                                10
-rw-r--r--  kernel/sys_ni.c                                              2
-rw-r--r--  kernel/sysctl.c                                             22
-rw-r--r--  kernel/timer.c                                               4
-rw-r--r--  kernel/trace/trace_syscalls.c                                6
10 files changed, 1262 insertions, 1263 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 7c9b0a585502..187c89b4783d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -95,7 +95,7 @@ obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/exit.c b/kernel/exit.c
index ae5d8660ddff..e47ee8a06135 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -47,7 +47,7 @@
 #include <linux/tracehook.h>
 #include <linux/fs_struct.h>
 #include <linux/init_task.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <trace/events/sched.h>
 
 #include <asm/uaccess.h>
@@ -154,8 +154,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 {
 	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
-#ifdef CONFIG_PERF_COUNTERS
-	WARN_ON_ONCE(tsk->perf_counter_ctxp);
+#ifdef CONFIG_PERF_EVENTS
+	WARN_ON_ONCE(tsk->perf_event_ctxp);
 #endif
 	trace_sched_process_free(tsk);
 	put_task_struct(tsk);
@@ -981,7 +981,7 @@ NORET_TYPE void do_exit(long code)
 	 * Flush inherited counters to the parent - before the parent
 	 * gets woken up by child-exit notifications.
 	 */
-	perf_counter_exit_task(tsk);
+	perf_event_exit_task(tsk);
 
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
diff --git a/kernel/fork.c b/kernel/fork.c
index bfee931ee3fb..2cebfb23b0b8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -61,7 +61,7 @@
 #include <linux/blkdev.h>
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -1078,7 +1078,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
 
-	retval = perf_counter_init_task(p);
+	retval = perf_event_init_task(p);
 	if (retval)
 		goto bad_fork_cleanup_policy;
 
@@ -1253,7 +1253,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
-	perf_counter_fork(p);
+	perf_event_fork(p);
 	return p;
 
 bad_fork_free_pid:
@@ -1280,7 +1280,7 @@ bad_fork_cleanup_semundo:
 bad_fork_cleanup_audit:
 	audit_free(p);
 bad_fork_cleanup_policy:
-	perf_counter_free_task(p);
+	perf_event_free_task(p);
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
diff --git a/kernel/perf_counter.c b/kernel/perf_event.c
index cc768ab81ac8..76ac4db405e9 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_event.c
@@ -1,12 +1,12 @@
 /*
- * Performance counter core code
+ * Performance events core code:
  *
  * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
  */
 
 #include <linux/fs.h>
@@ -26,66 +26,66 @@
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/irq_regs.h>
 
 /*
- * Each CPU has a list of per CPU counters:
+ * Each CPU has a list of per CPU events:
  */
 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
 
-int perf_max_counters __read_mostly = 1;
+int perf_max_events __read_mostly = 1;
 static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
-static atomic_t nr_counters __read_mostly;
-static atomic_t nr_mmap_counters __read_mostly;
-static atomic_t nr_comm_counters __read_mostly;
-static atomic_t nr_task_counters __read_mostly;
+static atomic_t nr_events __read_mostly;
+static atomic_t nr_mmap_events __read_mostly;
+static atomic_t nr_comm_events __read_mostly;
+static atomic_t nr_task_events __read_mostly;
 
 /*
- * perf counter paranoia level:
+ * perf event paranoia level:
  * -1 - not paranoid at all
 
 *  0 - disallow raw tracepoint access for unpriv
- *  1 - disallow cpu counters for unpriv
+ *  1 - disallow cpu events for unpriv
  *  2 - disallow kernel profiling for unpriv
  */
-int sysctl_perf_counter_paranoid __read_mostly = 1;
+int sysctl_perf_event_paranoid __read_mostly = 1;
 
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
-	return sysctl_perf_counter_paranoid > -1;
+	return sysctl_perf_event_paranoid > -1;
 }
 
 static inline bool perf_paranoid_cpu(void)
 {
-	return sysctl_perf_counter_paranoid > 0;
+	return sysctl_perf_event_paranoid > 0;
 }
 
 static inline bool perf_paranoid_kernel(void)
 {
-	return sysctl_perf_counter_paranoid > 1;
+	return sysctl_perf_event_paranoid > 1;
 }
 
-int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
+int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
- * max perf counter sample rate
+ * max perf event sample rate
  */
-int sysctl_perf_counter_sample_rate __read_mostly = 100000;
+int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
-static atomic64_t perf_counter_id;
+static atomic64_t perf_event_id;
 
 /*
- * Lock for (sysadmin-configurable) counter reservations:
+ * Lock for (sysadmin-configurable) event reservations:
  */
 static DEFINE_SPINLOCK(perf_resource_lock);
 
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	return NULL;
 }
@@ -93,18 +93,18 @@ extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counte
 void __weak hw_perf_disable(void)		{ barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
-void __weak hw_perf_counter_setup(int cpu)		{ barrier(); }
-void __weak hw_perf_counter_setup_online(int cpu)	{ barrier(); }
+void __weak hw_perf_event_setup(int cpu)		{ barrier(); }
+void __weak hw_perf_event_setup_online(int cpu)	{ barrier(); }
 
 int __weak
-hw_perf_group_sched_in(struct perf_counter *group_leader,
+hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_counter_context *ctx, int cpu)
+	       struct perf_event_context *ctx, int cpu)
 {
 	return 0;
 }
 
-void __weak perf_counter_print_debug(void)	{ }
+void __weak perf_event_print_debug(void)	{ }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
 
@@ -130,20 +130,20 @@ void perf_enable(void)
 		hw_perf_enable();
 }
 
-static void get_ctx(struct perf_counter_context *ctx)
+static void get_ctx(struct perf_event_context *ctx)
 {
 	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
 }
 
 static void free_ctx(struct rcu_head *head)
 {
-	struct perf_counter_context *ctx;
+	struct perf_event_context *ctx;
 
-	ctx = container_of(head, struct perf_counter_context, rcu_head);
+	ctx = container_of(head, struct perf_event_context, rcu_head);
 	kfree(ctx);
 }
 
-static void put_ctx(struct perf_counter_context *ctx)
+static void put_ctx(struct perf_event_context *ctx)
 {
 	if (atomic_dec_and_test(&ctx->refcount)) {
 		if (ctx->parent_ctx)
@@ -154,7 +154,7 @@ static void put_ctx(struct perf_counter_context *ctx)
 	}
 }
 
-static void unclone_ctx(struct perf_counter_context *ctx)
+static void unclone_ctx(struct perf_event_context *ctx)
 {
 	if (ctx->parent_ctx) {
 		put_ctx(ctx->parent_ctx);
@@ -163,37 +163,37 @@ static void unclone_ctx(struct perf_counter_context *ctx)
 }
 
 /*
- * If we inherit counters we want to return the parent counter id
+ * If we inherit events we want to return the parent event id
  * to userspace.
  */
-static u64 primary_counter_id(struct perf_counter *counter)
+static u64 primary_event_id(struct perf_event *event)
 {
-	u64 id = counter->id;
+	u64 id = event->id;
 
-	if (counter->parent)
-		id = counter->parent->id;
+	if (event->parent)
+		id = event->parent->id;
 
 	return id;
 }
 
 /*
- * Get the perf_counter_context for a task and lock it.
+ * Get the perf_event_context for a task and lock it.
  * This has to cope with with the fact that until it is locked,
  * the context could get moved to another task.
  */
-static struct perf_counter_context *
+static struct perf_event_context *
 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 {
-	struct perf_counter_context *ctx;
+	struct perf_event_context *ctx;
 
 	rcu_read_lock();
  retry:
-	ctx = rcu_dereference(task->perf_counter_ctxp);
+	ctx = rcu_dereference(task->perf_event_ctxp);
 	if (ctx) {
 		/*
 		 * If this context is a clone of another, it might
 		 * get swapped for another underneath us by
-		 * perf_counter_task_sched_out, though the
+		 * perf_event_task_sched_out, though the
 		 * rcu_read_lock() protects us from any context
 		 * getting freed. Lock the context and check if it
 		 * got swapped before we could get the lock, and retry
@@ -201,7 +201,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 		 * can't get swapped on us any more.
 		 */
 		spin_lock_irqsave(&ctx->lock, *flags);
-		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
+		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
 			spin_unlock_irqrestore(&ctx->lock, *flags);
 			goto retry;
 		}
@@ -220,9 +220,9 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
  * can't get swapped to another task. This also increments its
  * reference count so that the context can't get freed.
  */
-static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
+static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
 {
-	struct perf_counter_context *ctx;
+	struct perf_event_context *ctx;
 	unsigned long flags;
 
 	ctx = perf_lock_task_context(task, &flags);
@@ -233,7 +233,7 @@ static struct perf_counter_context *perf_pin_task_context(struct task_struct *ta
 	return ctx;
 }
 
-static void perf_unpin_context(struct perf_counter_context *ctx)
+static void perf_unpin_context(struct perf_event_context *ctx)
 {
 	unsigned long flags;
 
@@ -244,123 +244,122 @@ static void perf_unpin_context(struct perf_counter_context *ctx)
244} 244}
245 245
246/* 246/*
247 * Add a counter from the lists for its context. 247 * Add a event from the lists for its context.
248 * Must be called with ctx->mutex and ctx->lock held. 248 * Must be called with ctx->mutex and ctx->lock held.
249 */ 249 */
250static void 250static void
251list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) 251list_add_event(struct perf_event *event, struct perf_event_context *ctx)
252{ 252{
253 struct perf_counter *group_leader = counter->group_leader; 253 struct perf_event *group_leader = event->group_leader;
254 254
255 /* 255 /*
256 * Depending on whether it is a standalone or sibling counter, 256 * Depending on whether it is a standalone or sibling event,
257 * add it straight to the context's counter list, or to the group 257 * add it straight to the context's event list, or to the group
258 * leader's sibling list: 258 * leader's sibling list:
259 */ 259 */
260 if (group_leader == counter) 260 if (group_leader == event)
261 list_add_tail(&counter->list_entry, &ctx->counter_list); 261 list_add_tail(&event->group_entry, &ctx->group_list);
262 else { 262 else {
263 list_add_tail(&counter->list_entry, &group_leader->sibling_list); 263 list_add_tail(&event->group_entry, &group_leader->sibling_list);
264 group_leader->nr_siblings++; 264 group_leader->nr_siblings++;
265 } 265 }
266 266
267 list_add_rcu(&counter->event_entry, &ctx->event_list); 267 list_add_rcu(&event->event_entry, &ctx->event_list);
268 ctx->nr_counters++; 268 ctx->nr_events++;
269 if (counter->attr.inherit_stat) 269 if (event->attr.inherit_stat)
270 ctx->nr_stat++; 270 ctx->nr_stat++;
271} 271}
272 272
273/* 273/*
274 * Remove a counter from the lists for its context. 274 * Remove a event from the lists for its context.
275 * Must be called with ctx->mutex and ctx->lock held. 275 * Must be called with ctx->mutex and ctx->lock held.
276 */ 276 */
277static void 277static void
278list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) 278list_del_event(struct perf_event *event, struct perf_event_context *ctx)
279{ 279{
280 struct perf_counter *sibling, *tmp; 280 struct perf_event *sibling, *tmp;
281 281
282 if (list_empty(&counter->list_entry)) 282 if (list_empty(&event->group_entry))
283 return; 283 return;
284 ctx->nr_counters--; 284 ctx->nr_events--;
285 if (counter->attr.inherit_stat) 285 if (event->attr.inherit_stat)
286 ctx->nr_stat--; 286 ctx->nr_stat--;
287 287
288 list_del_init(&counter->list_entry); 288 list_del_init(&event->group_entry);
289 list_del_rcu(&counter->event_entry); 289 list_del_rcu(&event->event_entry);
290 290
291 if (counter->group_leader != counter) 291 if (event->group_leader != event)
292 counter->group_leader->nr_siblings--; 292 event->group_leader->nr_siblings--;
293 293
294 /* 294 /*
295 * If this was a group counter with sibling counters then 295 * If this was a group event with sibling events then
296 * upgrade the siblings to singleton counters by adding them 296 * upgrade the siblings to singleton events by adding them
297 * to the context list directly: 297 * to the context list directly:
298 */ 298 */
299 list_for_each_entry_safe(sibling, tmp, 299 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
300 &counter->sibling_list, list_entry) {
301 300
302 list_move_tail(&sibling->list_entry, &ctx->counter_list); 301 list_move_tail(&sibling->group_entry, &ctx->group_list);
303 sibling->group_leader = sibling; 302 sibling->group_leader = sibling;
304 } 303 }
305} 304}
306 305
307static void 306static void
308counter_sched_out(struct perf_counter *counter, 307event_sched_out(struct perf_event *event,
309 struct perf_cpu_context *cpuctx, 308 struct perf_cpu_context *cpuctx,
310 struct perf_counter_context *ctx) 309 struct perf_event_context *ctx)
311{ 310{
312 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 311 if (event->state != PERF_EVENT_STATE_ACTIVE)
313 return; 312 return;
314 313
315 counter->state = PERF_COUNTER_STATE_INACTIVE; 314 event->state = PERF_EVENT_STATE_INACTIVE;
316 if (counter->pending_disable) { 315 if (event->pending_disable) {
317 counter->pending_disable = 0; 316 event->pending_disable = 0;
318 counter->state = PERF_COUNTER_STATE_OFF; 317 event->state = PERF_EVENT_STATE_OFF;
319 } 318 }
320 counter->tstamp_stopped = ctx->time; 319 event->tstamp_stopped = ctx->time;
321 counter->pmu->disable(counter); 320 event->pmu->disable(event);
322 counter->oncpu = -1; 321 event->oncpu = -1;
323 322
324 if (!is_software_counter(counter)) 323 if (!is_software_event(event))
325 cpuctx->active_oncpu--; 324 cpuctx->active_oncpu--;
326 ctx->nr_active--; 325 ctx->nr_active--;
327 if (counter->attr.exclusive || !cpuctx->active_oncpu) 326 if (event->attr.exclusive || !cpuctx->active_oncpu)
328 cpuctx->exclusive = 0; 327 cpuctx->exclusive = 0;
329} 328}
330 329
331static void 330static void
332group_sched_out(struct perf_counter *group_counter, 331group_sched_out(struct perf_event *group_event,
333 struct perf_cpu_context *cpuctx, 332 struct perf_cpu_context *cpuctx,
334 struct perf_counter_context *ctx) 333 struct perf_event_context *ctx)
335{ 334{
336 struct perf_counter *counter; 335 struct perf_event *event;
337 336
338 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) 337 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
339 return; 338 return;
340 339
341 counter_sched_out(group_counter, cpuctx, ctx); 340 event_sched_out(group_event, cpuctx, ctx);
342 341
343 /* 342 /*
344 * Schedule out siblings (if any): 343 * Schedule out siblings (if any):
345 */ 344 */
346 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) 345 list_for_each_entry(event, &group_event->sibling_list, group_entry)
347 counter_sched_out(counter, cpuctx, ctx); 346 event_sched_out(event, cpuctx, ctx);
348 347
349 if (group_counter->attr.exclusive) 348 if (group_event->attr.exclusive)
350 cpuctx->exclusive = 0; 349 cpuctx->exclusive = 0;
351} 350}
352 351
353/* 352/*
354 * Cross CPU call to remove a performance counter 353 * Cross CPU call to remove a performance event
355 * 354 *
356 * We disable the counter on the hardware level first. After that we 355 * We disable the event on the hardware level first. After that we
357 * remove it from the context list. 356 * remove it from the context list.
358 */ 357 */
359static void __perf_counter_remove_from_context(void *info) 358static void __perf_event_remove_from_context(void *info)
360{ 359{
361 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 360 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
362 struct perf_counter *counter = info; 361 struct perf_event *event = info;
363 struct perf_counter_context *ctx = counter->ctx; 362 struct perf_event_context *ctx = event->ctx;
364 363
365 /* 364 /*
366 * If this is a task context, we need to check whether it is 365 * If this is a task context, we need to check whether it is
@@ -373,22 +372,22 @@ static void __perf_counter_remove_from_context(void *info)
373 spin_lock(&ctx->lock); 372 spin_lock(&ctx->lock);
374 /* 373 /*
375 * Protect the list operation against NMI by disabling the 374 * Protect the list operation against NMI by disabling the
376 * counters on a global level. 375 * events on a global level.
377 */ 376 */
378 perf_disable(); 377 perf_disable();
379 378
380 counter_sched_out(counter, cpuctx, ctx); 379 event_sched_out(event, cpuctx, ctx);
381 380
382 list_del_counter(counter, ctx); 381 list_del_event(event, ctx);
383 382
384 if (!ctx->task) { 383 if (!ctx->task) {
385 /* 384 /*
386 * Allow more per task counters with respect to the 385 * Allow more per task events with respect to the
387 * reservation: 386 * reservation:
388 */ 387 */
389 cpuctx->max_pertask = 388 cpuctx->max_pertask =
390 min(perf_max_counters - ctx->nr_counters, 389 min(perf_max_events - ctx->nr_events,
391 perf_max_counters - perf_reserved_percpu); 390 perf_max_events - perf_reserved_percpu);
392 } 391 }
393 392
394 perf_enable(); 393 perf_enable();
@@ -397,56 +396,56 @@ static void __perf_counter_remove_from_context(void *info)
397 396
398 397
399/* 398/*
400 * Remove the counter from a task's (or a CPU's) list of counters. 399 * Remove the event from a task's (or a CPU's) list of events.
401 * 400 *
402 * Must be called with ctx->mutex held. 401 * Must be called with ctx->mutex held.
403 * 402 *
404 * CPU counters are removed with a smp call. For task counters we only 403 * CPU events are removed with a smp call. For task events we only
405 * call when the task is on a CPU. 404 * call when the task is on a CPU.
406 * 405 *
407 * If counter->ctx is a cloned context, callers must make sure that 406 * If event->ctx is a cloned context, callers must make sure that
408 * every task struct that counter->ctx->task could possibly point to 407 * every task struct that event->ctx->task could possibly point to
409 * remains valid. This is OK when called from perf_release since 408 * remains valid. This is OK when called from perf_release since
410 * that only calls us on the top-level context, which can't be a clone. 409 * that only calls us on the top-level context, which can't be a clone.
411 * When called from perf_counter_exit_task, it's OK because the 410 * When called from perf_event_exit_task, it's OK because the
412 * context has been detached from its task. 411 * context has been detached from its task.
413 */ 412 */
414static void perf_counter_remove_from_context(struct perf_counter *counter) 413static void perf_event_remove_from_context(struct perf_event *event)
415{ 414{
416 struct perf_counter_context *ctx = counter->ctx; 415 struct perf_event_context *ctx = event->ctx;
417 struct task_struct *task = ctx->task; 416 struct task_struct *task = ctx->task;
418 417
419 if (!task) { 418 if (!task) {
420 /* 419 /*
421 * Per cpu counters are removed via an smp call and 420 * Per cpu events are removed via an smp call and
422 * the removal is always sucessful. 421 * the removal is always sucessful.
423 */ 422 */
424 smp_call_function_single(counter->cpu, 423 smp_call_function_single(event->cpu,
425 __perf_counter_remove_from_context, 424 __perf_event_remove_from_context,
426 counter, 1); 425 event, 1);
427 return; 426 return;
428 } 427 }
429 428
430retry: 429retry:
431 task_oncpu_function_call(task, __perf_counter_remove_from_context, 430 task_oncpu_function_call(task, __perf_event_remove_from_context,
432 counter); 431 event);
433 432
434 spin_lock_irq(&ctx->lock); 433 spin_lock_irq(&ctx->lock);
435 /* 434 /*
436 * If the context is active we need to retry the smp call. 435 * If the context is active we need to retry the smp call.
437 */ 436 */
438 if (ctx->nr_active && !list_empty(&counter->list_entry)) { 437 if (ctx->nr_active && !list_empty(&event->group_entry)) {
439 spin_unlock_irq(&ctx->lock); 438 spin_unlock_irq(&ctx->lock);
440 goto retry; 439 goto retry;
441 } 440 }
442 441
443 /* 442 /*
444 * The lock prevents that this context is scheduled in so we 443 * The lock prevents that this context is scheduled in so we
445 * can remove the counter safely, if the call above did not 444 * can remove the event safely, if the call above did not
446 * succeed. 445 * succeed.
447 */ 446 */
448 if (!list_empty(&counter->list_entry)) { 447 if (!list_empty(&event->group_entry)) {
449 list_del_counter(counter, ctx); 448 list_del_event(event, ctx);
450 } 449 }
451 spin_unlock_irq(&ctx->lock); 450 spin_unlock_irq(&ctx->lock);
452} 451}
@@ -459,7 +458,7 @@ static inline u64 perf_clock(void)
459/* 458/*
460 * Update the record of the current time in a context. 459 * Update the record of the current time in a context.
461 */ 460 */
462static void update_context_time(struct perf_counter_context *ctx) 461static void update_context_time(struct perf_event_context *ctx)
463{ 462{
464 u64 now = perf_clock(); 463 u64 now = perf_clock();
465 464
@@ -468,51 +467,51 @@ static void update_context_time(struct perf_counter_context *ctx)
468} 467}
469 468
470/* 469/*
471 * Update the total_time_enabled and total_time_running fields for a counter. 470 * Update the total_time_enabled and total_time_running fields for a event.
472 */ 471 */
473static void update_counter_times(struct perf_counter *counter) 472static void update_event_times(struct perf_event *event)
474{ 473{
475 struct perf_counter_context *ctx = counter->ctx; 474 struct perf_event_context *ctx = event->ctx;
476 u64 run_end; 475 u64 run_end;
477 476
478 if (counter->state < PERF_COUNTER_STATE_INACTIVE || 477 if (event->state < PERF_EVENT_STATE_INACTIVE ||
479 counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE) 478 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
480 return; 479 return;
481 480
482 counter->total_time_enabled = ctx->time - counter->tstamp_enabled; 481 event->total_time_enabled = ctx->time - event->tstamp_enabled;
483 482
484 if (counter->state == PERF_COUNTER_STATE_INACTIVE) 483 if (event->state == PERF_EVENT_STATE_INACTIVE)
485 run_end = counter->tstamp_stopped; 484 run_end = event->tstamp_stopped;
486 else 485 else
487 run_end = ctx->time; 486 run_end = ctx->time;
488 487
489 counter->total_time_running = run_end - counter->tstamp_running; 488 event->total_time_running = run_end - event->tstamp_running;
490} 489}
491 490
492/* 491/*
493 * Update total_time_enabled and total_time_running for all counters in a group. 492 * Update total_time_enabled and total_time_running for all events in a group.
494 */ 493 */
495static void update_group_times(struct perf_counter *leader) 494static void update_group_times(struct perf_event *leader)
496{ 495{
497 struct perf_counter *counter; 496 struct perf_event *event;
498 497
499 update_counter_times(leader); 498 update_event_times(leader);
500 list_for_each_entry(counter, &leader->sibling_list, list_entry) 499 list_for_each_entry(event, &leader->sibling_list, group_entry)
501 update_counter_times(counter); 500 update_event_times(event);
502} 501}
503 502
504/* 503/*
505 * Cross CPU call to disable a performance counter 504 * Cross CPU call to disable a performance event
506 */ 505 */
507static void __perf_counter_disable(void *info) 506static void __perf_event_disable(void *info)
508{ 507{
509 struct perf_counter *counter = info; 508 struct perf_event *event = info;
510 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 509 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
511 struct perf_counter_context *ctx = counter->ctx; 510 struct perf_event_context *ctx = event->ctx;
512 511
513 /* 512 /*
514 * If this is a per-task counter, need to check whether this 513 * If this is a per-task event, need to check whether this
515 * counter's task is the current task on this cpu. 514 * event's task is the current task on this cpu.
516 */ 515 */
517 if (ctx->task && cpuctx->task_ctx != ctx) 516 if (ctx->task && cpuctx->task_ctx != ctx)
518 return; 517 return;
@@ -520,57 +519,57 @@ static void __perf_counter_disable(void *info)
520 spin_lock(&ctx->lock); 519 spin_lock(&ctx->lock);
521 520
522 /* 521 /*
523 * If the counter is on, turn it off. 522 * If the event is on, turn it off.
524 * If it is in error state, leave it in error state. 523 * If it is in error state, leave it in error state.
525 */ 524 */
526 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { 525 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
527 update_context_time(ctx); 526 update_context_time(ctx);
528 update_group_times(counter); 527 update_group_times(event);
529 if (counter == counter->group_leader) 528 if (event == event->group_leader)
530 group_sched_out(counter, cpuctx, ctx); 529 group_sched_out(event, cpuctx, ctx);
531 else 530 else
532 counter_sched_out(counter, cpuctx, ctx); 531 event_sched_out(event, cpuctx, ctx);
533 counter->state = PERF_COUNTER_STATE_OFF; 532 event->state = PERF_EVENT_STATE_OFF;
534 } 533 }
535 534
536 spin_unlock(&ctx->lock); 535 spin_unlock(&ctx->lock);
537} 536}
538 537
539/* 538/*
540 * Disable a counter. 539 * Disable a event.
541 * 540 *
542 * If counter->ctx is a cloned context, callers must make sure that 541 * If event->ctx is a cloned context, callers must make sure that
543 * every task struct that counter->ctx->task could possibly point to 542 * every task struct that event->ctx->task could possibly point to
544 * remains valid. This condition is satisifed when called through 543 * remains valid. This condition is satisifed when called through
545 * perf_counter_for_each_child or perf_counter_for_each because they 544 * perf_event_for_each_child or perf_event_for_each because they
546 * hold the top-level counter's child_mutex, so any descendant that 545 * hold the top-level event's child_mutex, so any descendant that
547 * goes to exit will block in sync_child_counter. 546 * goes to exit will block in sync_child_event.
548 * When called from perf_pending_counter it's OK because counter->ctx 547 * When called from perf_pending_event it's OK because event->ctx
549 * is the current context on this CPU and preemption is disabled, 548 * is the current context on this CPU and preemption is disabled,
550 * hence we can't get into perf_counter_task_sched_out for this context. 549 * hence we can't get into perf_event_task_sched_out for this context.
551 */ 550 */
552static void perf_counter_disable(struct perf_counter *counter) 551static void perf_event_disable(struct perf_event *event)
553{ 552{
554 struct perf_counter_context *ctx = counter->ctx; 553 struct perf_event_context *ctx = event->ctx;
555 struct task_struct *task = ctx->task; 554 struct task_struct *task = ctx->task;
556 555
557 if (!task) { 556 if (!task) {
558 /* 557 /*
559 * Disable the counter on the cpu that it's on 558 * Disable the event on the cpu that it's on
560 */ 559 */
561 smp_call_function_single(counter->cpu, __perf_counter_disable, 560 smp_call_function_single(event->cpu, __perf_event_disable,
562 counter, 1); 561 event, 1);
563 return; 562 return;
564 } 563 }
565 564
566 retry: 565 retry:
567 task_oncpu_function_call(task, __perf_counter_disable, counter); 566 task_oncpu_function_call(task, __perf_event_disable, event);
568 567
569 spin_lock_irq(&ctx->lock); 568 spin_lock_irq(&ctx->lock);
570 /* 569 /*
571 * If the counter is still active, we need to retry the cross-call. 570 * If the event is still active, we need to retry the cross-call.
572 */ 571 */
573 if (counter->state == PERF_COUNTER_STATE_ACTIVE) { 572 if (event->state == PERF_EVENT_STATE_ACTIVE) {
574 spin_unlock_irq(&ctx->lock); 573 spin_unlock_irq(&ctx->lock);
575 goto retry; 574 goto retry;
576 } 575 }
@@ -579,73 +578,73 @@ static void perf_counter_disable(struct perf_counter *counter)
579 * Since we have the lock this context can't be scheduled 578 * Since we have the lock this context can't be scheduled
580 * in, so we can change the state safely. 579 * in, so we can change the state safely.
581 */ 580 */
582 if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 581 if (event->state == PERF_EVENT_STATE_INACTIVE) {
583 update_group_times(counter); 582 update_group_times(event);
584 counter->state = PERF_COUNTER_STATE_OFF; 583 event->state = PERF_EVENT_STATE_OFF;
585 } 584 }
586 585
587 spin_unlock_irq(&ctx->lock); 586 spin_unlock_irq(&ctx->lock);
588} 587}
589 588
590static int 589static int
591counter_sched_in(struct perf_counter *counter, 590event_sched_in(struct perf_event *event,
592 struct perf_cpu_context *cpuctx, 591 struct perf_cpu_context *cpuctx,
593 struct perf_counter_context *ctx, 592 struct perf_event_context *ctx,
594 int cpu) 593 int cpu)
595{ 594{
596 if (counter->state <= PERF_COUNTER_STATE_OFF) 595 if (event->state <= PERF_EVENT_STATE_OFF)
597 return 0; 596 return 0;
598 597
599 counter->state = PERF_COUNTER_STATE_ACTIVE; 598 event->state = PERF_EVENT_STATE_ACTIVE;
600 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ 599 event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
601 /* 600 /*
602 * The new state must be visible before we turn it on in the hardware: 601 * The new state must be visible before we turn it on in the hardware:
603 */ 602 */
604 smp_wmb(); 603 smp_wmb();
605 604
606 if (counter->pmu->enable(counter)) { 605 if (event->pmu->enable(event)) {
607 counter->state = PERF_COUNTER_STATE_INACTIVE; 606 event->state = PERF_EVENT_STATE_INACTIVE;
608 counter->oncpu = -1; 607 event->oncpu = -1;
609 return -EAGAIN; 608 return -EAGAIN;
610 } 609 }
611 610
612 counter->tstamp_running += ctx->time - counter->tstamp_stopped; 611 event->tstamp_running += ctx->time - event->tstamp_stopped;
613 612
614 if (!is_software_counter(counter)) 613 if (!is_software_event(event))
615 cpuctx->active_oncpu++; 614 cpuctx->active_oncpu++;
616 ctx->nr_active++; 615 ctx->nr_active++;
617 616
618 if (counter->attr.exclusive) 617 if (event->attr.exclusive)
619 cpuctx->exclusive = 1; 618 cpuctx->exclusive = 1;
620 619
621 return 0; 620 return 0;
622} 621}
623 622
624static int 623static int
625group_sched_in(struct perf_counter *group_counter, 624group_sched_in(struct perf_event *group_event,
626 struct perf_cpu_context *cpuctx, 625 struct perf_cpu_context *cpuctx,
627 struct perf_counter_context *ctx, 626 struct perf_event_context *ctx,
628 int cpu) 627 int cpu)
629{ 628{
630 struct perf_counter *counter, *partial_group; 629 struct perf_event *event, *partial_group;
631 int ret; 630 int ret;
632 631
633 if (group_counter->state == PERF_COUNTER_STATE_OFF) 632 if (group_event->state == PERF_EVENT_STATE_OFF)
634 return 0; 633 return 0;
635 634
636 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); 635 ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
637 if (ret) 636 if (ret)
638 return ret < 0 ? ret : 0; 637 return ret < 0 ? ret : 0;
639 638
640 if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) 639 if (event_sched_in(group_event, cpuctx, ctx, cpu))
641 return -EAGAIN; 640 return -EAGAIN;
642 641
643 /* 642 /*
644 * Schedule in siblings as one group (if any): 643 * Schedule in siblings as one group (if any):
645 */ 644 */
646 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { 645 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
647 if (counter_sched_in(counter, cpuctx, ctx, cpu)) { 646 if (event_sched_in(event, cpuctx, ctx, cpu)) {
648 partial_group = counter; 647 partial_group = event;
649 goto group_error; 648 goto group_error;
650 } 649 }
651 } 650 }
@@ -657,57 +656,57 @@ group_error:
657 * Groups can be scheduled in as one unit only, so undo any 656 * Groups can be scheduled in as one unit only, so undo any
658 * partial group before returning: 657 * partial group before returning:
659 */ 658 */
660 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { 659 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
661 if (counter == partial_group) 660 if (event == partial_group)
662 break; 661 break;
663 counter_sched_out(counter, cpuctx, ctx); 662 event_sched_out(event, cpuctx, ctx);
664 } 663 }
665 counter_sched_out(group_counter, cpuctx, ctx); 664 event_sched_out(group_event, cpuctx, ctx);
666 665
667 return -EAGAIN; 666 return -EAGAIN;
668} 667}
669 668
670/* 669/*
671 * Return 1 for a group consisting entirely of software counters, 670 * Return 1 for a group consisting entirely of software events,
672 * 0 if the group contains any hardware counters. 671 * 0 if the group contains any hardware events.
673 */ 672 */
674static int is_software_only_group(struct perf_counter *leader) 673static int is_software_only_group(struct perf_event *leader)
675{ 674{
676 struct perf_counter *counter; 675 struct perf_event *event;
677 676
678 if (!is_software_counter(leader)) 677 if (!is_software_event(leader))
679 return 0; 678 return 0;
680 679
681 list_for_each_entry(counter, &leader->sibling_list, list_entry) 680 list_for_each_entry(event, &leader->sibling_list, group_entry)
682 if (!is_software_counter(counter)) 681 if (!is_software_event(event))
683 return 0; 682 return 0;
684 683
685 return 1; 684 return 1;
686} 685}
687 686
688/* 687/*
689 * Work out whether we can put this counter group on the CPU now. 688 * Work out whether we can put this event group on the CPU now.
690 */ 689 */
691static int group_can_go_on(struct perf_counter *counter, 690static int group_can_go_on(struct perf_event *event,
692 struct perf_cpu_context *cpuctx, 691 struct perf_cpu_context *cpuctx,
693 int can_add_hw) 692 int can_add_hw)
694{ 693{
695 /* 694 /*
696 * Groups consisting entirely of software counters can always go on. 695 * Groups consisting entirely of software events can always go on.
697 */ 696 */
698 if (is_software_only_group(counter)) 697 if (is_software_only_group(event))
699 return 1; 698 return 1;
700 /* 699 /*
701 * If an exclusive group is already on, no other hardware 700 * If an exclusive group is already on, no other hardware
702 * counters can go on. 701 * events can go on.
703 */ 702 */
704 if (cpuctx->exclusive) 703 if (cpuctx->exclusive)
705 return 0; 704 return 0;
706 /* 705 /*
707 * If this group is exclusive and there are already 706 * If this group is exclusive and there are already
708 * counters on the CPU, it can't go on. 707 * events on the CPU, it can't go on.
709 */ 708 */
710 if (counter->attr.exclusive && cpuctx->active_oncpu) 709 if (event->attr.exclusive && cpuctx->active_oncpu)
711 return 0; 710 return 0;
712 /* 711 /*
713 * Otherwise, try to add it if all previous groups were able 712 * Otherwise, try to add it if all previous groups were able
@@ -716,26 +715,26 @@ static int group_can_go_on(struct perf_counter *counter,
716 return can_add_hw; 715 return can_add_hw;
717} 716}
718 717
719static void add_counter_to_ctx(struct perf_counter *counter, 718static void add_event_to_ctx(struct perf_event *event,
720 struct perf_counter_context *ctx) 719 struct perf_event_context *ctx)
721{ 720{
722 list_add_counter(counter, ctx); 721 list_add_event(event, ctx);
723 counter->tstamp_enabled = ctx->time; 722 event->tstamp_enabled = ctx->time;
724 counter->tstamp_running = ctx->time; 723 event->tstamp_running = ctx->time;
725 counter->tstamp_stopped = ctx->time; 724 event->tstamp_stopped = ctx->time;
726} 725}
727 726
728/* 727/*
729 * Cross CPU call to install and enable a performance counter 728 * Cross CPU call to install and enable a performance event
730 * 729 *
731 * Must be called with ctx->mutex held 730 * Must be called with ctx->mutex held
732 */ 731 */
733static void __perf_install_in_context(void *info) 732static void __perf_install_in_context(void *info)
734{ 733{
735 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 734 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
736 struct perf_counter *counter = info; 735 struct perf_event *event = info;
737 struct perf_counter_context *ctx = counter->ctx; 736 struct perf_event_context *ctx = event->ctx;
738 struct perf_counter *leader = counter->group_leader; 737 struct perf_event *leader = event->group_leader;
739 int cpu = smp_processor_id(); 738 int cpu = smp_processor_id();
740 int err; 739 int err;
741 740
@@ -744,7 +743,7 @@ static void __perf_install_in_context(void *info)
744 * the current task context of this cpu. If not it has been 743 * the current task context of this cpu. If not it has been
745 * scheduled out before the smp call arrived. 744 * scheduled out before the smp call arrived.
746 * Or possibly this is the right context but it isn't 745 * Or possibly this is the right context but it isn't
747 * on this cpu because it had no counters. 746 * on this cpu because it had no events.
748 */ 747 */
749 if (ctx->task && cpuctx->task_ctx != ctx) { 748 if (ctx->task && cpuctx->task_ctx != ctx) {
750 if (cpuctx->task_ctx || ctx->task != current) 749 if (cpuctx->task_ctx || ctx->task != current)
@@ -758,41 +757,41 @@ static void __perf_install_in_context(void *info)
758 757
759 /* 758 /*
760 * Protect the list operation against NMI by disabling the 759 * Protect the list operation against NMI by disabling the
761 * counters on a global level. NOP for non NMI based counters. 760 * events on a global level. NOP for non NMI based events.
762 */ 761 */
763 perf_disable(); 762 perf_disable();
764 763
765 add_counter_to_ctx(counter, ctx); 764 add_event_to_ctx(event, ctx);
766 765
767 /* 766 /*
768 * Don't put the counter on if it is disabled or if 767 * Don't put the event on if it is disabled or if
769 * it is in a group and the group isn't on. 768 * it is in a group and the group isn't on.
770 */ 769 */
771 if (counter->state != PERF_COUNTER_STATE_INACTIVE || 770 if (event->state != PERF_EVENT_STATE_INACTIVE ||
772 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)) 771 (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
773 goto unlock; 772 goto unlock;
774 773
775 /* 774 /*
776 * An exclusive counter can't go on if there are already active 775 * An exclusive event can't go on if there are already active
777 * hardware counters, and no hardware counter can go on if there 776 * hardware events, and no hardware event can go on if there
778 * is already an exclusive counter on. 777 * is already an exclusive event on.
779 */ 778 */
780 if (!group_can_go_on(counter, cpuctx, 1)) 779 if (!group_can_go_on(event, cpuctx, 1))
781 err = -EEXIST; 780 err = -EEXIST;
782 else 781 else
783 err = counter_sched_in(counter, cpuctx, ctx, cpu); 782 err = event_sched_in(event, cpuctx, ctx, cpu);
784 783
785 if (err) { 784 if (err) {
786 /* 785 /*
787 * This counter couldn't go on. If it is in a group 786 * This event couldn't go on. If it is in a group
788 * then we have to pull the whole group off. 787 * then we have to pull the whole group off.
789 * If the counter group is pinned then put it in error state. 788 * If the event group is pinned then put it in error state.
790 */ 789 */
791 if (leader != counter) 790 if (leader != event)
792 group_sched_out(leader, cpuctx, ctx); 791 group_sched_out(leader, cpuctx, ctx);
793 if (leader->attr.pinned) { 792 if (leader->attr.pinned) {
794 update_group_times(leader); 793 update_group_times(leader);
795 leader->state = PERF_COUNTER_STATE_ERROR; 794 leader->state = PERF_EVENT_STATE_ERROR;
796 } 795 }
797 } 796 }
798 797
@@ -806,92 +805,92 @@ static void __perf_install_in_context(void *info)
806} 805}
807 806
808/* 807/*
809 * Attach a performance counter to a context 808 * Attach a performance event to a context
810 * 809 *
811 * First we add the counter to the list with the hardware enable bit 810 * First we add the event to the list with the hardware enable bit
812 * in counter->hw_config cleared. 811 * in event->hw_config cleared.
813 * 812 *
814 * If the counter is attached to a task which is on a CPU we use a smp 813 * If the event is attached to a task which is on a CPU we use a smp
815 * call to enable it in the task context. The task might have been 814 * call to enable it in the task context. The task might have been
816 * scheduled away, but we check this in the smp call again. 815 * scheduled away, but we check this in the smp call again.
817 * 816 *
818 * Must be called with ctx->mutex held. 817 * Must be called with ctx->mutex held.
819 */ 818 */
820static void 819static void
821perf_install_in_context(struct perf_counter_context *ctx, 820perf_install_in_context(struct perf_event_context *ctx,
822 struct perf_counter *counter, 821 struct perf_event *event,
823 int cpu) 822 int cpu)
824{ 823{
825 struct task_struct *task = ctx->task; 824 struct task_struct *task = ctx->task;
826 825
827 if (!task) { 826 if (!task) {
828 /* 827 /*
829 * Per cpu counters are installed via an smp call and 828 * Per cpu events are installed via an smp call and
830 * the install is always sucessful. 829 * the install is always sucessful.
831 */ 830 */
832 smp_call_function_single(cpu, __perf_install_in_context, 831 smp_call_function_single(cpu, __perf_install_in_context,
833 counter, 1); 832 event, 1);
834 return; 833 return;
835 } 834 }
836 835
837retry: 836retry:
838 task_oncpu_function_call(task, __perf_install_in_context, 837 task_oncpu_function_call(task, __perf_install_in_context,
839 counter); 838 event);
840 839
841 spin_lock_irq(&ctx->lock); 840 spin_lock_irq(&ctx->lock);
842 /* 841 /*
843 * we need to retry the smp call. 842 * we need to retry the smp call.
844 */ 843 */
845 if (ctx->is_active && list_empty(&counter->list_entry)) { 844 if (ctx->is_active && list_empty(&event->group_entry)) {
846 spin_unlock_irq(&ctx->lock); 845 spin_unlock_irq(&ctx->lock);
847 goto retry; 846 goto retry;
848 } 847 }
849 848
850 /* 849 /*
851 * The lock prevents that this context is scheduled in so we 850 * The lock prevents that this context is scheduled in so we
852 * can add the counter safely, if it the call above did not 851 * can add the event safely, if it the call above did not
853 * succeed. 852 * succeed.
854 */ 853 */
855 if (list_empty(&counter->list_entry)) 854 if (list_empty(&event->group_entry))
856 add_counter_to_ctx(counter, ctx); 855 add_event_to_ctx(event, ctx);
857 spin_unlock_irq(&ctx->lock); 856 spin_unlock_irq(&ctx->lock);
858} 857}
859 858
860/* 859/*
861 * Put a counter into inactive state and update time fields. 860 * Put a event into inactive state and update time fields.
862 * Enabling the leader of a group effectively enables all 861 * Enabling the leader of a group effectively enables all
863 * the group members that aren't explicitly disabled, so we 862 * the group members that aren't explicitly disabled, so we
864 * have to update their ->tstamp_enabled also. 863 * have to update their ->tstamp_enabled also.
865 * Note: this works for group members as well as group leaders 864 * Note: this works for group members as well as group leaders
866 * since the non-leader members' sibling_lists will be empty. 865 * since the non-leader members' sibling_lists will be empty.
867 */ 866 */
868static void __perf_counter_mark_enabled(struct perf_counter *counter, 867static void __perf_event_mark_enabled(struct perf_event *event,
869 struct perf_counter_context *ctx) 868 struct perf_event_context *ctx)
870{ 869{
871 struct perf_counter *sub; 870 struct perf_event *sub;
872 871
873 counter->state = PERF_COUNTER_STATE_INACTIVE; 872 event->state = PERF_EVENT_STATE_INACTIVE;
874 counter->tstamp_enabled = ctx->time - counter->total_time_enabled; 873 event->tstamp_enabled = ctx->time - event->total_time_enabled;
875 list_for_each_entry(sub, &counter->sibling_list, list_entry) 874 list_for_each_entry(sub, &event->sibling_list, group_entry)
876 if (sub->state >= PERF_COUNTER_STATE_INACTIVE) 875 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
877 sub->tstamp_enabled = 876 sub->tstamp_enabled =
878 ctx->time - sub->total_time_enabled; 877 ctx->time - sub->total_time_enabled;
879} 878}
880 879
881/* 880/*
882 * Cross CPU call to enable a performance counter 881 * Cross CPU call to enable a performance event
883 */ 882 */
884static void __perf_counter_enable(void *info) 883static void __perf_event_enable(void *info)
885{ 884{
886 struct perf_counter *counter = info; 885 struct perf_event *event = info;
887 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 886 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
888 struct perf_counter_context *ctx = counter->ctx; 887 struct perf_event_context *ctx = event->ctx;
889 struct perf_counter *leader = counter->group_leader; 888 struct perf_event *leader = event->group_leader;
890 int err; 889 int err;
891 890
892 /* 891 /*
893 * If this is a per-task counter, need to check whether this 892 * If this is a per-task event, need to check whether this
894 * counter's task is the current task on this cpu. 893 * event's task is the current task on this cpu.
895 */ 894 */
896 if (ctx->task && cpuctx->task_ctx != ctx) { 895 if (ctx->task && cpuctx->task_ctx != ctx) {
897 if (cpuctx->task_ctx || ctx->task != current) 896 if (cpuctx->task_ctx || ctx->task != current)
@@ -903,40 +902,40 @@ static void __perf_counter_enable(void *info)
903 ctx->is_active = 1; 902 ctx->is_active = 1;
904 update_context_time(ctx); 903 update_context_time(ctx);
905 904
906 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 905 if (event->state >= PERF_EVENT_STATE_INACTIVE)
907 goto unlock; 906 goto unlock;
908 __perf_counter_mark_enabled(counter, ctx); 907 __perf_event_mark_enabled(event, ctx);
909 908
910 /* 909 /*
911 * If the counter is in a group and isn't the group leader, 910 * If the event is in a group and isn't the group leader,
912 * then don't put it on unless the group is on. 911 * then don't put it on unless the group is on.
913 */ 912 */
914 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) 913 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
915 goto unlock; 914 goto unlock;
916 915
917 if (!group_can_go_on(counter, cpuctx, 1)) { 916 if (!group_can_go_on(event, cpuctx, 1)) {
918 err = -EEXIST; 917 err = -EEXIST;
919 } else { 918 } else {
920 perf_disable(); 919 perf_disable();
921 if (counter == leader) 920 if (event == leader)
922 err = group_sched_in(counter, cpuctx, ctx, 921 err = group_sched_in(event, cpuctx, ctx,
923 smp_processor_id()); 922 smp_processor_id());
924 else 923 else
925 err = counter_sched_in(counter, cpuctx, ctx, 924 err = event_sched_in(event, cpuctx, ctx,
926 smp_processor_id()); 925 smp_processor_id());
927 perf_enable(); 926 perf_enable();
928 } 927 }
929 928
930 if (err) { 929 if (err) {
931 /* 930 /*
932 * If this counter can't go on and it's part of a 931 * If this event can't go on and it's part of a
933 * group, then the whole group has to come off. 932 * group, then the whole group has to come off.
934 */ 933 */
935 if (leader != counter) 934 if (leader != event)
936 group_sched_out(leader, cpuctx, ctx); 935 group_sched_out(leader, cpuctx, ctx);
937 if (leader->attr.pinned) { 936 if (leader->attr.pinned) {
938 update_group_times(leader); 937 update_group_times(leader);
939 leader->state = PERF_COUNTER_STATE_ERROR; 938 leader->state = PERF_EVENT_STATE_ERROR;
940 } 939 }
941 } 940 }
942 941
@@ -945,98 +944,98 @@ static void __perf_counter_enable(void *info)
945} 944}
946 945
947/* 946/*
948 * Enable a counter. 947 * Enable a event.
949 * 948 *
950 * If counter->ctx is a cloned context, callers must make sure that 949 * If event->ctx is a cloned context, callers must make sure that
951 * every task struct that counter->ctx->task could possibly point to 950 * every task struct that event->ctx->task could possibly point to
952 * remains valid. This condition is satisfied when called through 951 * remains valid. This condition is satisfied when called through
953 * perf_counter_for_each_child or perf_counter_for_each as described 952 * perf_event_for_each_child or perf_event_for_each as described
954 * for perf_counter_disable. 953 * for perf_event_disable.
955 */ 954 */
956static void perf_counter_enable(struct perf_counter *counter) 955static void perf_event_enable(struct perf_event *event)
957{ 956{
958 struct perf_counter_context *ctx = counter->ctx; 957 struct perf_event_context *ctx = event->ctx;
959 struct task_struct *task = ctx->task; 958 struct task_struct *task = ctx->task;
960 959
961 if (!task) { 960 if (!task) {
962 /* 961 /*
963 * Enable the counter on the cpu that it's on 962 * Enable the event on the cpu that it's on
964 */ 963 */
965 smp_call_function_single(counter->cpu, __perf_counter_enable, 964 smp_call_function_single(event->cpu, __perf_event_enable,
966 counter, 1); 965 event, 1);
967 return; 966 return;
968 } 967 }
969 968
970 spin_lock_irq(&ctx->lock); 969 spin_lock_irq(&ctx->lock);
971 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 970 if (event->state >= PERF_EVENT_STATE_INACTIVE)
972 goto out; 971 goto out;
973 972
974 /* 973 /*
975 * If the counter is in error state, clear that first. 974 * If the event is in error state, clear that first.
976 * That way, if we see the counter in error state below, we 975 * That way, if we see the event in error state below, we
977 * know that it has gone back into error state, as distinct 976 * know that it has gone back into error state, as distinct
978 * from the task having been scheduled away before the 977 * from the task having been scheduled away before the
979 * cross-call arrived. 978 * cross-call arrived.
980 */ 979 */
981 if (counter->state == PERF_COUNTER_STATE_ERROR) 980 if (event->state == PERF_EVENT_STATE_ERROR)
982 counter->state = PERF_COUNTER_STATE_OFF; 981 event->state = PERF_EVENT_STATE_OFF;
983 982
984 retry: 983 retry:
985 spin_unlock_irq(&ctx->lock); 984 spin_unlock_irq(&ctx->lock);
986 task_oncpu_function_call(task, __perf_counter_enable, counter); 985 task_oncpu_function_call(task, __perf_event_enable, event);
987 986
988 spin_lock_irq(&ctx->lock); 987 spin_lock_irq(&ctx->lock);
989 988
990 /* 989 /*
991 * If the context is active and the counter is still off, 990 * If the context is active and the event is still off,
992 * we need to retry the cross-call. 991 * we need to retry the cross-call.
993 */ 992 */
994 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF) 993 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
995 goto retry; 994 goto retry;
996 995
997 /* 996 /*
998 * Since we have the lock this context can't be scheduled 997 * Since we have the lock this context can't be scheduled
999 * in, so we can change the state safely. 998 * in, so we can change the state safely.
1000 */ 999 */
1001 if (counter->state == PERF_COUNTER_STATE_OFF) 1000 if (event->state == PERF_EVENT_STATE_OFF)
1002 __perf_counter_mark_enabled(counter, ctx); 1001 __perf_event_mark_enabled(event, ctx);
1003 1002
1004 out: 1003 out:
1005 spin_unlock_irq(&ctx->lock); 1004 spin_unlock_irq(&ctx->lock);
1006} 1005}
1007 1006
1008static int perf_counter_refresh(struct perf_counter *counter, int refresh) 1007static int perf_event_refresh(struct perf_event *event, int refresh)
1009{ 1008{
1010 /* 1009 /*
1011 * not supported on inherited counters 1010 * not supported on inherited events
1012 */ 1011 */
1013 if (counter->attr.inherit) 1012 if (event->attr.inherit)
1014 return -EINVAL; 1013 return -EINVAL;
1015 1014
1016 atomic_add(refresh, &counter->event_limit); 1015 atomic_add(refresh, &event->event_limit);
1017 perf_counter_enable(counter); 1016 perf_event_enable(event);
1018 1017
1019 return 0; 1018 return 0;
1020} 1019}
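
For context, perf_event_refresh() above is what backs the PERF_EVENT_IOC_REFRESH ioctl: it bumps event_limit by the requested count, enables the event, and rejects inherited events with -EINVAL. A minimal user-space sketch, assuming the renamed UAPI (<linux/perf_event.h>, __NR_perf_event_open) and that the event disables itself again once the extra overflows are used up (that last behaviour lives in the overflow path, not in the code shown here):

	#include <linux/perf_event.h>
	#include <string.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* glibc ships no wrapper for this syscall, so invoke it directly. */
	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = 1000000;	/* overflow every 1M instructions */
		attr.disabled = 1;		/* stays off until refreshed */

		fd = perf_event_open(&attr, 0 /* this task */, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		/* Arm the event for the next four overflows. */
		if (ioctl(fd, PERF_EVENT_IOC_REFRESH, 4) < 0)
			perror("PERF_EVENT_IOC_REFRESH");

		close(fd);
		return 0;
	}
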
1021 1020
1022void __perf_counter_sched_out(struct perf_counter_context *ctx, 1021void __perf_event_sched_out(struct perf_event_context *ctx,
1023 struct perf_cpu_context *cpuctx) 1022 struct perf_cpu_context *cpuctx)
1024{ 1023{
1025 struct perf_counter *counter; 1024 struct perf_event *event;
1026 1025
1027 spin_lock(&ctx->lock); 1026 spin_lock(&ctx->lock);
1028 ctx->is_active = 0; 1027 ctx->is_active = 0;
1029 if (likely(!ctx->nr_counters)) 1028 if (likely(!ctx->nr_events))
1030 goto out; 1029 goto out;
1031 update_context_time(ctx); 1030 update_context_time(ctx);
1032 1031
1033 perf_disable(); 1032 perf_disable();
1034 if (ctx->nr_active) { 1033 if (ctx->nr_active) {
1035 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1034 list_for_each_entry(event, &ctx->group_list, group_entry) {
1036 if (counter != counter->group_leader) 1035 if (event != event->group_leader)
1037 counter_sched_out(counter, cpuctx, ctx); 1036 event_sched_out(event, cpuctx, ctx);
1038 else 1037 else
1039 group_sched_out(counter, cpuctx, ctx); 1038 group_sched_out(event, cpuctx, ctx);
1040 } 1039 }
1041 } 1040 }
1042 perf_enable(); 1041 perf_enable();
@@ -1047,46 +1046,46 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
1047/* 1046/*
1048 * Test whether two contexts are equivalent, i.e. whether they 1047 * Test whether two contexts are equivalent, i.e. whether they
1049 * have both been cloned from the same version of the same context 1048 * have both been cloned from the same version of the same context
1050 * and they both have the same number of enabled counters. 1049 * and they both have the same number of enabled events.
1051 * If the number of enabled counters is the same, then the set 1050 * If the number of enabled events is the same, then the set
1052 * of enabled counters should be the same, because these are both 1051 * of enabled events should be the same, because these are both
1053 * inherited contexts, therefore we can't access individual counters 1052 * inherited contexts, therefore we can't access individual events
1054 * in them directly with an fd; we can only enable/disable all 1053 * in them directly with an fd; we can only enable/disable all
1055 * counters via prctl, or enable/disable all counters in a family 1054 * events via prctl, or enable/disable all events in a family
1056 * via ioctl, which will have the same effect on both contexts. 1055 * via ioctl, which will have the same effect on both contexts.
1057 */ 1056 */
1058static int context_equiv(struct perf_counter_context *ctx1, 1057static int context_equiv(struct perf_event_context *ctx1,
1059 struct perf_counter_context *ctx2) 1058 struct perf_event_context *ctx2)
1060{ 1059{
1061 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx 1060 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1062 && ctx1->parent_gen == ctx2->parent_gen 1061 && ctx1->parent_gen == ctx2->parent_gen
1063 && !ctx1->pin_count && !ctx2->pin_count; 1062 && !ctx1->pin_count && !ctx2->pin_count;
1064} 1063}
1065 1064
1066static void __perf_counter_read(void *counter); 1065static void __perf_event_read(void *event);
1067 1066
1068static void __perf_counter_sync_stat(struct perf_counter *counter, 1067static void __perf_event_sync_stat(struct perf_event *event,
1069 struct perf_counter *next_counter) 1068 struct perf_event *next_event)
1070{ 1069{
1071 u64 value; 1070 u64 value;
1072 1071
1073 if (!counter->attr.inherit_stat) 1072 if (!event->attr.inherit_stat)
1074 return; 1073 return;
1075 1074
1076 /* 1075 /*
1077 * Update the counter value, we cannot use perf_counter_read() 1076 * Update the event value, we cannot use perf_event_read()
1078 * because we're in the middle of a context switch and have IRQs 1077 * because we're in the middle of a context switch and have IRQs
1079 * disabled, which upsets smp_call_function_single(), however 1078 * disabled, which upsets smp_call_function_single(), however
1080 * we know the counter must be on the current CPU, therefore we 1079 * we know the event must be on the current CPU, therefore we
1081 * don't need to use it. 1080 * don't need to use it.
1082 */ 1081 */
1083 switch (counter->state) { 1082 switch (event->state) {
1084 case PERF_COUNTER_STATE_ACTIVE: 1083 case PERF_EVENT_STATE_ACTIVE:
1085 __perf_counter_read(counter); 1084 __perf_event_read(event);
1086 break; 1085 break;
1087 1086
1088 case PERF_COUNTER_STATE_INACTIVE: 1087 case PERF_EVENT_STATE_INACTIVE:
1089 update_counter_times(counter); 1088 update_event_times(event);
1090 break; 1089 break;
1091 1090
1092 default: 1091 default:
@@ -1094,73 +1093,73 @@ static void __perf_counter_sync_stat(struct perf_counter *counter,
1094 } 1093 }
1095 1094
1096 /* 1095 /*
1097 * In order to keep per-task stats reliable we need to flip the counter 1096 * In order to keep per-task stats reliable we need to flip the event
1098 * values when we flip the contexts. 1097 * values when we flip the contexts.
1099 */ 1098 */
1100 value = atomic64_read(&next_counter->count); 1099 value = atomic64_read(&next_event->count);
1101 value = atomic64_xchg(&counter->count, value); 1100 value = atomic64_xchg(&event->count, value);
1102 atomic64_set(&next_counter->count, value); 1101 atomic64_set(&next_event->count, value);
1103 1102
1104 swap(counter->total_time_enabled, next_counter->total_time_enabled); 1103 swap(event->total_time_enabled, next_event->total_time_enabled);
1105 swap(counter->total_time_running, next_counter->total_time_running); 1104 swap(event->total_time_running, next_event->total_time_running);
1106 1105
1107 /* 1106 /*
1108 * Since we swizzled the values, update the user visible data too. 1107 * Since we swizzled the values, update the user visible data too.
1109 */ 1108 */
1110 perf_counter_update_userpage(counter); 1109 perf_event_update_userpage(event);
1111 perf_counter_update_userpage(next_counter); 1110 perf_event_update_userpage(next_event);
1112} 1111}
1113 1112
1114#define list_next_entry(pos, member) \ 1113#define list_next_entry(pos, member) \
1115 list_entry(pos->member.next, typeof(*pos), member) 1114 list_entry(pos->member.next, typeof(*pos), member)
1116 1115
1117static void perf_counter_sync_stat(struct perf_counter_context *ctx, 1116static void perf_event_sync_stat(struct perf_event_context *ctx,
1118 struct perf_counter_context *next_ctx) 1117 struct perf_event_context *next_ctx)
1119{ 1118{
1120 struct perf_counter *counter, *next_counter; 1119 struct perf_event *event, *next_event;
1121 1120
1122 if (!ctx->nr_stat) 1121 if (!ctx->nr_stat)
1123 return; 1122 return;
1124 1123
1125 counter = list_first_entry(&ctx->event_list, 1124 event = list_first_entry(&ctx->event_list,
1126 struct perf_counter, event_entry); 1125 struct perf_event, event_entry);
1127 1126
1128 next_counter = list_first_entry(&next_ctx->event_list, 1127 next_event = list_first_entry(&next_ctx->event_list,
1129 struct perf_counter, event_entry); 1128 struct perf_event, event_entry);
1130 1129
1131 while (&counter->event_entry != &ctx->event_list && 1130 while (&event->event_entry != &ctx->event_list &&
1132 &next_counter->event_entry != &next_ctx->event_list) { 1131 &next_event->event_entry != &next_ctx->event_list) {
1133 1132
1134 __perf_counter_sync_stat(counter, next_counter); 1133 __perf_event_sync_stat(event, next_event);
1135 1134
1136 counter = list_next_entry(counter, event_entry); 1135 event = list_next_entry(event, event_entry);
1137 next_counter = list_next_entry(next_counter, event_entry); 1136 next_event = list_next_entry(next_event, event_entry);
1138 } 1137 }
1139} 1138}
1140 1139
1141/* 1140/*
1142 * Called from scheduler to remove the counters of the current task, 1141 * Called from scheduler to remove the events of the current task,
1143 * with interrupts disabled. 1142 * with interrupts disabled.
1144 * 1143 *
1145 * We stop each counter and update the counter value in counter->count. 1144 * We stop each event and update the event value in event->count.
1146 * 1145 *
1147 * This does not protect us against NMI, but disable() 1146 * This does not protect us against NMI, but disable()
1148 * sets the disabled bit in the control field of counter _before_ 1147 * sets the disabled bit in the control field of event _before_
1149 * accessing the counter control register. If a NMI hits, then it will 1148 * accessing the event control register. If a NMI hits, then it will
1150 * not restart the counter. 1149 * not restart the event.
1151 */ 1150 */
1152void perf_counter_task_sched_out(struct task_struct *task, 1151void perf_event_task_sched_out(struct task_struct *task,
1153 struct task_struct *next, int cpu) 1152 struct task_struct *next, int cpu)
1154{ 1153{
1155 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1154 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1156 struct perf_counter_context *ctx = task->perf_counter_ctxp; 1155 struct perf_event_context *ctx = task->perf_event_ctxp;
1157 struct perf_counter_context *next_ctx; 1156 struct perf_event_context *next_ctx;
1158 struct perf_counter_context *parent; 1157 struct perf_event_context *parent;
1159 struct pt_regs *regs; 1158 struct pt_regs *regs;
1160 int do_switch = 1; 1159 int do_switch = 1;
1161 1160
1162 regs = task_pt_regs(task); 1161 regs = task_pt_regs(task);
1163 perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); 1162 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1164 1163
1165 if (likely(!ctx || !cpuctx->task_ctx)) 1164 if (likely(!ctx || !cpuctx->task_ctx))
1166 return; 1165 return;
@@ -1169,7 +1168,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1169 1168
1170 rcu_read_lock(); 1169 rcu_read_lock();
1171 parent = rcu_dereference(ctx->parent_ctx); 1170 parent = rcu_dereference(ctx->parent_ctx);
1172 next_ctx = next->perf_counter_ctxp; 1171 next_ctx = next->perf_event_ctxp;
1173 if (parent && next_ctx && 1172 if (parent && next_ctx &&
1174 rcu_dereference(next_ctx->parent_ctx) == parent) { 1173 rcu_dereference(next_ctx->parent_ctx) == parent) {
1175 /* 1174 /*
@@ -1186,15 +1185,15 @@ void perf_counter_task_sched_out(struct task_struct *task,
1186 if (context_equiv(ctx, next_ctx)) { 1185 if (context_equiv(ctx, next_ctx)) {
1187 /* 1186 /*
1188 * XXX do we need a memory barrier of sorts 1187 * XXX do we need a memory barrier of sorts
1189 * wrt to rcu_dereference() of perf_counter_ctxp 1188 * wrt to rcu_dereference() of perf_event_ctxp
1190 */ 1189 */
1191 task->perf_counter_ctxp = next_ctx; 1190 task->perf_event_ctxp = next_ctx;
1192 next->perf_counter_ctxp = ctx; 1191 next->perf_event_ctxp = ctx;
1193 ctx->task = next; 1192 ctx->task = next;
1194 next_ctx->task = task; 1193 next_ctx->task = task;
1195 do_switch = 0; 1194 do_switch = 0;
1196 1195
1197 perf_counter_sync_stat(ctx, next_ctx); 1196 perf_event_sync_stat(ctx, next_ctx);
1198 } 1197 }
1199 spin_unlock(&next_ctx->lock); 1198 spin_unlock(&next_ctx->lock);
1200 spin_unlock(&ctx->lock); 1199 spin_unlock(&ctx->lock);
@@ -1202,7 +1201,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1202 rcu_read_unlock(); 1201 rcu_read_unlock();
1203 1202
1204 if (do_switch) { 1203 if (do_switch) {
1205 __perf_counter_sched_out(ctx, cpuctx); 1204 __perf_event_sched_out(ctx, cpuctx);
1206 cpuctx->task_ctx = NULL; 1205 cpuctx->task_ctx = NULL;
1207 } 1206 }
1208} 1207}
@@ -1210,7 +1209,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1210/* 1209/*
1211 * Called with IRQs disabled 1210 * Called with IRQs disabled
1212 */ 1211 */
1213static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) 1212static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1214{ 1213{
1215 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1214 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1216 1215
@@ -1220,28 +1219,28 @@ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1220 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 1219 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1221 return; 1220 return;
1222 1221
1223 __perf_counter_sched_out(ctx, cpuctx); 1222 __perf_event_sched_out(ctx, cpuctx);
1224 cpuctx->task_ctx = NULL; 1223 cpuctx->task_ctx = NULL;
1225} 1224}
1226 1225
1227/* 1226/*
1228 * Called with IRQs disabled 1227 * Called with IRQs disabled
1229 */ 1228 */
1230static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) 1229static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
1231{ 1230{
1232 __perf_counter_sched_out(&cpuctx->ctx, cpuctx); 1231 __perf_event_sched_out(&cpuctx->ctx, cpuctx);
1233} 1232}
1234 1233
1235static void 1234static void
1236__perf_counter_sched_in(struct perf_counter_context *ctx, 1235__perf_event_sched_in(struct perf_event_context *ctx,
1237 struct perf_cpu_context *cpuctx, int cpu) 1236 struct perf_cpu_context *cpuctx, int cpu)
1238{ 1237{
1239 struct perf_counter *counter; 1238 struct perf_event *event;
1240 int can_add_hw = 1; 1239 int can_add_hw = 1;
1241 1240
1242 spin_lock(&ctx->lock); 1241 spin_lock(&ctx->lock);
1243 ctx->is_active = 1; 1242 ctx->is_active = 1;
1244 if (likely(!ctx->nr_counters)) 1243 if (likely(!ctx->nr_events))
1245 goto out; 1244 goto out;
1246 1245
1247 ctx->timestamp = perf_clock(); 1246 ctx->timestamp = perf_clock();
@@ -1252,52 +1251,52 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
1252 * First go through the list and put on any pinned groups 1251 * First go through the list and put on any pinned groups
1253 * in order to give them the best chance of going on. 1252 * in order to give them the best chance of going on.
1254 */ 1253 */
1255 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1254 list_for_each_entry(event, &ctx->group_list, group_entry) {
1256 if (counter->state <= PERF_COUNTER_STATE_OFF || 1255 if (event->state <= PERF_EVENT_STATE_OFF ||
1257 !counter->attr.pinned) 1256 !event->attr.pinned)
1258 continue; 1257 continue;
1259 if (counter->cpu != -1 && counter->cpu != cpu) 1258 if (event->cpu != -1 && event->cpu != cpu)
1260 continue; 1259 continue;
1261 1260
1262 if (counter != counter->group_leader) 1261 if (event != event->group_leader)
1263 counter_sched_in(counter, cpuctx, ctx, cpu); 1262 event_sched_in(event, cpuctx, ctx, cpu);
1264 else { 1263 else {
1265 if (group_can_go_on(counter, cpuctx, 1)) 1264 if (group_can_go_on(event, cpuctx, 1))
1266 group_sched_in(counter, cpuctx, ctx, cpu); 1265 group_sched_in(event, cpuctx, ctx, cpu);
1267 } 1266 }
1268 1267
1269 /* 1268 /*
1270 * If this pinned group hasn't been scheduled, 1269 * If this pinned group hasn't been scheduled,
1271 * put it in error state. 1270 * put it in error state.
1272 */ 1271 */
1273 if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1272 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1274 update_group_times(counter); 1273 update_group_times(event);
1275 counter->state = PERF_COUNTER_STATE_ERROR; 1274 event->state = PERF_EVENT_STATE_ERROR;
1276 } 1275 }
1277 } 1276 }
1278 1277
1279 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1278 list_for_each_entry(event, &ctx->group_list, group_entry) {
1280 /* 1279 /*
1281 * Ignore counters in OFF or ERROR state, and 1280 * Ignore events in OFF or ERROR state, and
1282 * ignore pinned counters since we did them already. 1281 * ignore pinned events since we did them already.
1283 */ 1282 */
1284 if (counter->state <= PERF_COUNTER_STATE_OFF || 1283 if (event->state <= PERF_EVENT_STATE_OFF ||
1285 counter->attr.pinned) 1284 event->attr.pinned)
1286 continue; 1285 continue;
1287 1286
1288 /* 1287 /*
1289 * Listen to the 'cpu' scheduling filter constraint 1288 * Listen to the 'cpu' scheduling filter constraint
1290 * of counters: 1289 * of events:
1291 */ 1290 */
1292 if (counter->cpu != -1 && counter->cpu != cpu) 1291 if (event->cpu != -1 && event->cpu != cpu)
1293 continue; 1292 continue;
1294 1293
1295 if (counter != counter->group_leader) { 1294 if (event != event->group_leader) {
1296 if (counter_sched_in(counter, cpuctx, ctx, cpu)) 1295 if (event_sched_in(event, cpuctx, ctx, cpu))
1297 can_add_hw = 0; 1296 can_add_hw = 0;
1298 } else { 1297 } else {
1299 if (group_can_go_on(counter, cpuctx, can_add_hw)) { 1298 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1300 if (group_sched_in(counter, cpuctx, ctx, cpu)) 1299 if (group_sched_in(event, cpuctx, ctx, cpu))
1301 can_add_hw = 0; 1300 can_add_hw = 0;
1302 } 1301 }
1303 } 1302 }
@@ -1308,48 +1307,48 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
1308} 1307}
1309 1308
1310/* 1309/*
1311 * Called from scheduler to add the counters of the current task 1310 * Called from scheduler to add the events of the current task
1312 * with interrupts disabled. 1311 * with interrupts disabled.
1313 * 1312 *
1314 * We restore the counter value and then enable it. 1313 * We restore the event value and then enable it.
1315 * 1314 *
1316 * This does not protect us against NMI, but enable() 1315 * This does not protect us against NMI, but enable()
1317 * sets the enabled bit in the control field of counter _before_ 1316 * sets the enabled bit in the control field of event _before_
1318 * accessing the counter control register. If an NMI hits, then it will 1317 * accessing the event control register. If an NMI hits, then it will
1319 * keep the counter running. 1318 * keep the event running.
1320 */ 1319 */
1321void perf_counter_task_sched_in(struct task_struct *task, int cpu) 1320void perf_event_task_sched_in(struct task_struct *task, int cpu)
1322{ 1321{
1323 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1322 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1324 struct perf_counter_context *ctx = task->perf_counter_ctxp; 1323 struct perf_event_context *ctx = task->perf_event_ctxp;
1325 1324
1326 if (likely(!ctx)) 1325 if (likely(!ctx))
1327 return; 1326 return;
1328 if (cpuctx->task_ctx == ctx) 1327 if (cpuctx->task_ctx == ctx)
1329 return; 1328 return;
1330 __perf_counter_sched_in(ctx, cpuctx, cpu); 1329 __perf_event_sched_in(ctx, cpuctx, cpu);
1331 cpuctx->task_ctx = ctx; 1330 cpuctx->task_ctx = ctx;
1332} 1331}
1333 1332
1334static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) 1333static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1335{ 1334{
1336 struct perf_counter_context *ctx = &cpuctx->ctx; 1335 struct perf_event_context *ctx = &cpuctx->ctx;
1337 1336
1338 __perf_counter_sched_in(ctx, cpuctx, cpu); 1337 __perf_event_sched_in(ctx, cpuctx, cpu);
1339} 1338}
1340 1339
1341#define MAX_INTERRUPTS (~0ULL) 1340#define MAX_INTERRUPTS (~0ULL)
1342 1341
1343static void perf_log_throttle(struct perf_counter *counter, int enable); 1342static void perf_log_throttle(struct perf_event *event, int enable);
1344 1343
1345static void perf_adjust_period(struct perf_counter *counter, u64 events) 1344static void perf_adjust_period(struct perf_event *event, u64 events)
1346{ 1345{
1347 struct hw_perf_counter *hwc = &counter->hw; 1346 struct hw_perf_event *hwc = &event->hw;
1348 u64 period, sample_period; 1347 u64 period, sample_period;
1349 s64 delta; 1348 s64 delta;
1350 1349
1351 events *= hwc->sample_period; 1350 events *= hwc->sample_period;
1352 period = div64_u64(events, counter->attr.sample_freq); 1351 period = div64_u64(events, event->attr.sample_freq);
1353 1352
1354 delta = (s64)(period - hwc->sample_period); 1353 delta = (s64)(period - hwc->sample_period);
1355 delta = (delta + 7) / 8; /* low pass filter */ 1354 delta = (delta + 7) / 8; /* low pass filter */
@@ -1362,39 +1361,39 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
1362 hwc->sample_period = sample_period; 1361 hwc->sample_period = sample_period;
1363} 1362}
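
To make the smoothing above concrete (numbers are illustrative, not taken from the patch): if the frequency target works out to a period of 18000 while hwc->sample_period is currently 10000, then delta = 8000 and the filtered step is (8000 + 7) / 8 = 1000, so the period only moves to 11000 on this tick and converges over several ticks instead of jumping. A stand-alone mirror of just that arithmetic:

	/* Illustrative copy of the 1/8 low-pass step used above. */
	static long long smooth_period(long long cur_period, long long target)
	{
		long long delta = target - cur_period;

		delta = (delta + 7) / 8;	/* take 1/8 of the error per tick */
		return cur_period + delta;	/* 10000 -> 11000 when target is 18000 */
	}
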
1364 1363
1365static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) 1364static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1366{ 1365{
1367 struct perf_counter *counter; 1366 struct perf_event *event;
1368 struct hw_perf_counter *hwc; 1367 struct hw_perf_event *hwc;
1369 u64 interrupts, freq; 1368 u64 interrupts, freq;
1370 1369
1371 spin_lock(&ctx->lock); 1370 spin_lock(&ctx->lock);
1372 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1371 list_for_each_entry(event, &ctx->group_list, group_entry) {
1373 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 1372 if (event->state != PERF_EVENT_STATE_ACTIVE)
1374 continue; 1373 continue;
1375 1374
1376 hwc = &counter->hw; 1375 hwc = &event->hw;
1377 1376
1378 interrupts = hwc->interrupts; 1377 interrupts = hwc->interrupts;
1379 hwc->interrupts = 0; 1378 hwc->interrupts = 0;
1380 1379
1381 /* 1380 /*
1382 * unthrottle counters on the tick 1381 * unthrottle events on the tick
1383 */ 1382 */
1384 if (interrupts == MAX_INTERRUPTS) { 1383 if (interrupts == MAX_INTERRUPTS) {
1385 perf_log_throttle(counter, 1); 1384 perf_log_throttle(event, 1);
1386 counter->pmu->unthrottle(counter); 1385 event->pmu->unthrottle(event);
1387 interrupts = 2*sysctl_perf_counter_sample_rate/HZ; 1386 interrupts = 2*sysctl_perf_event_sample_rate/HZ;
1388 } 1387 }
1389 1388
1390 if (!counter->attr.freq || !counter->attr.sample_freq) 1389 if (!event->attr.freq || !event->attr.sample_freq)
1391 continue; 1390 continue;
1392 1391
1393 /* 1392 /*
1394 * if the specified freq < HZ then we need to skip ticks 1393 * if the specified freq < HZ then we need to skip ticks
1395 */ 1394 */
1396 if (counter->attr.sample_freq < HZ) { 1395 if (event->attr.sample_freq < HZ) {
1397 freq = counter->attr.sample_freq; 1396 freq = event->attr.sample_freq;
1398 1397
1399 hwc->freq_count += freq; 1398 hwc->freq_count += freq;
1400 hwc->freq_interrupts += interrupts; 1399 hwc->freq_interrupts += interrupts;
@@ -1408,7 +1407,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1408 } else 1407 } else
1409 freq = HZ; 1408 freq = HZ;
1410 1409
1411 perf_adjust_period(counter, freq * interrupts); 1410 perf_adjust_period(event, freq * interrupts);
1412 1411
1413 /* 1412 /*
1414 * In order to avoid being stalled by an (accidental) huge 1413 * In order to avoid being stalled by an (accidental) huge
@@ -1417,9 +1416,9 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1417 */ 1416 */
1418 if (!interrupts) { 1417 if (!interrupts) {
1419 perf_disable(); 1418 perf_disable();
1420 counter->pmu->disable(counter); 1419 event->pmu->disable(event);
1421 atomic64_set(&hwc->period_left, 0); 1420 atomic64_set(&hwc->period_left, 0);
1422 counter->pmu->enable(counter); 1421 event->pmu->enable(event);
1423 perf_enable(); 1422 perf_enable();
1424 } 1423 }
1425 } 1424 }
@@ -1427,22 +1426,22 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1427} 1426}
1428 1427
1429/* 1428/*
1430 * Round-robin a context's counters: 1429 * Round-robin a context's events:
1431 */ 1430 */
1432static void rotate_ctx(struct perf_counter_context *ctx) 1431static void rotate_ctx(struct perf_event_context *ctx)
1433{ 1432{
1434 struct perf_counter *counter; 1433 struct perf_event *event;
1435 1434
1436 if (!ctx->nr_counters) 1435 if (!ctx->nr_events)
1437 return; 1436 return;
1438 1437
1439 spin_lock(&ctx->lock); 1438 spin_lock(&ctx->lock);
1440 /* 1439 /*
1441 * Rotate the first entry last (works just fine for group counters too): 1440 * Rotate the first entry last (works just fine for group events too):
1442 */ 1441 */
1443 perf_disable(); 1442 perf_disable();
1444 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1443 list_for_each_entry(event, &ctx->group_list, group_entry) {
1445 list_move_tail(&counter->list_entry, &ctx->counter_list); 1444 list_move_tail(&event->group_entry, &ctx->group_list);
1446 break; 1445 break;
1447 } 1446 }
1448 perf_enable(); 1447 perf_enable();
@@ -1450,93 +1449,93 @@ static void rotate_ctx(struct perf_counter_context *ctx)
1450 spin_unlock(&ctx->lock); 1449 spin_unlock(&ctx->lock);
1451} 1450}
1452 1451
1453void perf_counter_task_tick(struct task_struct *curr, int cpu) 1452void perf_event_task_tick(struct task_struct *curr, int cpu)
1454{ 1453{
1455 struct perf_cpu_context *cpuctx; 1454 struct perf_cpu_context *cpuctx;
1456 struct perf_counter_context *ctx; 1455 struct perf_event_context *ctx;
1457 1456
1458 if (!atomic_read(&nr_counters)) 1457 if (!atomic_read(&nr_events))
1459 return; 1458 return;
1460 1459
1461 cpuctx = &per_cpu(perf_cpu_context, cpu); 1460 cpuctx = &per_cpu(perf_cpu_context, cpu);
1462 ctx = curr->perf_counter_ctxp; 1461 ctx = curr->perf_event_ctxp;
1463 1462
1464 perf_ctx_adjust_freq(&cpuctx->ctx); 1463 perf_ctx_adjust_freq(&cpuctx->ctx);
1465 if (ctx) 1464 if (ctx)
1466 perf_ctx_adjust_freq(ctx); 1465 perf_ctx_adjust_freq(ctx);
1467 1466
1468 perf_counter_cpu_sched_out(cpuctx); 1467 perf_event_cpu_sched_out(cpuctx);
1469 if (ctx) 1468 if (ctx)
1470 __perf_counter_task_sched_out(ctx); 1469 __perf_event_task_sched_out(ctx);
1471 1470
1472 rotate_ctx(&cpuctx->ctx); 1471 rotate_ctx(&cpuctx->ctx);
1473 if (ctx) 1472 if (ctx)
1474 rotate_ctx(ctx); 1473 rotate_ctx(ctx);
1475 1474
1476 perf_counter_cpu_sched_in(cpuctx, cpu); 1475 perf_event_cpu_sched_in(cpuctx, cpu);
1477 if (ctx) 1476 if (ctx)
1478 perf_counter_task_sched_in(curr, cpu); 1477 perf_event_task_sched_in(curr, cpu);
1479} 1478}
1480 1479
1481/* 1480/*
1482 * Enable all of a task's counters that have been marked enable-on-exec. 1481 * Enable all of a task's events that have been marked enable-on-exec.
1483 * This expects task == current. 1482 * This expects task == current.
1484 */ 1483 */
1485static void perf_counter_enable_on_exec(struct task_struct *task) 1484static void perf_event_enable_on_exec(struct task_struct *task)
1486{ 1485{
1487 struct perf_counter_context *ctx; 1486 struct perf_event_context *ctx;
1488 struct perf_counter *counter; 1487 struct perf_event *event;
1489 unsigned long flags; 1488 unsigned long flags;
1490 int enabled = 0; 1489 int enabled = 0;
1491 1490
1492 local_irq_save(flags); 1491 local_irq_save(flags);
1493 ctx = task->perf_counter_ctxp; 1492 ctx = task->perf_event_ctxp;
1494 if (!ctx || !ctx->nr_counters) 1493 if (!ctx || !ctx->nr_events)
1495 goto out; 1494 goto out;
1496 1495
1497 __perf_counter_task_sched_out(ctx); 1496 __perf_event_task_sched_out(ctx);
1498 1497
1499 spin_lock(&ctx->lock); 1498 spin_lock(&ctx->lock);
1500 1499
1501 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1500 list_for_each_entry(event, &ctx->group_list, group_entry) {
1502 if (!counter->attr.enable_on_exec) 1501 if (!event->attr.enable_on_exec)
1503 continue; 1502 continue;
1504 counter->attr.enable_on_exec = 0; 1503 event->attr.enable_on_exec = 0;
1505 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 1504 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1506 continue; 1505 continue;
1507 __perf_counter_mark_enabled(counter, ctx); 1506 __perf_event_mark_enabled(event, ctx);
1508 enabled = 1; 1507 enabled = 1;
1509 } 1508 }
1510 1509
1511 /* 1510 /*
1512 * Unclone this context if we enabled any counter. 1511 * Unclone this context if we enabled any event.
1513 */ 1512 */
1514 if (enabled) 1513 if (enabled)
1515 unclone_ctx(ctx); 1514 unclone_ctx(ctx);
1516 1515
1517 spin_unlock(&ctx->lock); 1516 spin_unlock(&ctx->lock);
1518 1517
1519 perf_counter_task_sched_in(task, smp_processor_id()); 1518 perf_event_task_sched_in(task, smp_processor_id());
1520 out: 1519 out:
1521 local_irq_restore(flags); 1520 local_irq_restore(flags);
1522} 1521}
1523 1522
1524/* 1523/*
1525 * Cross CPU call to read the hardware counter 1524 * Cross CPU call to read the hardware event
1526 */ 1525 */
1527static void __perf_counter_read(void *info) 1526static void __perf_event_read(void *info)
1528{ 1527{
1529 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1528 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1530 struct perf_counter *counter = info; 1529 struct perf_event *event = info;
1531 struct perf_counter_context *ctx = counter->ctx; 1530 struct perf_event_context *ctx = event->ctx;
1532 unsigned long flags; 1531 unsigned long flags;
1533 1532
1534 /* 1533 /*
1535 * If this is a task context, we need to check whether it is 1534 * If this is a task context, we need to check whether it is
1536 * the current task context of this cpu. If not it has been 1535 * the current task context of this cpu. If not it has been
1537 * scheduled out before the smp call arrived. In that case 1536 * scheduled out before the smp call arrived. In that case
1538 * counter->count would have been updated to a recent sample 1537 * event->count would have been updated to a recent sample
1539 * when the counter was scheduled out. 1538 * when the event was scheduled out.
1540 */ 1539 */
1541 if (ctx->task && cpuctx->task_ctx != ctx) 1540 if (ctx->task && cpuctx->task_ctx != ctx)
1542 return; 1541 return;
@@ -1544,56 +1543,56 @@ static void __perf_counter_read(void *info)
1544 local_irq_save(flags); 1543 local_irq_save(flags);
1545 if (ctx->is_active) 1544 if (ctx->is_active)
1546 update_context_time(ctx); 1545 update_context_time(ctx);
1547 counter->pmu->read(counter); 1546 event->pmu->read(event);
1548 update_counter_times(counter); 1547 update_event_times(event);
1549 local_irq_restore(flags); 1548 local_irq_restore(flags);
1550} 1549}
1551 1550
1552static u64 perf_counter_read(struct perf_counter *counter) 1551static u64 perf_event_read(struct perf_event *event)
1553{ 1552{
1554 /* 1553 /*
1555 * If counter is enabled and currently active on a CPU, update the 1554 * If event is enabled and currently active on a CPU, update the
1556 * value in the counter structure: 1555 * value in the event structure:
1557 */ 1556 */
1558 if (counter->state == PERF_COUNTER_STATE_ACTIVE) { 1557 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1559 smp_call_function_single(counter->oncpu, 1558 smp_call_function_single(event->oncpu,
1560 __perf_counter_read, counter, 1); 1559 __perf_event_read, event, 1);
1561 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1560 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1562 update_counter_times(counter); 1561 update_event_times(event);
1563 } 1562 }
1564 1563
1565 return atomic64_read(&counter->count); 1564 return atomic64_read(&event->count);
1566} 1565}
1567 1566
1568/* 1567/*
1569 * Initialize the perf_counter context in a task_struct: 1568 * Initialize the perf_event context in a task_struct:
1570 */ 1569 */
1571static void 1570static void
1572__perf_counter_init_context(struct perf_counter_context *ctx, 1571__perf_event_init_context(struct perf_event_context *ctx,
1573 struct task_struct *task) 1572 struct task_struct *task)
1574{ 1573{
1575 memset(ctx, 0, sizeof(*ctx)); 1574 memset(ctx, 0, sizeof(*ctx));
1576 spin_lock_init(&ctx->lock); 1575 spin_lock_init(&ctx->lock);
1577 mutex_init(&ctx->mutex); 1576 mutex_init(&ctx->mutex);
1578 INIT_LIST_HEAD(&ctx->counter_list); 1577 INIT_LIST_HEAD(&ctx->group_list);
1579 INIT_LIST_HEAD(&ctx->event_list); 1578 INIT_LIST_HEAD(&ctx->event_list);
1580 atomic_set(&ctx->refcount, 1); 1579 atomic_set(&ctx->refcount, 1);
1581 ctx->task = task; 1580 ctx->task = task;
1582} 1581}
1583 1582
1584static struct perf_counter_context *find_get_context(pid_t pid, int cpu) 1583static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1585{ 1584{
1586 struct perf_counter_context *ctx; 1585 struct perf_event_context *ctx;
1587 struct perf_cpu_context *cpuctx; 1586 struct perf_cpu_context *cpuctx;
1588 struct task_struct *task; 1587 struct task_struct *task;
1589 unsigned long flags; 1588 unsigned long flags;
1590 int err; 1589 int err;
1591 1590
1592 /* 1591 /*
1593 * If cpu is not a wildcard then this is a percpu counter: 1592 * If cpu is not a wildcard then this is a percpu event:
1594 */ 1593 */
1595 if (cpu != -1) { 1594 if (cpu != -1) {
1596 /* Must be root to operate on a CPU counter: */ 1595 /* Must be root to operate on a CPU event: */
1597 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 1596 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1598 return ERR_PTR(-EACCES); 1597 return ERR_PTR(-EACCES);
1599 1598
@@ -1601,7 +1600,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1601 return ERR_PTR(-EINVAL); 1600 return ERR_PTR(-EINVAL);
1602 1601
1603 /* 1602 /*
1604 * We could be clever and allow to attach a counter to an 1603 * We could be clever and allow to attach an event to an
1605 * offline CPU and activate it when the CPU comes up, but 1604 * offline CPU and activate it when the CPU comes up, but
1606 * that's for later. 1605 * that's for later.
1607 */ 1606 */
@@ -1628,7 +1627,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1628 return ERR_PTR(-ESRCH); 1627 return ERR_PTR(-ESRCH);
1629 1628
1630 /* 1629 /*
1631 * Can't attach counters to a dying task. 1630 * Can't attach events to a dying task.
1632 */ 1631 */
1633 err = -ESRCH; 1632 err = -ESRCH;
1634 if (task->flags & PF_EXITING) 1633 if (task->flags & PF_EXITING)
@@ -1647,13 +1646,13 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1647 } 1646 }
1648 1647
1649 if (!ctx) { 1648 if (!ctx) {
1650 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 1649 ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
1651 err = -ENOMEM; 1650 err = -ENOMEM;
1652 if (!ctx) 1651 if (!ctx)
1653 goto errout; 1652 goto errout;
1654 __perf_counter_init_context(ctx, task); 1653 __perf_event_init_context(ctx, task);
1655 get_ctx(ctx); 1654 get_ctx(ctx);
1656 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { 1655 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1657 /* 1656 /*
1658 * We raced with some other task; use 1657 * We raced with some other task; use
1659 * the context they set. 1658 * the context they set.
@@ -1672,42 +1671,42 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1672 return ERR_PTR(err); 1671 return ERR_PTR(err);
1673} 1672}
1674 1673
1675static void free_counter_rcu(struct rcu_head *head) 1674static void free_event_rcu(struct rcu_head *head)
1676{ 1675{
1677 struct perf_counter *counter; 1676 struct perf_event *event;
1678 1677
1679 counter = container_of(head, struct perf_counter, rcu_head); 1678 event = container_of(head, struct perf_event, rcu_head);
1680 if (counter->ns) 1679 if (event->ns)
1681 put_pid_ns(counter->ns); 1680 put_pid_ns(event->ns);
1682 kfree(counter); 1681 kfree(event);
1683} 1682}
1684 1683
1685static void perf_pending_sync(struct perf_counter *counter); 1684static void perf_pending_sync(struct perf_event *event);
1686 1685
1687static void free_counter(struct perf_counter *counter) 1686static void free_event(struct perf_event *event)
1688{ 1687{
1689 perf_pending_sync(counter); 1688 perf_pending_sync(event);
1690 1689
1691 if (!counter->parent) { 1690 if (!event->parent) {
1692 atomic_dec(&nr_counters); 1691 atomic_dec(&nr_events);
1693 if (counter->attr.mmap) 1692 if (event->attr.mmap)
1694 atomic_dec(&nr_mmap_counters); 1693 atomic_dec(&nr_mmap_events);
1695 if (counter->attr.comm) 1694 if (event->attr.comm)
1696 atomic_dec(&nr_comm_counters); 1695 atomic_dec(&nr_comm_events);
1697 if (counter->attr.task) 1696 if (event->attr.task)
1698 atomic_dec(&nr_task_counters); 1697 atomic_dec(&nr_task_events);
1699 } 1698 }
1700 1699
1701 if (counter->output) { 1700 if (event->output) {
1702 fput(counter->output->filp); 1701 fput(event->output->filp);
1703 counter->output = NULL; 1702 event->output = NULL;
1704 } 1703 }
1705 1704
1706 if (counter->destroy) 1705 if (event->destroy)
1707 counter->destroy(counter); 1706 event->destroy(event);
1708 1707
1709 put_ctx(counter->ctx); 1708 put_ctx(event->ctx);
1710 call_rcu(&counter->rcu_head, free_counter_rcu); 1709 call_rcu(&event->rcu_head, free_event_rcu);
1711} 1710}
1712 1711
1713/* 1712/*
@@ -1715,43 +1714,43 @@ static void free_counter(struct perf_counter *counter)
1715 */ 1714 */
1716static int perf_release(struct inode *inode, struct file *file) 1715static int perf_release(struct inode *inode, struct file *file)
1717{ 1716{
1718 struct perf_counter *counter = file->private_data; 1717 struct perf_event *event = file->private_data;
1719 struct perf_counter_context *ctx = counter->ctx; 1718 struct perf_event_context *ctx = event->ctx;
1720 1719
1721 file->private_data = NULL; 1720 file->private_data = NULL;
1722 1721
1723 WARN_ON_ONCE(ctx->parent_ctx); 1722 WARN_ON_ONCE(ctx->parent_ctx);
1724 mutex_lock(&ctx->mutex); 1723 mutex_lock(&ctx->mutex);
1725 perf_counter_remove_from_context(counter); 1724 perf_event_remove_from_context(event);
1726 mutex_unlock(&ctx->mutex); 1725 mutex_unlock(&ctx->mutex);
1727 1726
1728 mutex_lock(&counter->owner->perf_counter_mutex); 1727 mutex_lock(&event->owner->perf_event_mutex);
1729 list_del_init(&counter->owner_entry); 1728 list_del_init(&event->owner_entry);
1730 mutex_unlock(&counter->owner->perf_counter_mutex); 1729 mutex_unlock(&event->owner->perf_event_mutex);
1731 put_task_struct(counter->owner); 1730 put_task_struct(event->owner);
1732 1731
1733 free_counter(counter); 1732 free_event(event);
1734 1733
1735 return 0; 1734 return 0;
1736} 1735}
1737 1736
1738static int perf_counter_read_size(struct perf_counter *counter) 1737static int perf_event_read_size(struct perf_event *event)
1739{ 1738{
1740 int entry = sizeof(u64); /* value */ 1739 int entry = sizeof(u64); /* value */
1741 int size = 0; 1740 int size = 0;
1742 int nr = 1; 1741 int nr = 1;
1743 1742
1744 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1743 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1745 size += sizeof(u64); 1744 size += sizeof(u64);
1746 1745
1747 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1746 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1748 size += sizeof(u64); 1747 size += sizeof(u64);
1749 1748
1750 if (counter->attr.read_format & PERF_FORMAT_ID) 1749 if (event->attr.read_format & PERF_FORMAT_ID)
1751 entry += sizeof(u64); 1750 entry += sizeof(u64);
1752 1751
1753 if (counter->attr.read_format & PERF_FORMAT_GROUP) { 1752 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1754 nr += counter->group_leader->nr_siblings; 1753 nr += event->group_leader->nr_siblings;
1755 size += sizeof(u64); 1754 size += sizeof(u64);
1756 } 1755 }
1757 1756
@@ -1760,27 +1759,27 @@ static int perf_counter_read_size(struct perf_counter *counter)
1760 return size; 1759 return size;
1761} 1760}
1762 1761
1763static u64 perf_counter_read_value(struct perf_counter *counter) 1762static u64 perf_event_read_value(struct perf_event *event)
1764{ 1763{
1765 struct perf_counter *child; 1764 struct perf_event *child;
1766 u64 total = 0; 1765 u64 total = 0;
1767 1766
1768 total += perf_counter_read(counter); 1767 total += perf_event_read(event);
1769 list_for_each_entry(child, &counter->child_list, child_list) 1768 list_for_each_entry(child, &event->child_list, child_list)
1770 total += perf_counter_read(child); 1769 total += perf_event_read(child);
1771 1770
1772 return total; 1771 return total;
1773} 1772}
1774 1773
1775static int perf_counter_read_entry(struct perf_counter *counter, 1774static int perf_event_read_entry(struct perf_event *event,
1776 u64 read_format, char __user *buf) 1775 u64 read_format, char __user *buf)
1777{ 1776{
1778 int n = 0, count = 0; 1777 int n = 0, count = 0;
1779 u64 values[2]; 1778 u64 values[2];
1780 1779
1781 values[n++] = perf_counter_read_value(counter); 1780 values[n++] = perf_event_read_value(event);
1782 if (read_format & PERF_FORMAT_ID) 1781 if (read_format & PERF_FORMAT_ID)
1783 values[n++] = primary_counter_id(counter); 1782 values[n++] = primary_event_id(event);
1784 1783
1785 count = n * sizeof(u64); 1784 count = n * sizeof(u64);
1786 1785
@@ -1790,10 +1789,10 @@ static int perf_counter_read_entry(struct perf_counter *counter,
1790 return count; 1789 return count;
1791} 1790}
1792 1791
1793static int perf_counter_read_group(struct perf_counter *counter, 1792static int perf_event_read_group(struct perf_event *event,
1794 u64 read_format, char __user *buf) 1793 u64 read_format, char __user *buf)
1795{ 1794{
1796 struct perf_counter *leader = counter->group_leader, *sub; 1795 struct perf_event *leader = event->group_leader, *sub;
1797 int n = 0, size = 0, err = -EFAULT; 1796 int n = 0, size = 0, err = -EFAULT;
1798 u64 values[3]; 1797 u64 values[3];
1799 1798
@@ -1812,14 +1811,14 @@ static int perf_counter_read_group(struct perf_counter *counter,
1812 if (copy_to_user(buf, values, size)) 1811 if (copy_to_user(buf, values, size))
1813 return -EFAULT; 1812 return -EFAULT;
1814 1813
1815 err = perf_counter_read_entry(leader, read_format, buf + size); 1814 err = perf_event_read_entry(leader, read_format, buf + size);
1816 if (err < 0) 1815 if (err < 0)
1817 return err; 1816 return err;
1818 1817
1819 size += err; 1818 size += err;
1820 1819
1821 list_for_each_entry(sub, &leader->sibling_list, list_entry) { 1820 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1822 err = perf_counter_read_entry(sub, read_format, 1821 err = perf_event_read_entry(sub, read_format,
1823 buf + size); 1822 buf + size);
1824 if (err < 0) 1823 if (err < 0)
1825 return err; 1824 return err;
@@ -1830,23 +1829,23 @@ static int perf_counter_read_group(struct perf_counter *counter,
1830 return size; 1829 return size;
1831} 1830}
1832 1831
1833static int perf_counter_read_one(struct perf_counter *counter, 1832static int perf_event_read_one(struct perf_event *event,
1834 u64 read_format, char __user *buf) 1833 u64 read_format, char __user *buf)
1835{ 1834{
1836 u64 values[4]; 1835 u64 values[4];
1837 int n = 0; 1836 int n = 0;
1838 1837
1839 values[n++] = perf_counter_read_value(counter); 1838 values[n++] = perf_event_read_value(event);
1840 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1839 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1841 values[n++] = counter->total_time_enabled + 1840 values[n++] = event->total_time_enabled +
1842 atomic64_read(&counter->child_total_time_enabled); 1841 atomic64_read(&event->child_total_time_enabled);
1843 } 1842 }
1844 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1843 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1845 values[n++] = counter->total_time_running + 1844 values[n++] = event->total_time_running +
1846 atomic64_read(&counter->child_total_time_running); 1845 atomic64_read(&event->child_total_time_running);
1847 } 1846 }
1848 if (read_format & PERF_FORMAT_ID) 1847 if (read_format & PERF_FORMAT_ID)
1849 values[n++] = primary_counter_id(counter); 1848 values[n++] = primary_event_id(event);
1850 1849
1851 if (copy_to_user(buf, values, n * sizeof(u64))) 1850 if (copy_to_user(buf, values, n * sizeof(u64)))
1852 return -EFAULT; 1851 return -EFAULT;
@@ -1855,32 +1854,32 @@ static int perf_counter_read_one(struct perf_counter *counter,
1855} 1854}
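
perf_event_read_one() above fixes the layout a user-space read() sees for a non-group event: one u64 per selected read_format bit, in the order value, time_enabled, time_running, id. A hedged decoding sketch under that assumption (the fd is presumed to come from perf_event_open() with those three format bits set; the struct and helper names are illustrative):

	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <unistd.h>

	/* Mirrors the order written by the kernel for
	 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
	 * PERF_FORMAT_ID on a non-group event. */
	struct read_one {
		uint64_t value;
		uint64_t time_enabled;
		uint64_t time_running;
		uint64_t id;
	};

	/* Returns the count scaled for multiplexing, or -1.0 on read error. */
	static double read_scaled(int fd)
	{
		struct read_one r;

		if (read(fd, &r, sizeof(r)) != (ssize_t)sizeof(r))
			return -1.0;
		if (!r.time_running)
			return 0.0;	/* never scheduled in */
		return (double)r.value * r.time_enabled / r.time_running;
	}
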
1856 1855
1857/* 1856/*
1858 * Read the performance counter - simple non blocking version for now 1857 * Read the performance event - simple non blocking version for now
1859 */ 1858 */
1860static ssize_t 1859static ssize_t
1861perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) 1860perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
1862{ 1861{
1863 u64 read_format = counter->attr.read_format; 1862 u64 read_format = event->attr.read_format;
1864 int ret; 1863 int ret;
1865 1864
1866 /* 1865 /*
1867 * Return end-of-file for a read on a counter that is in 1866 * Return end-of-file for a read on an event that is in
1868 * error state (i.e. because it was pinned but it couldn't be 1867 * error state (i.e. because it was pinned but it couldn't be
1869 * scheduled on to the CPU at some point). 1868 * scheduled on to the CPU at some point).
1870 */ 1869 */
1871 if (counter->state == PERF_COUNTER_STATE_ERROR) 1870 if (event->state == PERF_EVENT_STATE_ERROR)
1872 return 0; 1871 return 0;
1873 1872
1874 if (count < perf_counter_read_size(counter)) 1873 if (count < perf_event_read_size(event))
1875 return -ENOSPC; 1874 return -ENOSPC;
1876 1875
1877 WARN_ON_ONCE(counter->ctx->parent_ctx); 1876 WARN_ON_ONCE(event->ctx->parent_ctx);
1878 mutex_lock(&counter->child_mutex); 1877 mutex_lock(&event->child_mutex);
1879 if (read_format & PERF_FORMAT_GROUP) 1878 if (read_format & PERF_FORMAT_GROUP)
1880 ret = perf_counter_read_group(counter, read_format, buf); 1879 ret = perf_event_read_group(event, read_format, buf);
1881 else 1880 else
1882 ret = perf_counter_read_one(counter, read_format, buf); 1881 ret = perf_event_read_one(event, read_format, buf);
1883 mutex_unlock(&counter->child_mutex); 1882 mutex_unlock(&event->child_mutex);
1884 1883
1885 return ret; 1884 return ret;
1886} 1885}
@@ -1888,79 +1887,79 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1888static ssize_t 1887static ssize_t
1889perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 1888perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1890{ 1889{
1891 struct perf_counter *counter = file->private_data; 1890 struct perf_event *event = file->private_data;
1892 1891
1893 return perf_read_hw(counter, buf, count); 1892 return perf_read_hw(event, buf, count);
1894} 1893}
1895 1894
1896static unsigned int perf_poll(struct file *file, poll_table *wait) 1895static unsigned int perf_poll(struct file *file, poll_table *wait)
1897{ 1896{
1898 struct perf_counter *counter = file->private_data; 1897 struct perf_event *event = file->private_data;
1899 struct perf_mmap_data *data; 1898 struct perf_mmap_data *data;
1900 unsigned int events = POLL_HUP; 1899 unsigned int events = POLL_HUP;
1901 1900
1902 rcu_read_lock(); 1901 rcu_read_lock();
1903 data = rcu_dereference(counter->data); 1902 data = rcu_dereference(event->data);
1904 if (data) 1903 if (data)
1905 events = atomic_xchg(&data->poll, 0); 1904 events = atomic_xchg(&data->poll, 0);
1906 rcu_read_unlock(); 1905 rcu_read_unlock();
1907 1906
1908 poll_wait(file, &counter->waitq, wait); 1907 poll_wait(file, &event->waitq, wait);
1909 1908
1910 return events; 1909 return events;
1911} 1910}
1912 1911
1913static void perf_counter_reset(struct perf_counter *counter) 1912static void perf_event_reset(struct perf_event *event)
1914{ 1913{
1915 (void)perf_counter_read(counter); 1914 (void)perf_event_read(event);
1916 atomic64_set(&counter->count, 0); 1915 atomic64_set(&event->count, 0);
1917 perf_counter_update_userpage(counter); 1916 perf_event_update_userpage(event);
1918} 1917}
1919 1918
1920/* 1919/*
1921 * Holding the top-level counter's child_mutex means that any 1920 * Holding the top-level event's child_mutex means that any
1922 * descendant process that has inherited this counter will block 1921 * descendant process that has inherited this event will block
1923 * in sync_child_counter if it goes to exit, thus satisfying the 1922 * in sync_child_event if it goes to exit, thus satisfying the
1924 * task existence requirements of perf_counter_enable/disable. 1923 * task existence requirements of perf_event_enable/disable.
1925 */ 1924 */
1926static void perf_counter_for_each_child(struct perf_counter *counter, 1925static void perf_event_for_each_child(struct perf_event *event,
1927 void (*func)(struct perf_counter *)) 1926 void (*func)(struct perf_event *))
1928{ 1927{
1929 struct perf_counter *child; 1928 struct perf_event *child;
1930 1929
1931 WARN_ON_ONCE(counter->ctx->parent_ctx); 1930 WARN_ON_ONCE(event->ctx->parent_ctx);
1932 mutex_lock(&counter->child_mutex); 1931 mutex_lock(&event->child_mutex);
1933 func(counter); 1932 func(event);
1934 list_for_each_entry(child, &counter->child_list, child_list) 1933 list_for_each_entry(child, &event->child_list, child_list)
1935 func(child); 1934 func(child);
1936 mutex_unlock(&counter->child_mutex); 1935 mutex_unlock(&event->child_mutex);
1937} 1936}
1938 1937
1939static void perf_counter_for_each(struct perf_counter *counter, 1938static void perf_event_for_each(struct perf_event *event,
1940 void (*func)(struct perf_counter *)) 1939 void (*func)(struct perf_event *))
1941{ 1940{
1942 struct perf_counter_context *ctx = counter->ctx; 1941 struct perf_event_context *ctx = event->ctx;
1943 struct perf_counter *sibling; 1942 struct perf_event *sibling;
1944 1943
1945 WARN_ON_ONCE(ctx->parent_ctx); 1944 WARN_ON_ONCE(ctx->parent_ctx);
1946 mutex_lock(&ctx->mutex); 1945 mutex_lock(&ctx->mutex);
1947 counter = counter->group_leader; 1946 event = event->group_leader;
1948 1947
1949 perf_counter_for_each_child(counter, func); 1948 perf_event_for_each_child(event, func);
1950 func(counter); 1949 func(event);
1951 list_for_each_entry(sibling, &counter->sibling_list, list_entry) 1950 list_for_each_entry(sibling, &event->sibling_list, group_entry)
1952 perf_counter_for_each_child(counter, func); 1951 perf_event_for_each_child(event, func);
1953 mutex_unlock(&ctx->mutex); 1952 mutex_unlock(&ctx->mutex);
1954} 1953}
1955 1954
1956static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) 1955static int perf_event_period(struct perf_event *event, u64 __user *arg)
1957{ 1956{
1958 struct perf_counter_context *ctx = counter->ctx; 1957 struct perf_event_context *ctx = event->ctx;
1959 unsigned long size; 1958 unsigned long size;
1960 int ret = 0; 1959 int ret = 0;
1961 u64 value; 1960 u64 value;
1962 1961
1963 if (!counter->attr.sample_period) 1962 if (!event->attr.sample_period)
1964 return -EINVAL; 1963 return -EINVAL;
1965 1964
1966 size = copy_from_user(&value, arg, sizeof(value)); 1965 size = copy_from_user(&value, arg, sizeof(value));
@@ -1971,16 +1970,16 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1971 return -EINVAL; 1970 return -EINVAL;
1972 1971
1973 spin_lock_irq(&ctx->lock); 1972 spin_lock_irq(&ctx->lock);
1974 if (counter->attr.freq) { 1973 if (event->attr.freq) {
1975 if (value > sysctl_perf_counter_sample_rate) { 1974 if (value > sysctl_perf_event_sample_rate) {
1976 ret = -EINVAL; 1975 ret = -EINVAL;
1977 goto unlock; 1976 goto unlock;
1978 } 1977 }
1979 1978
1980 counter->attr.sample_freq = value; 1979 event->attr.sample_freq = value;
1981 } else { 1980 } else {
1982 counter->attr.sample_period = value; 1981 event->attr.sample_period = value;
1983 counter->hw.sample_period = value; 1982 event->hw.sample_period = value;
1984 } 1983 }
1985unlock: 1984unlock:
1986 spin_unlock_irq(&ctx->lock); 1985 spin_unlock_irq(&ctx->lock);
@@ -1988,80 +1987,80 @@ unlock:
1988 return ret; 1987 return ret;
1989} 1988}
1990 1989
1991int perf_counter_set_output(struct perf_counter *counter, int output_fd); 1990int perf_event_set_output(struct perf_event *event, int output_fd);
1992 1991
1993static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1992static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1994{ 1993{
1995 struct perf_counter *counter = file->private_data; 1994 struct perf_event *event = file->private_data;
1996 void (*func)(struct perf_counter *); 1995 void (*func)(struct perf_event *);
1997 u32 flags = arg; 1996 u32 flags = arg;
1998 1997
1999 switch (cmd) { 1998 switch (cmd) {
2000 case PERF_COUNTER_IOC_ENABLE: 1999 case PERF_EVENT_IOC_ENABLE:
2001 func = perf_counter_enable; 2000 func = perf_event_enable;
2002 break; 2001 break;
2003 case PERF_COUNTER_IOC_DISABLE: 2002 case PERF_EVENT_IOC_DISABLE:
2004 func = perf_counter_disable; 2003 func = perf_event_disable;
2005 break; 2004 break;
2006 case PERF_COUNTER_IOC_RESET: 2005 case PERF_EVENT_IOC_RESET:
2007 func = perf_counter_reset; 2006 func = perf_event_reset;
2008 break; 2007 break;
2009 2008
2010 case PERF_COUNTER_IOC_REFRESH: 2009 case PERF_EVENT_IOC_REFRESH:
2011 return perf_counter_refresh(counter, arg); 2010 return perf_event_refresh(event, arg);
2012 2011
2013 case PERF_COUNTER_IOC_PERIOD: 2012 case PERF_EVENT_IOC_PERIOD:
2014 return perf_counter_period(counter, (u64 __user *)arg); 2013 return perf_event_period(event, (u64 __user *)arg);
2015 2014
2016 case PERF_COUNTER_IOC_SET_OUTPUT: 2015 case PERF_EVENT_IOC_SET_OUTPUT:
2017 return perf_counter_set_output(counter, arg); 2016 return perf_event_set_output(event, arg);
2018 2017
2019 default: 2018 default:
2020 return -ENOTTY; 2019 return -ENOTTY;
2021 } 2020 }
2022 2021
2023 if (flags & PERF_IOC_FLAG_GROUP) 2022 if (flags & PERF_IOC_FLAG_GROUP)
2024 perf_counter_for_each(counter, func); 2023 perf_event_for_each(event, func);
2025 else 2024 else
2026 perf_counter_for_each_child(counter, func); 2025 perf_event_for_each_child(event, func);
2027 2026
2028 return 0; 2027 return 0;
2029} 2028}
2030 2029
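
The ioctl dispatch above is reached through ioctl() on an event fd; passing PERF_IOC_FLAG_GROUP as the argument makes perf_event_for_each() fan the operation out over the leader and its siblings. A usage sketch (the fds are assumed to come from perf_event_open(), with the siblings opened against group_fd as their leader):

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>

	/* group_fd: the group leader; its siblings follow it automatically. */
	static void measure_region(int group_fd, void (*region)(void))
	{
		ioctl(group_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
		ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);

		region();			/* code under measurement */

		ioctl(group_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
		/* read() each fd afterwards to collect the counts */
	}
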
2031int perf_counter_task_enable(void) 2030int perf_event_task_enable(void)
2032{ 2031{
2033 struct perf_counter *counter; 2032 struct perf_event *event;
2034 2033
2035 mutex_lock(&current->perf_counter_mutex); 2034 mutex_lock(&current->perf_event_mutex);
2036 list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2035 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2037 perf_counter_for_each_child(counter, perf_counter_enable); 2036 perf_event_for_each_child(event, perf_event_enable);
2038 mutex_unlock(&current->perf_counter_mutex); 2037 mutex_unlock(&current->perf_event_mutex);
2039 2038
2040 return 0; 2039 return 0;
2041} 2040}
2042 2041
2043int perf_counter_task_disable(void) 2042int perf_event_task_disable(void)
2044{ 2043{
2045 struct perf_counter *counter; 2044 struct perf_event *event;
2046 2045
2047 mutex_lock(&current->perf_counter_mutex); 2046 mutex_lock(&current->perf_event_mutex);
2048 list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2047 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2049 perf_counter_for_each_child(counter, perf_counter_disable); 2048 perf_event_for_each_child(event, perf_event_disable);
2050 mutex_unlock(&current->perf_counter_mutex); 2049 mutex_unlock(&current->perf_event_mutex);
2051 2050
2052 return 0; 2051 return 0;
2053} 2052}
2054 2053
2055#ifndef PERF_COUNTER_INDEX_OFFSET 2054#ifndef PERF_EVENT_INDEX_OFFSET
2056# define PERF_COUNTER_INDEX_OFFSET 0 2055# define PERF_EVENT_INDEX_OFFSET 0
2057#endif 2056#endif
2058 2057
2059static int perf_counter_index(struct perf_counter *counter) 2058static int perf_event_index(struct perf_event *event)
2060{ 2059{
2061 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 2060 if (event->state != PERF_EVENT_STATE_ACTIVE)
2062 return 0; 2061 return 0;
2063 2062
2064 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; 2063 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2065} 2064}
2066 2065
2067/* 2066/*
@@ -2069,13 +2068,13 @@ static int perf_counter_index(struct perf_counter *counter)
2069 * the seqlock logic goes bad. We can not serialize this because the arch 2068 * the seqlock logic goes bad. We can not serialize this because the arch
2070 * code calls this from NMI context. 2069 * code calls this from NMI context.
2071 */ 2070 */
2072void perf_counter_update_userpage(struct perf_counter *counter) 2071void perf_event_update_userpage(struct perf_event *event)
2073{ 2072{
2074 struct perf_counter_mmap_page *userpg; 2073 struct perf_event_mmap_page *userpg;
2075 struct perf_mmap_data *data; 2074 struct perf_mmap_data *data;
2076 2075
2077 rcu_read_lock(); 2076 rcu_read_lock();
2078 data = rcu_dereference(counter->data); 2077 data = rcu_dereference(event->data);
2079 if (!data) 2078 if (!data)
2080 goto unlock; 2079 goto unlock;
2081 2080
@@ -2088,16 +2087,16 @@ void perf_counter_update_userpage(struct perf_counter *counter)
2088 preempt_disable(); 2087 preempt_disable();
2089 ++userpg->lock; 2088 ++userpg->lock;
2090 barrier(); 2089 barrier();
2091 userpg->index = perf_counter_index(counter); 2090 userpg->index = perf_event_index(event);
2092 userpg->offset = atomic64_read(&counter->count); 2091 userpg->offset = atomic64_read(&event->count);
2093 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 2092 if (event->state == PERF_EVENT_STATE_ACTIVE)
2094 userpg->offset -= atomic64_read(&counter->hw.prev_count); 2093 userpg->offset -= atomic64_read(&event->hw.prev_count);
2095 2094
2096 userpg->time_enabled = counter->total_time_enabled + 2095 userpg->time_enabled = event->total_time_enabled +
2097 atomic64_read(&counter->child_total_time_enabled); 2096 atomic64_read(&event->child_total_time_enabled);
2098 2097
2099 userpg->time_running = counter->total_time_running + 2098 userpg->time_running = event->total_time_running +
2100 atomic64_read(&counter->child_total_time_running); 2099 atomic64_read(&event->child_total_time_running);
2101 2100
2102 barrier(); 2101 barrier();
2103 ++userpg->lock; 2102 ++userpg->lock;
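perf_event_update_userpage() above publishes index, offset and the time bases through the mmap'ed control page, using the lock word as a lightweight seqcount (incremented before and after the update; the preceding comment explains why this cannot simply be serialized against NMI context). A sketch of the matching user-space read side, assuming the struct perf_event_mmap_page fields from the post-rename <linux/perf_event.h>; the hardware-counter delta (e.g. x86 rdpmc on index - 1) is left out:

#include <linux/perf_event.h>
#include <stdint.h>

struct self_snapshot {
        uint32_t index;         /* perf_event_index(): 0 means "not currently active" */
        uint64_t offset;        /* kernel-accumulated count at the last update */
        uint64_t time_enabled;
        uint64_t time_running;
};

static void read_userpage(volatile struct perf_event_mmap_page *pc,
                          struct self_snapshot *s)
{
        uint32_t seq;

        do {
                seq = pc->lock;
                __sync_synchronize();           /* pairs with the kernel's barrier() */

                s->index        = pc->index;
                s->offset       = pc->offset;
                s->time_enabled = pc->time_enabled;
                s->time_running = pc->time_running;

                __sync_synchronize();
        } while (pc->lock != seq);              /* an update raced with us: retry */
}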
@@ -2108,7 +2107,7 @@ unlock:
2108 2107
2109static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2108static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2110{ 2109{
2111 struct perf_counter *counter = vma->vm_file->private_data; 2110 struct perf_event *event = vma->vm_file->private_data;
2112 struct perf_mmap_data *data; 2111 struct perf_mmap_data *data;
2113 int ret = VM_FAULT_SIGBUS; 2112 int ret = VM_FAULT_SIGBUS;
2114 2113
@@ -2119,7 +2118,7 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2119 } 2118 }
2120 2119
2121 rcu_read_lock(); 2120 rcu_read_lock();
2122 data = rcu_dereference(counter->data); 2121 data = rcu_dereference(event->data);
2123 if (!data) 2122 if (!data)
2124 goto unlock; 2123 goto unlock;
2125 2124
@@ -2148,13 +2147,13 @@ unlock:
2148 return ret; 2147 return ret;
2149} 2148}
2150 2149
2151static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) 2150static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2152{ 2151{
2153 struct perf_mmap_data *data; 2152 struct perf_mmap_data *data;
2154 unsigned long size; 2153 unsigned long size;
2155 int i; 2154 int i;
2156 2155
2157 WARN_ON(atomic_read(&counter->mmap_count)); 2156 WARN_ON(atomic_read(&event->mmap_count));
2158 2157
2159 size = sizeof(struct perf_mmap_data); 2158 size = sizeof(struct perf_mmap_data);
2160 size += nr_pages * sizeof(void *); 2159 size += nr_pages * sizeof(void *);
@@ -2176,14 +2175,14 @@ static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
2176 data->nr_pages = nr_pages; 2175 data->nr_pages = nr_pages;
2177 atomic_set(&data->lock, -1); 2176 atomic_set(&data->lock, -1);
2178 2177
2179 if (counter->attr.watermark) { 2178 if (event->attr.watermark) {
2180 data->watermark = min_t(long, PAGE_SIZE * nr_pages, 2179 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2181 counter->attr.wakeup_watermark); 2180 event->attr.wakeup_watermark);
2182 } 2181 }
2183 if (!data->watermark) 2182 if (!data->watermark)
2184 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4); 2183 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2185 2184
2186 rcu_assign_pointer(counter->data, data); 2185 rcu_assign_pointer(event->data, data);
2187 2186
2188 return 0; 2187 return 0;
2189 2188
@@ -2222,35 +2221,35 @@ static void __perf_mmap_data_free(struct rcu_head *rcu_head)
2222 kfree(data); 2221 kfree(data);
2223} 2222}
2224 2223
2225static void perf_mmap_data_free(struct perf_counter *counter) 2224static void perf_mmap_data_free(struct perf_event *event)
2226{ 2225{
2227 struct perf_mmap_data *data = counter->data; 2226 struct perf_mmap_data *data = event->data;
2228 2227
2229 WARN_ON(atomic_read(&counter->mmap_count)); 2228 WARN_ON(atomic_read(&event->mmap_count));
2230 2229
2231 rcu_assign_pointer(counter->data, NULL); 2230 rcu_assign_pointer(event->data, NULL);
2232 call_rcu(&data->rcu_head, __perf_mmap_data_free); 2231 call_rcu(&data->rcu_head, __perf_mmap_data_free);
2233} 2232}
2234 2233
2235static void perf_mmap_open(struct vm_area_struct *vma) 2234static void perf_mmap_open(struct vm_area_struct *vma)
2236{ 2235{
2237 struct perf_counter *counter = vma->vm_file->private_data; 2236 struct perf_event *event = vma->vm_file->private_data;
2238 2237
2239 atomic_inc(&counter->mmap_count); 2238 atomic_inc(&event->mmap_count);
2240} 2239}
2241 2240
2242static void perf_mmap_close(struct vm_area_struct *vma) 2241static void perf_mmap_close(struct vm_area_struct *vma)
2243{ 2242{
2244 struct perf_counter *counter = vma->vm_file->private_data; 2243 struct perf_event *event = vma->vm_file->private_data;
2245 2244
2246 WARN_ON_ONCE(counter->ctx->parent_ctx); 2245 WARN_ON_ONCE(event->ctx->parent_ctx);
2247 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { 2246 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2248 struct user_struct *user = current_user(); 2247 struct user_struct *user = current_user();
2249 2248
2250 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); 2249 atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm);
2251 vma->vm_mm->locked_vm -= counter->data->nr_locked; 2250 vma->vm_mm->locked_vm -= event->data->nr_locked;
2252 perf_mmap_data_free(counter); 2251 perf_mmap_data_free(event);
2253 mutex_unlock(&counter->mmap_mutex); 2252 mutex_unlock(&event->mmap_mutex);
2254 } 2253 }
2255} 2254}
2256 2255
@@ -2263,7 +2262,7 @@ static struct vm_operations_struct perf_mmap_vmops = {
2263 2262
2264static int perf_mmap(struct file *file, struct vm_area_struct *vma) 2263static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2265{ 2264{
2266 struct perf_counter *counter = file->private_data; 2265 struct perf_event *event = file->private_data;
2267 unsigned long user_locked, user_lock_limit; 2266 unsigned long user_locked, user_lock_limit;
2268 struct user_struct *user = current_user(); 2267 struct user_struct *user = current_user();
2269 unsigned long locked, lock_limit; 2268 unsigned long locked, lock_limit;
@@ -2291,21 +2290,21 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2291 if (vma->vm_pgoff != 0) 2290 if (vma->vm_pgoff != 0)
2292 return -EINVAL; 2291 return -EINVAL;
2293 2292
2294 WARN_ON_ONCE(counter->ctx->parent_ctx); 2293 WARN_ON_ONCE(event->ctx->parent_ctx);
2295 mutex_lock(&counter->mmap_mutex); 2294 mutex_lock(&event->mmap_mutex);
2296 if (counter->output) { 2295 if (event->output) {
2297 ret = -EINVAL; 2296 ret = -EINVAL;
2298 goto unlock; 2297 goto unlock;
2299 } 2298 }
2300 2299
2301 if (atomic_inc_not_zero(&counter->mmap_count)) { 2300 if (atomic_inc_not_zero(&event->mmap_count)) {
2302 if (nr_pages != counter->data->nr_pages) 2301 if (nr_pages != event->data->nr_pages)
2303 ret = -EINVAL; 2302 ret = -EINVAL;
2304 goto unlock; 2303 goto unlock;
2305 } 2304 }
2306 2305
2307 user_extra = nr_pages + 1; 2306 user_extra = nr_pages + 1;
2308 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); 2307 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2309 2308
2310 /* 2309 /*
2311 * Increase the limit linearly with more CPUs: 2310 * Increase the limit linearly with more CPUs:
@@ -2328,20 +2327,20 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2328 goto unlock; 2327 goto unlock;
2329 } 2328 }
2330 2329
2331 WARN_ON(counter->data); 2330 WARN_ON(event->data);
2332 ret = perf_mmap_data_alloc(counter, nr_pages); 2331 ret = perf_mmap_data_alloc(event, nr_pages);
2333 if (ret) 2332 if (ret)
2334 goto unlock; 2333 goto unlock;
2335 2334
2336 atomic_set(&counter->mmap_count, 1); 2335 atomic_set(&event->mmap_count, 1);
2337 atomic_long_add(user_extra, &user->locked_vm); 2336 atomic_long_add(user_extra, &user->locked_vm);
2338 vma->vm_mm->locked_vm += extra; 2337 vma->vm_mm->locked_vm += extra;
2339 counter->data->nr_locked = extra; 2338 event->data->nr_locked = extra;
2340 if (vma->vm_flags & VM_WRITE) 2339 if (vma->vm_flags & VM_WRITE)
2341 counter->data->writable = 1; 2340 event->data->writable = 1;
2342 2341
2343unlock: 2342unlock:
2344 mutex_unlock(&counter->mmap_mutex); 2343 mutex_unlock(&event->mmap_mutex);
2345 2344
2346 vma->vm_flags |= VM_RESERVED; 2345 vma->vm_flags |= VM_RESERVED;
2347 vma->vm_ops = &perf_mmap_vmops; 2346 vma->vm_ops = &perf_mmap_vmops;
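The sysctl_perf_event_mlock >> (PAGE_SHIFT - 10) step above converts the sysctl, which is kept in kilobytes, into pages: with 4 KiB pages PAGE_SHIFT - 10 is 2. A worked example with assumed numbers; the +1 in user_extra is the control page in front of the data pages:

#include <stdio.h>

int main(void)
{
        unsigned long page_shift = 12;                  /* 4 KiB pages (assumed) */
        unsigned long perf_event_mlock_kb = 512;        /* sysctl value in KiB (assumed) */
        unsigned long nr_pages = 32;                    /* requested data pages */

        unsigned long user_lock_limit = perf_event_mlock_kb >> (page_shift - 10);
        unsigned long user_extra = nr_pages + 1;        /* data pages + control page */

        printf("per-user allowance: %lu pages, this mapping: %lu pages\n",
               user_lock_limit, user_extra);            /* 128 vs 33 */
        return 0;
}

Pages beyond that per-user allowance are charged to the mm's locked_vm and checked against RLIMIT_MEMLOCK by the surrounding code, which is why the function tracks both user->locked_vm and data->nr_locked.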
@@ -2352,11 +2351,11 @@ unlock:
2352static int perf_fasync(int fd, struct file *filp, int on) 2351static int perf_fasync(int fd, struct file *filp, int on)
2353{ 2352{
2354 struct inode *inode = filp->f_path.dentry->d_inode; 2353 struct inode *inode = filp->f_path.dentry->d_inode;
2355 struct perf_counter *counter = filp->private_data; 2354 struct perf_event *event = filp->private_data;
2356 int retval; 2355 int retval;
2357 2356
2358 mutex_lock(&inode->i_mutex); 2357 mutex_lock(&inode->i_mutex);
2359 retval = fasync_helper(fd, filp, on, &counter->fasync); 2358 retval = fasync_helper(fd, filp, on, &event->fasync);
2360 mutex_unlock(&inode->i_mutex); 2359 mutex_unlock(&inode->i_mutex);
2361 2360
2362 if (retval < 0) 2361 if (retval < 0)
@@ -2376,19 +2375,19 @@ static const struct file_operations perf_fops = {
2376}; 2375};
2377 2376
2378/* 2377/*
2379 * Perf counter wakeup 2378 * Perf event wakeup
2380 * 2379 *
2381 * If there's data, ensure we set the poll() state and publish everything 2380 * If there's data, ensure we set the poll() state and publish everything
2382 * to user-space before waking everybody up. 2381 * to user-space before waking everybody up.
2383 */ 2382 */
2384 2383
2385void perf_counter_wakeup(struct perf_counter *counter) 2384void perf_event_wakeup(struct perf_event *event)
2386{ 2385{
2387 wake_up_all(&counter->waitq); 2386 wake_up_all(&event->waitq);
2388 2387
2389 if (counter->pending_kill) { 2388 if (event->pending_kill) {
2390 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill); 2389 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2391 counter->pending_kill = 0; 2390 event->pending_kill = 0;
2392 } 2391 }
2393} 2392}
2394 2393
@@ -2401,19 +2400,19 @@ void perf_counter_wakeup(struct perf_counter *counter)
2401 * single linked list and use cmpxchg() to add entries lockless. 2400 * single linked list and use cmpxchg() to add entries lockless.
2402 */ 2401 */
2403 2402
2404static void perf_pending_counter(struct perf_pending_entry *entry) 2403static void perf_pending_event(struct perf_pending_entry *entry)
2405{ 2404{
2406 struct perf_counter *counter = container_of(entry, 2405 struct perf_event *event = container_of(entry,
2407 struct perf_counter, pending); 2406 struct perf_event, pending);
2408 2407
2409 if (counter->pending_disable) { 2408 if (event->pending_disable) {
2410 counter->pending_disable = 0; 2409 event->pending_disable = 0;
2411 __perf_counter_disable(counter); 2410 __perf_event_disable(event);
2412 } 2411 }
2413 2412
2414 if (counter->pending_wakeup) { 2413 if (event->pending_wakeup) {
2415 counter->pending_wakeup = 0; 2414 event->pending_wakeup = 0;
2416 perf_counter_wakeup(counter); 2415 perf_event_wakeup(event);
2417 } 2416 }
2418} 2417}
2419 2418
@@ -2439,7 +2438,7 @@ static void perf_pending_queue(struct perf_pending_entry *entry,
2439 entry->next = *head; 2438 entry->next = *head;
2440 } while (cmpxchg(head, entry->next, entry) != entry->next); 2439 } while (cmpxchg(head, entry->next, entry) != entry->next);
2441 2440
2442 set_perf_counter_pending(); 2441 set_perf_event_pending();
2443 2442
2444 put_cpu_var(perf_pending_head); 2443 put_cpu_var(perf_pending_head);
2445} 2444}
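perf_pending_queue() above pushes an entry onto a per-CPU singly linked list with cmpxchg() so it can be called from NMI context without taking a lock. The same push written for user space with C11 atomics; the names are illustrative, and the kernel additionally marks not-queued entries with a sentinel next pointer:

#include <stdatomic.h>
#include <stddef.h>

struct pending_entry {
        struct pending_entry *next;
};

static _Atomic(struct pending_entry *) pending_head;

static void pending_push(struct pending_entry *entry)
{
        struct pending_entry *old = atomic_load(&pending_head);

        /* Same shape as:  do { entry->next = *head; }
         *                 while (cmpxchg(head, entry->next, entry) != entry->next); */
        do {
                entry->next = old;
        } while (!atomic_compare_exchange_weak(&pending_head, &old, entry));
}

int main(void)
{
        struct pending_entry e;

        pending_push(&e);
        return 0;
}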
@@ -2472,7 +2471,7 @@ static int __perf_pending_run(void)
2472 return nr; 2471 return nr;
2473} 2472}
2474 2473
2475static inline int perf_not_pending(struct perf_counter *counter) 2474static inline int perf_not_pending(struct perf_event *event)
2476{ 2475{
2477 /* 2476 /*
2478 * If we flush on whatever cpu we run, there is a chance we don't 2477 * If we flush on whatever cpu we run, there is a chance we don't
@@ -2487,15 +2486,15 @@ static inline int perf_not_pending(struct perf_counter *counter)
2487 * so that we do not miss the wakeup. -- see perf_pending_handle() 2486 * so that we do not miss the wakeup. -- see perf_pending_handle()
2488 */ 2487 */
2489 smp_rmb(); 2488 smp_rmb();
2490 return counter->pending.next == NULL; 2489 return event->pending.next == NULL;
2491} 2490}
2492 2491
2493static void perf_pending_sync(struct perf_counter *counter) 2492static void perf_pending_sync(struct perf_event *event)
2494{ 2493{
2495 wait_event(counter->waitq, perf_not_pending(counter)); 2494 wait_event(event->waitq, perf_not_pending(event));
2496} 2495}
2497 2496
2498void perf_counter_do_pending(void) 2497void perf_event_do_pending(void)
2499{ 2498{
2500 __perf_pending_run(); 2499 __perf_pending_run();
2501} 2500}
@@ -2536,25 +2535,25 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
2536 atomic_set(&handle->data->poll, POLL_IN); 2535 atomic_set(&handle->data->poll, POLL_IN);
2537 2536
2538 if (handle->nmi) { 2537 if (handle->nmi) {
2539 handle->counter->pending_wakeup = 1; 2538 handle->event->pending_wakeup = 1;
2540 perf_pending_queue(&handle->counter->pending, 2539 perf_pending_queue(&handle->event->pending,
2541 perf_pending_counter); 2540 perf_pending_event);
2542 } else 2541 } else
2543 perf_counter_wakeup(handle->counter); 2542 perf_event_wakeup(handle->event);
2544} 2543}
2545 2544
2546/* 2545/*
2547 * Curious locking construct. 2546 * Curious locking construct.
2548 * 2547 *
2549 * We need to ensure a later event doesn't publish a head when a former 2548 * We need to ensure a later event_id doesn't publish a head when a former
2550 * event isn't done writing. However since we need to deal with NMIs we 2549 * event_id isn't done writing. However since we need to deal with NMIs we
2551 * cannot fully serialize things. 2550 * cannot fully serialize things.
2552 * 2551 *
2553 * What we do is serialize between CPUs so we only have to deal with NMI 2552 * What we do is serialize between CPUs so we only have to deal with NMI
2554 * nesting on a single CPU. 2553 * nesting on a single CPU.
2555 * 2554 *
2556 * We only publish the head (and generate a wakeup) when the outer-most 2555 * We only publish the head (and generate a wakeup) when the outer-most
2557 * event completes. 2556 * event_id completes.
2558 */ 2557 */
2559static void perf_output_lock(struct perf_output_handle *handle) 2558static void perf_output_lock(struct perf_output_handle *handle)
2560{ 2559{
@@ -2658,10 +2657,10 @@ void perf_output_copy(struct perf_output_handle *handle,
2658} 2657}
2659 2658
2660int perf_output_begin(struct perf_output_handle *handle, 2659int perf_output_begin(struct perf_output_handle *handle,
2661 struct perf_counter *counter, unsigned int size, 2660 struct perf_event *event, unsigned int size,
2662 int nmi, int sample) 2661 int nmi, int sample)
2663{ 2662{
2664 struct perf_counter *output_counter; 2663 struct perf_event *output_event;
2665 struct perf_mmap_data *data; 2664 struct perf_mmap_data *data;
2666 unsigned long tail, offset, head; 2665 unsigned long tail, offset, head;
2667 int have_lost; 2666 int have_lost;
@@ -2673,21 +2672,21 @@ int perf_output_begin(struct perf_output_handle *handle,
2673 2672
2674 rcu_read_lock(); 2673 rcu_read_lock();
2675 /* 2674 /*
2676 * For inherited counters we send all the output towards the parent. 2675 * For inherited events we send all the output towards the parent.
2677 */ 2676 */
2678 if (counter->parent) 2677 if (event->parent)
2679 counter = counter->parent; 2678 event = event->parent;
2680 2679
2681 output_counter = rcu_dereference(counter->output); 2680 output_event = rcu_dereference(event->output);
2682 if (output_counter) 2681 if (output_event)
2683 counter = output_counter; 2682 event = output_event;
2684 2683
2685 data = rcu_dereference(counter->data); 2684 data = rcu_dereference(event->data);
2686 if (!data) 2685 if (!data)
2687 goto out; 2686 goto out;
2688 2687
2689 handle->data = data; 2688 handle->data = data;
2690 handle->counter = counter; 2689 handle->event = event;
2691 handle->nmi = nmi; 2690 handle->nmi = nmi;
2692 handle->sample = sample; 2691 handle->sample = sample;
2693 2692
@@ -2721,10 +2720,10 @@ int perf_output_begin(struct perf_output_handle *handle,
2721 atomic_set(&data->wakeup, 1); 2720 atomic_set(&data->wakeup, 1);
2722 2721
2723 if (have_lost) { 2722 if (have_lost) {
2724 lost_event.header.type = PERF_EVENT_LOST; 2723 lost_event.header.type = PERF_RECORD_LOST;
2725 lost_event.header.misc = 0; 2724 lost_event.header.misc = 0;
2726 lost_event.header.size = sizeof(lost_event); 2725 lost_event.header.size = sizeof(lost_event);
2727 lost_event.id = counter->id; 2726 lost_event.id = event->id;
2728 lost_event.lost = atomic_xchg(&data->lost, 0); 2727 lost_event.lost = atomic_xchg(&data->lost, 0);
2729 2728
2730 perf_output_put(handle, lost_event); 2729 perf_output_put(handle, lost_event);
@@ -2743,10 +2742,10 @@ out:
2743 2742
2744void perf_output_end(struct perf_output_handle *handle) 2743void perf_output_end(struct perf_output_handle *handle)
2745{ 2744{
2746 struct perf_counter *counter = handle->counter; 2745 struct perf_event *event = handle->event;
2747 struct perf_mmap_data *data = handle->data; 2746 struct perf_mmap_data *data = handle->data;
2748 2747
2749 int wakeup_events = counter->attr.wakeup_events; 2748 int wakeup_events = event->attr.wakeup_events;
2750 2749
2751 if (handle->sample && wakeup_events) { 2750 if (handle->sample && wakeup_events) {
2752 int events = atomic_inc_return(&data->events); 2751 int events = atomic_inc_return(&data->events);
@@ -2760,58 +2759,58 @@ void perf_output_end(struct perf_output_handle *handle)
2760 rcu_read_unlock(); 2759 rcu_read_unlock();
2761} 2760}
2762 2761
2763static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) 2762static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
2764{ 2763{
2765 /* 2764 /*
2766 * only top level counters have the pid namespace they were created in 2765 * only top level events have the pid namespace they were created in
2767 */ 2766 */
2768 if (counter->parent) 2767 if (event->parent)
2769 counter = counter->parent; 2768 event = event->parent;
2770 2769
2771 return task_tgid_nr_ns(p, counter->ns); 2770 return task_tgid_nr_ns(p, event->ns);
2772} 2771}
2773 2772
2774static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) 2773static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
2775{ 2774{
2776 /* 2775 /*
2777 * only top level counters have the pid namespace they were created in 2776 * only top level events have the pid namespace they were created in
2778 */ 2777 */
2779 if (counter->parent) 2778 if (event->parent)
2780 counter = counter->parent; 2779 event = event->parent;
2781 2780
2782 return task_pid_nr_ns(p, counter->ns); 2781 return task_pid_nr_ns(p, event->ns);
2783} 2782}
2784 2783
2785static void perf_output_read_one(struct perf_output_handle *handle, 2784static void perf_output_read_one(struct perf_output_handle *handle,
2786 struct perf_counter *counter) 2785 struct perf_event *event)
2787{ 2786{
2788 u64 read_format = counter->attr.read_format; 2787 u64 read_format = event->attr.read_format;
2789 u64 values[4]; 2788 u64 values[4];
2790 int n = 0; 2789 int n = 0;
2791 2790
2792 values[n++] = atomic64_read(&counter->count); 2791 values[n++] = atomic64_read(&event->count);
2793 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2792 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2794 values[n++] = counter->total_time_enabled + 2793 values[n++] = event->total_time_enabled +
2795 atomic64_read(&counter->child_total_time_enabled); 2794 atomic64_read(&event->child_total_time_enabled);
2796 } 2795 }
2797 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2796 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2798 values[n++] = counter->total_time_running + 2797 values[n++] = event->total_time_running +
2799 atomic64_read(&counter->child_total_time_running); 2798 atomic64_read(&event->child_total_time_running);
2800 } 2799 }
2801 if (read_format & PERF_FORMAT_ID) 2800 if (read_format & PERF_FORMAT_ID)
2802 values[n++] = primary_counter_id(counter); 2801 values[n++] = primary_event_id(event);
2803 2802
2804 perf_output_copy(handle, values, n * sizeof(u64)); 2803 perf_output_copy(handle, values, n * sizeof(u64));
2805} 2804}
2806 2805
2807/* 2806/*
2808 * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. 2807 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
2809 */ 2808 */
2810static void perf_output_read_group(struct perf_output_handle *handle, 2809static void perf_output_read_group(struct perf_output_handle *handle,
2811 struct perf_counter *counter) 2810 struct perf_event *event)
2812{ 2811{
2813 struct perf_counter *leader = counter->group_leader, *sub; 2812 struct perf_event *leader = event->group_leader, *sub;
2814 u64 read_format = counter->attr.read_format; 2813 u64 read_format = event->attr.read_format;
2815 u64 values[5]; 2814 u64 values[5];
2816 int n = 0; 2815 int n = 0;
2817 2816
@@ -2823,42 +2822,42 @@ static void perf_output_read_group(struct perf_output_handle *handle,
2823 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 2822 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2824 values[n++] = leader->total_time_running; 2823 values[n++] = leader->total_time_running;
2825 2824
2826 if (leader != counter) 2825 if (leader != event)
2827 leader->pmu->read(leader); 2826 leader->pmu->read(leader);
2828 2827
2829 values[n++] = atomic64_read(&leader->count); 2828 values[n++] = atomic64_read(&leader->count);
2830 if (read_format & PERF_FORMAT_ID) 2829 if (read_format & PERF_FORMAT_ID)
2831 values[n++] = primary_counter_id(leader); 2830 values[n++] = primary_event_id(leader);
2832 2831
2833 perf_output_copy(handle, values, n * sizeof(u64)); 2832 perf_output_copy(handle, values, n * sizeof(u64));
2834 2833
2835 list_for_each_entry(sub, &leader->sibling_list, list_entry) { 2834 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2836 n = 0; 2835 n = 0;
2837 2836
2838 if (sub != counter) 2837 if (sub != event)
2839 sub->pmu->read(sub); 2838 sub->pmu->read(sub);
2840 2839
2841 values[n++] = atomic64_read(&sub->count); 2840 values[n++] = atomic64_read(&sub->count);
2842 if (read_format & PERF_FORMAT_ID) 2841 if (read_format & PERF_FORMAT_ID)
2843 values[n++] = primary_counter_id(sub); 2842 values[n++] = primary_event_id(sub);
2844 2843
2845 perf_output_copy(handle, values, n * sizeof(u64)); 2844 perf_output_copy(handle, values, n * sizeof(u64));
2846 } 2845 }
2847} 2846}
2848 2847
2849static void perf_output_read(struct perf_output_handle *handle, 2848static void perf_output_read(struct perf_output_handle *handle,
2850 struct perf_counter *counter) 2849 struct perf_event *event)
2851{ 2850{
2852 if (counter->attr.read_format & PERF_FORMAT_GROUP) 2851 if (event->attr.read_format & PERF_FORMAT_GROUP)
2853 perf_output_read_group(handle, counter); 2852 perf_output_read_group(handle, event);
2854 else 2853 else
2855 perf_output_read_one(handle, counter); 2854 perf_output_read_one(handle, event);
2856} 2855}
2857 2856
2858void perf_output_sample(struct perf_output_handle *handle, 2857void perf_output_sample(struct perf_output_handle *handle,
2859 struct perf_event_header *header, 2858 struct perf_event_header *header,
2860 struct perf_sample_data *data, 2859 struct perf_sample_data *data,
2861 struct perf_counter *counter) 2860 struct perf_event *event)
2862{ 2861{
2863 u64 sample_type = data->type; 2862 u64 sample_type = data->type;
2864 2863
@@ -2889,7 +2888,7 @@ void perf_output_sample(struct perf_output_handle *handle,
2889 perf_output_put(handle, data->period); 2888 perf_output_put(handle, data->period);
2890 2889
2891 if (sample_type & PERF_SAMPLE_READ) 2890 if (sample_type & PERF_SAMPLE_READ)
2892 perf_output_read(handle, counter); 2891 perf_output_read(handle, event);
2893 2892
2894 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2893 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2895 if (data->callchain) { 2894 if (data->callchain) {
@@ -2927,14 +2926,14 @@ void perf_output_sample(struct perf_output_handle *handle,
2927 2926
2928void perf_prepare_sample(struct perf_event_header *header, 2927void perf_prepare_sample(struct perf_event_header *header,
2929 struct perf_sample_data *data, 2928 struct perf_sample_data *data,
2930 struct perf_counter *counter, 2929 struct perf_event *event,
2931 struct pt_regs *regs) 2930 struct pt_regs *regs)
2932{ 2931{
2933 u64 sample_type = counter->attr.sample_type; 2932 u64 sample_type = event->attr.sample_type;
2934 2933
2935 data->type = sample_type; 2934 data->type = sample_type;
2936 2935
2937 header->type = PERF_EVENT_SAMPLE; 2936 header->type = PERF_RECORD_SAMPLE;
2938 header->size = sizeof(*header); 2937 header->size = sizeof(*header);
2939 2938
2940 header->misc = 0; 2939 header->misc = 0;
@@ -2948,8 +2947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
2948 2947
2949 if (sample_type & PERF_SAMPLE_TID) { 2948 if (sample_type & PERF_SAMPLE_TID) {
2950 /* namespace issues */ 2949 /* namespace issues */
2951 data->tid_entry.pid = perf_counter_pid(counter, current); 2950 data->tid_entry.pid = perf_event_pid(event, current);
2952 data->tid_entry.tid = perf_counter_tid(counter, current); 2951 data->tid_entry.tid = perf_event_tid(event, current);
2953 2952
2954 header->size += sizeof(data->tid_entry); 2953 header->size += sizeof(data->tid_entry);
2955 } 2954 }
@@ -2964,13 +2963,13 @@ void perf_prepare_sample(struct perf_event_header *header,
2964 header->size += sizeof(data->addr); 2963 header->size += sizeof(data->addr);
2965 2964
2966 if (sample_type & PERF_SAMPLE_ID) { 2965 if (sample_type & PERF_SAMPLE_ID) {
2967 data->id = primary_counter_id(counter); 2966 data->id = primary_event_id(event);
2968 2967
2969 header->size += sizeof(data->id); 2968 header->size += sizeof(data->id);
2970 } 2969 }
2971 2970
2972 if (sample_type & PERF_SAMPLE_STREAM_ID) { 2971 if (sample_type & PERF_SAMPLE_STREAM_ID) {
2973 data->stream_id = counter->id; 2972 data->stream_id = event->id;
2974 2973
2975 header->size += sizeof(data->stream_id); 2974 header->size += sizeof(data->stream_id);
2976 } 2975 }
@@ -2986,7 +2985,7 @@ void perf_prepare_sample(struct perf_event_header *header,
2986 header->size += sizeof(data->period); 2985 header->size += sizeof(data->period);
2987 2986
2988 if (sample_type & PERF_SAMPLE_READ) 2987 if (sample_type & PERF_SAMPLE_READ)
2989 header->size += perf_counter_read_size(counter); 2988 header->size += perf_event_read_size(event);
2990 2989
2991 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2990 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2992 int size = 1; 2991 int size = 1;
@@ -3012,25 +3011,25 @@ void perf_prepare_sample(struct perf_event_header *header,
3012 } 3011 }
3013} 3012}
3014 3013
3015static void perf_counter_output(struct perf_counter *counter, int nmi, 3014static void perf_event_output(struct perf_event *event, int nmi,
3016 struct perf_sample_data *data, 3015 struct perf_sample_data *data,
3017 struct pt_regs *regs) 3016 struct pt_regs *regs)
3018{ 3017{
3019 struct perf_output_handle handle; 3018 struct perf_output_handle handle;
3020 struct perf_event_header header; 3019 struct perf_event_header header;
3021 3020
3022 perf_prepare_sample(&header, data, counter, regs); 3021 perf_prepare_sample(&header, data, event, regs);
3023 3022
3024 if (perf_output_begin(&handle, counter, header.size, nmi, 1)) 3023 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3025 return; 3024 return;
3026 3025
3027 perf_output_sample(&handle, &header, data, counter); 3026 perf_output_sample(&handle, &header, data, event);
3028 3027
3029 perf_output_end(&handle); 3028 perf_output_end(&handle);
3030} 3029}
3031 3030
3032/* 3031/*
3033 * read event 3032 * read event_id
3034 */ 3033 */
3035 3034
3036struct perf_read_event { 3035struct perf_read_event {
@@ -3041,27 +3040,27 @@ struct perf_read_event {
3041}; 3040};
3042 3041
3043static void 3042static void
3044perf_counter_read_event(struct perf_counter *counter, 3043perf_event_read_event(struct perf_event *event,
3045 struct task_struct *task) 3044 struct task_struct *task)
3046{ 3045{
3047 struct perf_output_handle handle; 3046 struct perf_output_handle handle;
3048 struct perf_read_event event = { 3047 struct perf_read_event read_event = {
3049 .header = { 3048 .header = {
3050 .type = PERF_EVENT_READ, 3049 .type = PERF_RECORD_READ,
3051 .misc = 0, 3050 .misc = 0,
3052 .size = sizeof(event) + perf_counter_read_size(counter), 3051 .size = sizeof(read_event) + perf_event_read_size(event),
3053 }, 3052 },
3054 .pid = perf_counter_pid(counter, task), 3053 .pid = perf_event_pid(event, task),
3055 .tid = perf_counter_tid(counter, task), 3054 .tid = perf_event_tid(event, task),
3056 }; 3055 };
3057 int ret; 3056 int ret;
3058 3057
3059 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); 3058 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3060 if (ret) 3059 if (ret)
3061 return; 3060 return;
3062 3061
3063 perf_output_put(&handle, event); 3062 perf_output_put(&handle, read_event);
3064 perf_output_read(&handle, counter); 3063 perf_output_read(&handle, event);
3065 3064
3066 perf_output_end(&handle); 3065 perf_output_end(&handle);
3067} 3066}
@@ -3074,7 +3073,7 @@ perf_counter_read_event(struct perf_counter *counter,
3074 3073
3075struct perf_task_event { 3074struct perf_task_event {
3076 struct task_struct *task; 3075 struct task_struct *task;
3077 struct perf_counter_context *task_ctx; 3076 struct perf_event_context *task_ctx;
3078 3077
3079 struct { 3078 struct {
3080 struct perf_event_header header; 3079 struct perf_event_header header;
@@ -3084,10 +3083,10 @@ struct perf_task_event {
3084 u32 tid; 3083 u32 tid;
3085 u32 ptid; 3084 u32 ptid;
3086 u64 time; 3085 u64 time;
3087 } event; 3086 } event_id;
3088}; 3087};
3089 3088
3090static void perf_counter_task_output(struct perf_counter *counter, 3089static void perf_event_task_output(struct perf_event *event,
3091 struct perf_task_event *task_event) 3090 struct perf_task_event *task_event)
3092{ 3091{
3093 struct perf_output_handle handle; 3092 struct perf_output_handle handle;
@@ -3095,85 +3094,85 @@ static void perf_counter_task_output(struct perf_counter *counter,
3095 struct task_struct *task = task_event->task; 3094 struct task_struct *task = task_event->task;
3096 int ret; 3095 int ret;
3097 3096
3098 size = task_event->event.header.size; 3097 size = task_event->event_id.header.size;
3099 ret = perf_output_begin(&handle, counter, size, 0, 0); 3098 ret = perf_output_begin(&handle, event, size, 0, 0);
3100 3099
3101 if (ret) 3100 if (ret)
3102 return; 3101 return;
3103 3102
3104 task_event->event.pid = perf_counter_pid(counter, task); 3103 task_event->event_id.pid = perf_event_pid(event, task);
3105 task_event->event.ppid = perf_counter_pid(counter, current); 3104 task_event->event_id.ppid = perf_event_pid(event, current);
3106 3105
3107 task_event->event.tid = perf_counter_tid(counter, task); 3106 task_event->event_id.tid = perf_event_tid(event, task);
3108 task_event->event.ptid = perf_counter_tid(counter, current); 3107 task_event->event_id.ptid = perf_event_tid(event, current);
3109 3108
3110 task_event->event.time = perf_clock(); 3109 task_event->event_id.time = perf_clock();
3111 3110
3112 perf_output_put(&handle, task_event->event); 3111 perf_output_put(&handle, task_event->event_id);
3113 3112
3114 perf_output_end(&handle); 3113 perf_output_end(&handle);
3115} 3114}
3116 3115
3117static int perf_counter_task_match(struct perf_counter *counter) 3116static int perf_event_task_match(struct perf_event *event)
3118{ 3117{
3119 if (counter->attr.comm || counter->attr.mmap || counter->attr.task) 3118 if (event->attr.comm || event->attr.mmap || event->attr.task)
3120 return 1; 3119 return 1;
3121 3120
3122 return 0; 3121 return 0;
3123} 3122}
3124 3123
3125static void perf_counter_task_ctx(struct perf_counter_context *ctx, 3124static void perf_event_task_ctx(struct perf_event_context *ctx,
3126 struct perf_task_event *task_event) 3125 struct perf_task_event *task_event)
3127{ 3126{
3128 struct perf_counter *counter; 3127 struct perf_event *event;
3129 3128
3130 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3129 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3131 return; 3130 return;
3132 3131
3133 rcu_read_lock(); 3132 rcu_read_lock();
3134 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3133 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3135 if (perf_counter_task_match(counter)) 3134 if (perf_event_task_match(event))
3136 perf_counter_task_output(counter, task_event); 3135 perf_event_task_output(event, task_event);
3137 } 3136 }
3138 rcu_read_unlock(); 3137 rcu_read_unlock();
3139} 3138}
3140 3139
3141static void perf_counter_task_event(struct perf_task_event *task_event) 3140static void perf_event_task_event(struct perf_task_event *task_event)
3142{ 3141{
3143 struct perf_cpu_context *cpuctx; 3142 struct perf_cpu_context *cpuctx;
3144 struct perf_counter_context *ctx = task_event->task_ctx; 3143 struct perf_event_context *ctx = task_event->task_ctx;
3145 3144
3146 cpuctx = &get_cpu_var(perf_cpu_context); 3145 cpuctx = &get_cpu_var(perf_cpu_context);
3147 perf_counter_task_ctx(&cpuctx->ctx, task_event); 3146 perf_event_task_ctx(&cpuctx->ctx, task_event);
3148 put_cpu_var(perf_cpu_context); 3147 put_cpu_var(perf_cpu_context);
3149 3148
3150 rcu_read_lock(); 3149 rcu_read_lock();
3151 if (!ctx) 3150 if (!ctx)
3152 ctx = rcu_dereference(task_event->task->perf_counter_ctxp); 3151 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
3153 if (ctx) 3152 if (ctx)
3154 perf_counter_task_ctx(ctx, task_event); 3153 perf_event_task_ctx(ctx, task_event);
3155 rcu_read_unlock(); 3154 rcu_read_unlock();
3156} 3155}
3157 3156
3158static void perf_counter_task(struct task_struct *task, 3157static void perf_event_task(struct task_struct *task,
3159 struct perf_counter_context *task_ctx, 3158 struct perf_event_context *task_ctx,
3160 int new) 3159 int new)
3161{ 3160{
3162 struct perf_task_event task_event; 3161 struct perf_task_event task_event;
3163 3162
3164 if (!atomic_read(&nr_comm_counters) && 3163 if (!atomic_read(&nr_comm_events) &&
3165 !atomic_read(&nr_mmap_counters) && 3164 !atomic_read(&nr_mmap_events) &&
3166 !atomic_read(&nr_task_counters)) 3165 !atomic_read(&nr_task_events))
3167 return; 3166 return;
3168 3167
3169 task_event = (struct perf_task_event){ 3168 task_event = (struct perf_task_event){
3170 .task = task, 3169 .task = task,
3171 .task_ctx = task_ctx, 3170 .task_ctx = task_ctx,
3172 .event = { 3171 .event_id = {
3173 .header = { 3172 .header = {
3174 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, 3173 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3175 .misc = 0, 3174 .misc = 0,
3176 .size = sizeof(task_event.event), 3175 .size = sizeof(task_event.event_id),
3177 }, 3176 },
3178 /* .pid */ 3177 /* .pid */
3179 /* .ppid */ 3178 /* .ppid */
@@ -3182,12 +3181,12 @@ static void perf_counter_task(struct task_struct *task,
3182 }, 3181 },
3183 }; 3182 };
3184 3183
3185 perf_counter_task_event(&task_event); 3184 perf_event_task_event(&task_event);
3186} 3185}
3187 3186
3188void perf_counter_fork(struct task_struct *task) 3187void perf_event_fork(struct task_struct *task)
3189{ 3188{
3190 perf_counter_task(task, NULL, 1); 3189 perf_event_task(task, NULL, 1);
3191} 3190}
3192 3191
3193/* 3192/*
@@ -3204,56 +3203,56 @@ struct perf_comm_event {
3204 3203
3205 u32 pid; 3204 u32 pid;
3206 u32 tid; 3205 u32 tid;
3207 } event; 3206 } event_id;
3208}; 3207};
3209 3208
3210static void perf_counter_comm_output(struct perf_counter *counter, 3209static void perf_event_comm_output(struct perf_event *event,
3211 struct perf_comm_event *comm_event) 3210 struct perf_comm_event *comm_event)
3212{ 3211{
3213 struct perf_output_handle handle; 3212 struct perf_output_handle handle;
3214 int size = comm_event->event.header.size; 3213 int size = comm_event->event_id.header.size;
3215 int ret = perf_output_begin(&handle, counter, size, 0, 0); 3214 int ret = perf_output_begin(&handle, event, size, 0, 0);
3216 3215
3217 if (ret) 3216 if (ret)
3218 return; 3217 return;
3219 3218
3220 comm_event->event.pid = perf_counter_pid(counter, comm_event->task); 3219 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3221 comm_event->event.tid = perf_counter_tid(counter, comm_event->task); 3220 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3222 3221
3223 perf_output_put(&handle, comm_event->event); 3222 perf_output_put(&handle, comm_event->event_id);
3224 perf_output_copy(&handle, comm_event->comm, 3223 perf_output_copy(&handle, comm_event->comm,
3225 comm_event->comm_size); 3224 comm_event->comm_size);
3226 perf_output_end(&handle); 3225 perf_output_end(&handle);
3227} 3226}
3228 3227
3229static int perf_counter_comm_match(struct perf_counter *counter) 3228static int perf_event_comm_match(struct perf_event *event)
3230{ 3229{
3231 if (counter->attr.comm) 3230 if (event->attr.comm)
3232 return 1; 3231 return 1;
3233 3232
3234 return 0; 3233 return 0;
3235} 3234}
3236 3235
3237static void perf_counter_comm_ctx(struct perf_counter_context *ctx, 3236static void perf_event_comm_ctx(struct perf_event_context *ctx,
3238 struct perf_comm_event *comm_event) 3237 struct perf_comm_event *comm_event)
3239{ 3238{
3240 struct perf_counter *counter; 3239 struct perf_event *event;
3241 3240
3242 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3241 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3243 return; 3242 return;
3244 3243
3245 rcu_read_lock(); 3244 rcu_read_lock();
3246 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3245 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3247 if (perf_counter_comm_match(counter)) 3246 if (perf_event_comm_match(event))
3248 perf_counter_comm_output(counter, comm_event); 3247 perf_event_comm_output(event, comm_event);
3249 } 3248 }
3250 rcu_read_unlock(); 3249 rcu_read_unlock();
3251} 3250}
3252 3251
3253static void perf_counter_comm_event(struct perf_comm_event *comm_event) 3252static void perf_event_comm_event(struct perf_comm_event *comm_event)
3254{ 3253{
3255 struct perf_cpu_context *cpuctx; 3254 struct perf_cpu_context *cpuctx;
3256 struct perf_counter_context *ctx; 3255 struct perf_event_context *ctx;
3257 unsigned int size; 3256 unsigned int size;
3258 char comm[TASK_COMM_LEN]; 3257 char comm[TASK_COMM_LEN];
3259 3258
@@ -3264,10 +3263,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3264 comm_event->comm = comm; 3263 comm_event->comm = comm;
3265 comm_event->comm_size = size; 3264 comm_event->comm_size = size;
3266 3265
3267 comm_event->event.header.size = sizeof(comm_event->event) + size; 3266 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3268 3267
3269 cpuctx = &get_cpu_var(perf_cpu_context); 3268 cpuctx = &get_cpu_var(perf_cpu_context);
3270 perf_counter_comm_ctx(&cpuctx->ctx, comm_event); 3269 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3271 put_cpu_var(perf_cpu_context); 3270 put_cpu_var(perf_cpu_context);
3272 3271
3273 rcu_read_lock(); 3272 rcu_read_lock();
@@ -3275,29 +3274,29 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3275 * doesn't really matter which of the child contexts the 3274 * doesn't really matter which of the child contexts the
3276 * events ends up in. 3275 * events ends up in.
3277 */ 3276 */
3278 ctx = rcu_dereference(current->perf_counter_ctxp); 3277 ctx = rcu_dereference(current->perf_event_ctxp);
3279 if (ctx) 3278 if (ctx)
3280 perf_counter_comm_ctx(ctx, comm_event); 3279 perf_event_comm_ctx(ctx, comm_event);
3281 rcu_read_unlock(); 3280 rcu_read_unlock();
3282} 3281}
3283 3282
3284void perf_counter_comm(struct task_struct *task) 3283void perf_event_comm(struct task_struct *task)
3285{ 3284{
3286 struct perf_comm_event comm_event; 3285 struct perf_comm_event comm_event;
3287 3286
3288 if (task->perf_counter_ctxp) 3287 if (task->perf_event_ctxp)
3289 perf_counter_enable_on_exec(task); 3288 perf_event_enable_on_exec(task);
3290 3289
3291 if (!atomic_read(&nr_comm_counters)) 3290 if (!atomic_read(&nr_comm_events))
3292 return; 3291 return;
3293 3292
3294 comm_event = (struct perf_comm_event){ 3293 comm_event = (struct perf_comm_event){
3295 .task = task, 3294 .task = task,
3296 /* .comm */ 3295 /* .comm */
3297 /* .comm_size */ 3296 /* .comm_size */
3298 .event = { 3297 .event_id = {
3299 .header = { 3298 .header = {
3300 .type = PERF_EVENT_COMM, 3299 .type = PERF_RECORD_COMM,
3301 .misc = 0, 3300 .misc = 0,
3302 /* .size */ 3301 /* .size */
3303 }, 3302 },
@@ -3306,7 +3305,7 @@ void perf_counter_comm(struct task_struct *task)
3306 }, 3305 },
3307 }; 3306 };
3308 3307
3309 perf_counter_comm_event(&comm_event); 3308 perf_event_comm_event(&comm_event);
3310} 3309}
3311 3310
3312/* 3311/*
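The PERF_RECORD_COMM record assembled above, like the PERF_RECORD_FORK/EXIT, PERF_RECORD_READ and PERF_RECORD_MMAP records built elsewhere in this file, lands in the mmap'ed ring buffer behind the control page. A sketch of how a reader walks those records; it assumes one control page followed by a power-of-two number of data pages, and it only prints sizes because record bodies can wrap around the end of the buffer and would need to be copied out first:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

static void drain_records(struct perf_event_mmap_page *pc, char *base, uint64_t size)
{
        uint64_t head = pc->data_head;
        uint64_t tail = pc->data_tail;

        __sync_synchronize();           /* read data_head before the record bytes */

        while (tail < head) {
                struct perf_event_header *hdr =
                        (struct perf_event_header *)(base + (tail & (size - 1)));

                switch (hdr->type) {
                case PERF_RECORD_COMM:
                        printf("comm record, %u bytes\n", hdr->size);
                        break;
                case PERF_RECORD_MMAP:
                case PERF_RECORD_FORK:
                case PERF_RECORD_EXIT:
                        printf("task/mmap record type %u, %u bytes\n",
                               hdr->type, hdr->size);
                        break;
                default:
                        break;
                }
                tail += hdr->size;
        }

        __sync_synchronize();           /* finish reading before releasing the space */
        pc->data_tail = tail;           /* only honoured for writable mappings (data->writable above) */
}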
@@ -3327,57 +3326,57 @@ struct perf_mmap_event {
3327 u64 start; 3326 u64 start;
3328 u64 len; 3327 u64 len;
3329 u64 pgoff; 3328 u64 pgoff;
3330 } event; 3329 } event_id;
3331}; 3330};
3332 3331
3333static void perf_counter_mmap_output(struct perf_counter *counter, 3332static void perf_event_mmap_output(struct perf_event *event,
3334 struct perf_mmap_event *mmap_event) 3333 struct perf_mmap_event *mmap_event)
3335{ 3334{
3336 struct perf_output_handle handle; 3335 struct perf_output_handle handle;
3337 int size = mmap_event->event.header.size; 3336 int size = mmap_event->event_id.header.size;
3338 int ret = perf_output_begin(&handle, counter, size, 0, 0); 3337 int ret = perf_output_begin(&handle, event, size, 0, 0);
3339 3338
3340 if (ret) 3339 if (ret)
3341 return; 3340 return;
3342 3341
3343 mmap_event->event.pid = perf_counter_pid(counter, current); 3342 mmap_event->event_id.pid = perf_event_pid(event, current);
3344 mmap_event->event.tid = perf_counter_tid(counter, current); 3343 mmap_event->event_id.tid = perf_event_tid(event, current);
3345 3344
3346 perf_output_put(&handle, mmap_event->event); 3345 perf_output_put(&handle, mmap_event->event_id);
3347 perf_output_copy(&handle, mmap_event->file_name, 3346 perf_output_copy(&handle, mmap_event->file_name,
3348 mmap_event->file_size); 3347 mmap_event->file_size);
3349 perf_output_end(&handle); 3348 perf_output_end(&handle);
3350} 3349}
3351 3350
3352static int perf_counter_mmap_match(struct perf_counter *counter, 3351static int perf_event_mmap_match(struct perf_event *event,
3353 struct perf_mmap_event *mmap_event) 3352 struct perf_mmap_event *mmap_event)
3354{ 3353{
3355 if (counter->attr.mmap) 3354 if (event->attr.mmap)
3356 return 1; 3355 return 1;
3357 3356
3358 return 0; 3357 return 0;
3359} 3358}
3360 3359
3361static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, 3360static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3362 struct perf_mmap_event *mmap_event) 3361 struct perf_mmap_event *mmap_event)
3363{ 3362{
3364 struct perf_counter *counter; 3363 struct perf_event *event;
3365 3364
3366 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3365 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3367 return; 3366 return;
3368 3367
3369 rcu_read_lock(); 3368 rcu_read_lock();
3370 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3369 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3371 if (perf_counter_mmap_match(counter, mmap_event)) 3370 if (perf_event_mmap_match(event, mmap_event))
3372 perf_counter_mmap_output(counter, mmap_event); 3371 perf_event_mmap_output(event, mmap_event);
3373 } 3372 }
3374 rcu_read_unlock(); 3373 rcu_read_unlock();
3375} 3374}
3376 3375
3377static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) 3376static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3378{ 3377{
3379 struct perf_cpu_context *cpuctx; 3378 struct perf_cpu_context *cpuctx;
3380 struct perf_counter_context *ctx; 3379 struct perf_event_context *ctx;
3381 struct vm_area_struct *vma = mmap_event->vma; 3380 struct vm_area_struct *vma = mmap_event->vma;
3382 struct file *file = vma->vm_file; 3381 struct file *file = vma->vm_file;
3383 unsigned int size; 3382 unsigned int size;
@@ -3425,10 +3424,10 @@ got_name:
3425 mmap_event->file_name = name; 3424 mmap_event->file_name = name;
3426 mmap_event->file_size = size; 3425 mmap_event->file_size = size;
3427 3426
3428 mmap_event->event.header.size = sizeof(mmap_event->event) + size; 3427 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3429 3428
3430 cpuctx = &get_cpu_var(perf_cpu_context); 3429 cpuctx = &get_cpu_var(perf_cpu_context);
3431 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); 3430 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3432 put_cpu_var(perf_cpu_context); 3431 put_cpu_var(perf_cpu_context);
3433 3432
3434 rcu_read_lock(); 3433 rcu_read_lock();
@@ -3436,28 +3435,28 @@ got_name:
3436 * doesn't really matter which of the child contexts the 3435 * doesn't really matter which of the child contexts the
3437 * events ends up in. 3436 * events ends up in.
3438 */ 3437 */
3439 ctx = rcu_dereference(current->perf_counter_ctxp); 3438 ctx = rcu_dereference(current->perf_event_ctxp);
3440 if (ctx) 3439 if (ctx)
3441 perf_counter_mmap_ctx(ctx, mmap_event); 3440 perf_event_mmap_ctx(ctx, mmap_event);
3442 rcu_read_unlock(); 3441 rcu_read_unlock();
3443 3442
3444 kfree(buf); 3443 kfree(buf);
3445} 3444}
3446 3445
3447void __perf_counter_mmap(struct vm_area_struct *vma) 3446void __perf_event_mmap(struct vm_area_struct *vma)
3448{ 3447{
3449 struct perf_mmap_event mmap_event; 3448 struct perf_mmap_event mmap_event;
3450 3449
3451 if (!atomic_read(&nr_mmap_counters)) 3450 if (!atomic_read(&nr_mmap_events))
3452 return; 3451 return;
3453 3452
3454 mmap_event = (struct perf_mmap_event){ 3453 mmap_event = (struct perf_mmap_event){
3455 .vma = vma, 3454 .vma = vma,
3456 /* .file_name */ 3455 /* .file_name */
3457 /* .file_size */ 3456 /* .file_size */
3458 .event = { 3457 .event_id = {
3459 .header = { 3458 .header = {
3460 .type = PERF_EVENT_MMAP, 3459 .type = PERF_RECORD_MMAP,
3461 .misc = 0, 3460 .misc = 0,
3462 /* .size */ 3461 /* .size */
3463 }, 3462 },
@@ -3469,14 +3468,14 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
3469 }, 3468 },
3470 }; 3469 };
3471 3470
3472 perf_counter_mmap_event(&mmap_event); 3471 perf_event_mmap_event(&mmap_event);
3473} 3472}
3474 3473
3475/* 3474/*
3476 * IRQ throttle logging 3475 * IRQ throttle logging
3477 */ 3476 */
3478 3477
3479static void perf_log_throttle(struct perf_counter *counter, int enable) 3478static void perf_log_throttle(struct perf_event *event, int enable)
3480{ 3479{
3481 struct perf_output_handle handle; 3480 struct perf_output_handle handle;
3482 int ret; 3481 int ret;
@@ -3488,19 +3487,19 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3488 u64 stream_id; 3487 u64 stream_id;
3489 } throttle_event = { 3488 } throttle_event = {
3490 .header = { 3489 .header = {
3491 .type = PERF_EVENT_THROTTLE, 3490 .type = PERF_RECORD_THROTTLE,
3492 .misc = 0, 3491 .misc = 0,
3493 .size = sizeof(throttle_event), 3492 .size = sizeof(throttle_event),
3494 }, 3493 },
3495 .time = perf_clock(), 3494 .time = perf_clock(),
3496 .id = primary_counter_id(counter), 3495 .id = primary_event_id(event),
3497 .stream_id = counter->id, 3496 .stream_id = event->id,
3498 }; 3497 };
3499 3498
3500 if (enable) 3499 if (enable)
3501 throttle_event.header.type = PERF_EVENT_UNTHROTTLE; 3500 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3502 3501
3503 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); 3502 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3504 if (ret) 3503 if (ret)
3505 return; 3504 return;
3506 3505
@@ -3509,18 +3508,18 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3509} 3508}
3510 3509
3511/* 3510/*
3512 * Generic counter overflow handling, sampling. 3511 * Generic event overflow handling, sampling.
3513 */ 3512 */
3514 3513
3515static int __perf_counter_overflow(struct perf_counter *counter, int nmi, 3514static int __perf_event_overflow(struct perf_event *event, int nmi,
3516 int throttle, struct perf_sample_data *data, 3515 int throttle, struct perf_sample_data *data,
3517 struct pt_regs *regs) 3516 struct pt_regs *regs)
3518{ 3517{
3519 int events = atomic_read(&counter->event_limit); 3518 int events = atomic_read(&event->event_limit);
3520 struct hw_perf_counter *hwc = &counter->hw; 3519 struct hw_perf_event *hwc = &event->hw;
3521 int ret = 0; 3520 int ret = 0;
3522 3521
3523 throttle = (throttle && counter->pmu->unthrottle != NULL); 3522 throttle = (throttle && event->pmu->unthrottle != NULL);
3524 3523
3525 if (!throttle) { 3524 if (!throttle) {
3526 hwc->interrupts++; 3525 hwc->interrupts++;
@@ -3528,73 +3527,73 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
3528 if (hwc->interrupts != MAX_INTERRUPTS) { 3527 if (hwc->interrupts != MAX_INTERRUPTS) {
3529 hwc->interrupts++; 3528 hwc->interrupts++;
3530 if (HZ * hwc->interrupts > 3529 if (HZ * hwc->interrupts >
3531 (u64)sysctl_perf_counter_sample_rate) { 3530 (u64)sysctl_perf_event_sample_rate) {
3532 hwc->interrupts = MAX_INTERRUPTS; 3531 hwc->interrupts = MAX_INTERRUPTS;
3533 perf_log_throttle(counter, 0); 3532 perf_log_throttle(event, 0);
3534 ret = 1; 3533 ret = 1;
3535 } 3534 }
3536 } else { 3535 } else {
3537 /* 3536 /*
3538 * Keep re-disabling counters even though on the previous 3537 * Keep re-disabling events even though on the previous
3539 * pass we disabled it - just in case we raced with a 3538 * pass we disabled it - just in case we raced with a
3540 * sched-in and the counter got enabled again: 3539 * sched-in and the event got enabled again:
3541 */ 3540 */
3542 ret = 1; 3541 ret = 1;
3543 } 3542 }
3544 } 3543 }
3545 3544
3546 if (counter->attr.freq) { 3545 if (event->attr.freq) {
3547 u64 now = perf_clock(); 3546 u64 now = perf_clock();
3548 s64 delta = now - hwc->freq_stamp; 3547 s64 delta = now - hwc->freq_stamp;
3549 3548
3550 hwc->freq_stamp = now; 3549 hwc->freq_stamp = now;
3551 3550
3552 if (delta > 0 && delta < TICK_NSEC) 3551 if (delta > 0 && delta < TICK_NSEC)
3553 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); 3552 perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
3554 } 3553 }
3555 3554
3556 /* 3555 /*
3557 * XXX event_limit might not quite work as expected on inherited 3556 * XXX event_limit might not quite work as expected on inherited
3558 * counters 3557 * events
3559 */ 3558 */
3560 3559
3561 counter->pending_kill = POLL_IN; 3560 event->pending_kill = POLL_IN;
3562 if (events && atomic_dec_and_test(&counter->event_limit)) { 3561 if (events && atomic_dec_and_test(&event->event_limit)) {
3563 ret = 1; 3562 ret = 1;
3564 counter->pending_kill = POLL_HUP; 3563 event->pending_kill = POLL_HUP;
3565 if (nmi) { 3564 if (nmi) {
3566 counter->pending_disable = 1; 3565 event->pending_disable = 1;
3567 perf_pending_queue(&counter->pending, 3566 perf_pending_queue(&event->pending,
3568 perf_pending_counter); 3567 perf_pending_event);
3569 } else 3568 } else
3570 perf_counter_disable(counter); 3569 perf_event_disable(event);
3571 } 3570 }
3572 3571
3573 perf_counter_output(counter, nmi, data, regs); 3572 perf_event_output(event, nmi, data, regs);
3574 return ret; 3573 return ret;
3575} 3574}
3576 3575
3577int perf_counter_overflow(struct perf_counter *counter, int nmi, 3576int perf_event_overflow(struct perf_event *event, int nmi,
3578 struct perf_sample_data *data, 3577 struct perf_sample_data *data,
3579 struct pt_regs *regs) 3578 struct pt_regs *regs)
3580{ 3579{
3581 return __perf_counter_overflow(counter, nmi, 1, data, regs); 3580 return __perf_event_overflow(event, nmi, 1, data, regs);
3582} 3581}
3583 3582
3584/* 3583/*
3585 * Generic software counter infrastructure 3584 * Generic software event infrastructure
3586 */ 3585 */
3587 3586
3588/* 3587/*
3589 * We directly increment counter->count and keep a second value in 3588 * We directly increment event->count and keep a second value in
3590 * counter->hw.period_left to count intervals. This period counter 3589 * event->hw.period_left to count intervals. This period event
3591 * is kept in the range [-sample_period, 0] so that we can use the 3590 * is kept in the range [-sample_period, 0] so that we can use the
3592 * sign as trigger. 3591 * sign as trigger.
3593 */ 3592 */
3594 3593
3595static u64 perf_swcounter_set_period(struct perf_counter *counter) 3594static u64 perf_swevent_set_period(struct perf_event *event)
3596{ 3595{
3597 struct hw_perf_counter *hwc = &counter->hw; 3596 struct hw_perf_event *hwc = &event->hw;
3598 u64 period = hwc->last_period; 3597 u64 period = hwc->last_period;
3599 u64 nr, offset; 3598 u64 nr, offset;
3600 s64 old, val; 3599 s64 old, val;
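The throttle check near the top of __perf_event_overflow() above trips once HZ * hwc->interrupts exceeds the sample-rate sysctl, i.e. once more than sysctl_perf_event_sample_rate / HZ overflow interrupts arrive within a single tick, at which point PERF_RECORD_THROTTLE is logged and the event stays silenced until unthrottled. A worked example with assumed values:

#include <stdio.h>

int main(void)
{
        unsigned long hz = 1000;                /* assumed CONFIG_HZ */
        unsigned long sample_rate = 100000;     /* assumed perf_event sample-rate sysctl */
        unsigned long interrupts = 1;

        /* __perf_event_overflow() throttles once hz * interrupts > sample_rate. */
        while (hz * interrupts <= sample_rate)
                interrupts++;
        printf("throttled after %lu overflows in one tick\n", interrupts);      /* 101 */
        return 0;
}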
@@ -3615,22 +3614,22 @@ again:
3615 return nr; 3614 return nr;
3616} 3615}
3617 3616
3618static void perf_swcounter_overflow(struct perf_counter *counter, 3617static void perf_swevent_overflow(struct perf_event *event,
3619 int nmi, struct perf_sample_data *data, 3618 int nmi, struct perf_sample_data *data,
3620 struct pt_regs *regs) 3619 struct pt_regs *regs)
3621{ 3620{
3622 struct hw_perf_counter *hwc = &counter->hw; 3621 struct hw_perf_event *hwc = &event->hw;
3623 int throttle = 0; 3622 int throttle = 0;
3624 u64 overflow; 3623 u64 overflow;
3625 3624
3626 data->period = counter->hw.last_period; 3625 data->period = event->hw.last_period;
3627 overflow = perf_swcounter_set_period(counter); 3626 overflow = perf_swevent_set_period(event);
3628 3627
3629 if (hwc->interrupts == MAX_INTERRUPTS) 3628 if (hwc->interrupts == MAX_INTERRUPTS)
3630 return; 3629 return;
3631 3630
3632 for (; overflow; overflow--) { 3631 for (; overflow; overflow--) {
3633 if (__perf_counter_overflow(counter, nmi, throttle, 3632 if (__perf_event_overflow(event, nmi, throttle,
3634 data, regs)) { 3633 data, regs)) {
3635 /* 3634 /*
3636 * We inhibit the overflow from happening when 3635 * We inhibit the overflow from happening when
@@ -3642,20 +3641,20 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
3642 } 3641 }
3643} 3642}
3644 3643
3645static void perf_swcounter_unthrottle(struct perf_counter *counter) 3644static void perf_swevent_unthrottle(struct perf_event *event)
3646{ 3645{
3647 /* 3646 /*
3648 * Nothing to do, we already reset hwc->interrupts. 3647 * Nothing to do, we already reset hwc->interrupts.
3649 */ 3648 */
3650} 3649}
3651 3650
3652static void perf_swcounter_add(struct perf_counter *counter, u64 nr, 3651static void perf_swevent_add(struct perf_event *event, u64 nr,
3653 int nmi, struct perf_sample_data *data, 3652 int nmi, struct perf_sample_data *data,
3654 struct pt_regs *regs) 3653 struct pt_regs *regs)
3655{ 3654{
3656 struct hw_perf_counter *hwc = &counter->hw; 3655 struct hw_perf_event *hwc = &event->hw;
3657 3656
3658 atomic64_add(nr, &counter->count); 3657 atomic64_add(nr, &event->count);
3659 3658
3660 if (!hwc->sample_period) 3659 if (!hwc->sample_period)
3661 return; 3660 return;
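As the comment before perf_swevent_set_period() explains, software events keep hw.period_left in [-sample_period, 0) and use the sign flip as the sampling trigger; perf_swevent_add() above accumulates into event->count and, just below, feeds the same count into atomic64_add_negative() to detect the flip. A simplified, non-atomic sketch of the combined bookkeeping of perf_swevent_add() and perf_swevent_set_period(); the kernel version uses atomic64_cmpxchg() so NMIs can race with it safely:

#include <inttypes.h>
#include <stdio.h>

/*
 * period_left is kept in [-sample_period, 0); adding the new count drives it
 * toward zero, and crossing zero means one or more sample periods elapsed.
 * Returns how many overflows to report.
 */
static uint64_t sw_period_add(int64_t *period_left, int64_t sample_period, int64_t nr)
{
        uint64_t overflows = 0;

        *period_left += nr;
        if (*period_left >= 0) {
                overflows = *period_left / sample_period + 1;
                *period_left -= (int64_t)overflows * sample_period;
        }
        return overflows;
}

int main(void)
{
        int64_t left = -100;                    /* sample_period = 100 */

        printf("%" PRIu64 "\n", sw_period_add(&left, 100, 30));        /* 0, left = -70 */
        printf("%" PRIu64 "\n", sw_period_add(&left, 100, 250));       /* 2, left = -20 */
        return 0;
}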
@@ -3664,29 +3663,29 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3664 return; 3663 return;
3665 3664
3666 if (!atomic64_add_negative(nr, &hwc->period_left)) 3665 if (!atomic64_add_negative(nr, &hwc->period_left))
3667 perf_swcounter_overflow(counter, nmi, data, regs); 3666 perf_swevent_overflow(event, nmi, data, regs);
3668} 3667}
3669 3668
3670static int perf_swcounter_is_counting(struct perf_counter *counter) 3669static int perf_swevent_is_counting(struct perf_event *event)
3671{ 3670{
3672 /* 3671 /*
3673 * The counter is active, we're good! 3672 * The event is active, we're good!
3674 */ 3673 */
3675 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 3674 if (event->state == PERF_EVENT_STATE_ACTIVE)
3676 return 1; 3675 return 1;
3677 3676
3678 /* 3677 /*
3679 * The counter is off/error, not counting. 3678 * The event is off/error, not counting.
3680 */ 3679 */
3681 if (counter->state != PERF_COUNTER_STATE_INACTIVE) 3680 if (event->state != PERF_EVENT_STATE_INACTIVE)
3682 return 0; 3681 return 0;
3683 3682
3684 /* 3683 /*
3685 * The counter is inactive, if the context is active 3684 * The event is inactive, if the context is active
3686 * we're part of a group that didn't make it on the 'pmu', 3685 * we're part of a group that didn't make it on the 'pmu',
3687 * not counting. 3686 * not counting.
3688 */ 3687 */
3689 if (counter->ctx->is_active) 3688 if (event->ctx->is_active)
3690 return 0; 3689 return 0;
3691 3690
3692 /* 3691 /*
@@ -3697,49 +3696,49 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
3697 return 1; 3696 return 1;
3698} 3697}
3699 3698
3700static int perf_swcounter_match(struct perf_counter *counter, 3699static int perf_swevent_match(struct perf_event *event,
3701 enum perf_type_id type, 3700 enum perf_type_id type,
3702 u32 event, struct pt_regs *regs) 3701 u32 event_id, struct pt_regs *regs)
3703{ 3702{
3704 if (!perf_swcounter_is_counting(counter)) 3703 if (!perf_swevent_is_counting(event))
3705 return 0; 3704 return 0;
3706 3705
3707 if (counter->attr.type != type) 3706 if (event->attr.type != type)
3708 return 0; 3707 return 0;
3709 if (counter->attr.config != event) 3708 if (event->attr.config != event_id)
3710 return 0; 3709 return 0;
3711 3710
3712 if (regs) { 3711 if (regs) {
3713 if (counter->attr.exclude_user && user_mode(regs)) 3712 if (event->attr.exclude_user && user_mode(regs))
3714 return 0; 3713 return 0;
3715 3714
3716 if (counter->attr.exclude_kernel && !user_mode(regs)) 3715 if (event->attr.exclude_kernel && !user_mode(regs))
3717 return 0; 3716 return 0;
3718 } 3717 }
3719 3718
3720 return 1; 3719 return 1;
3721} 3720}
3722 3721
3723static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, 3722static void perf_swevent_ctx_event(struct perf_event_context *ctx,
3724 enum perf_type_id type, 3723 enum perf_type_id type,
3725 u32 event, u64 nr, int nmi, 3724 u32 event_id, u64 nr, int nmi,
3726 struct perf_sample_data *data, 3725 struct perf_sample_data *data,
3727 struct pt_regs *regs) 3726 struct pt_regs *regs)
3728{ 3727{
3729 struct perf_counter *counter; 3728 struct perf_event *event;
3730 3729
3731 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3730 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3732 return; 3731 return;
3733 3732
3734 rcu_read_lock(); 3733 rcu_read_lock();
3735 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3734 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3736 if (perf_swcounter_match(counter, type, event, regs)) 3735 if (perf_swevent_match(event, type, event_id, regs))
3737 perf_swcounter_add(counter, nr, nmi, data, regs); 3736 perf_swevent_add(event, nr, nmi, data, regs);
3738 } 3737 }
3739 rcu_read_unlock(); 3738 rcu_read_unlock();
3740} 3739}
3741 3740
3742static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) 3741static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
3743{ 3742{
3744 if (in_nmi()) 3743 if (in_nmi())
3745 return &cpuctx->recursion[3]; 3744 return &cpuctx->recursion[3];
@@ -3753,14 +3752,14 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3753 return &cpuctx->recursion[0]; 3752 return &cpuctx->recursion[0];
3754} 3753}
3755 3754
3756static void do_perf_swcounter_event(enum perf_type_id type, u32 event, 3755static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
3757 u64 nr, int nmi, 3756 u64 nr, int nmi,
3758 struct perf_sample_data *data, 3757 struct perf_sample_data *data,
3759 struct pt_regs *regs) 3758 struct pt_regs *regs)
3760{ 3759{
3761 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); 3760 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3762 int *recursion = perf_swcounter_recursion_context(cpuctx); 3761 int *recursion = perf_swevent_recursion_context(cpuctx);
3763 struct perf_counter_context *ctx; 3762 struct perf_event_context *ctx;
3764 3763
3765 if (*recursion) 3764 if (*recursion)
3766 goto out; 3765 goto out;
@@ -3768,16 +3767,16 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3768 (*recursion)++; 3767 (*recursion)++;
3769 barrier(); 3768 barrier();
3770 3769
3771 perf_swcounter_ctx_event(&cpuctx->ctx, type, event, 3770 perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
3772 nr, nmi, data, regs); 3771 nr, nmi, data, regs);
3773 rcu_read_lock(); 3772 rcu_read_lock();
3774 /* 3773 /*
3775 * doesn't really matter which of the child contexts the 3774 * doesn't really matter which of the child contexts the
3776 * events ends up in. 3775 * events ends up in.
3777 */ 3776 */
3778 ctx = rcu_dereference(current->perf_counter_ctxp); 3777 ctx = rcu_dereference(current->perf_event_ctxp);
3779 if (ctx) 3778 if (ctx)
3780 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs); 3779 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
3781 rcu_read_unlock(); 3780 rcu_read_unlock();
3782 3781
3783 barrier(); 3782 barrier();
@@ -3787,57 +3786,57 @@ out:
3787 put_cpu_var(perf_cpu_context); 3786 put_cpu_var(perf_cpu_context);
3788} 3787}
3789 3788
3790void __perf_swcounter_event(u32 event, u64 nr, int nmi, 3789void __perf_sw_event(u32 event_id, u64 nr, int nmi,
3791 struct pt_regs *regs, u64 addr) 3790 struct pt_regs *regs, u64 addr)
3792{ 3791{
3793 struct perf_sample_data data = { 3792 struct perf_sample_data data = {
3794 .addr = addr, 3793 .addr = addr,
3795 }; 3794 };
3796 3795
3797 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, 3796 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
3798 &data, regs); 3797 &data, regs);
3799} 3798}
3800 3799
3801static void perf_swcounter_read(struct perf_counter *counter) 3800static void perf_swevent_read(struct perf_event *event)
3802{ 3801{
3803} 3802}
3804 3803
3805static int perf_swcounter_enable(struct perf_counter *counter) 3804static int perf_swevent_enable(struct perf_event *event)
3806{ 3805{
3807 struct hw_perf_counter *hwc = &counter->hw; 3806 struct hw_perf_event *hwc = &event->hw;
3808 3807
3809 if (hwc->sample_period) { 3808 if (hwc->sample_period) {
3810 hwc->last_period = hwc->sample_period; 3809 hwc->last_period = hwc->sample_period;
3811 perf_swcounter_set_period(counter); 3810 perf_swevent_set_period(event);
3812 } 3811 }
3813 return 0; 3812 return 0;
3814} 3813}
3815 3814
3816static void perf_swcounter_disable(struct perf_counter *counter) 3815static void perf_swevent_disable(struct perf_event *event)
3817{ 3816{
3818} 3817}
3819 3818
3820static const struct pmu perf_ops_generic = { 3819static const struct pmu perf_ops_generic = {
3821 .enable = perf_swcounter_enable, 3820 .enable = perf_swevent_enable,
3822 .disable = perf_swcounter_disable, 3821 .disable = perf_swevent_disable,
3823 .read = perf_swcounter_read, 3822 .read = perf_swevent_read,
3824 .unthrottle = perf_swcounter_unthrottle, 3823 .unthrottle = perf_swevent_unthrottle,
3825}; 3824};
3826 3825
3827/* 3826/*
3828 * hrtimer based swcounter callback 3827 * hrtimer based swevent callback
3829 */ 3828 */
3830 3829
3831static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) 3830static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3832{ 3831{
3833 enum hrtimer_restart ret = HRTIMER_RESTART; 3832 enum hrtimer_restart ret = HRTIMER_RESTART;
3834 struct perf_sample_data data; 3833 struct perf_sample_data data;
3835 struct pt_regs *regs; 3834 struct pt_regs *regs;
3836 struct perf_counter *counter; 3835 struct perf_event *event;
3837 u64 period; 3836 u64 period;
3838 3837
3839 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); 3838 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
3840 counter->pmu->read(counter); 3839 event->pmu->read(event);
3841 3840
3842 data.addr = 0; 3841 data.addr = 0;
3843 regs = get_irq_regs(); 3842 regs = get_irq_regs();
@@ -3845,45 +3844,45 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3845 * In case we exclude kernel IPs or are somehow not in interrupt 3844 * In case we exclude kernel IPs or are somehow not in interrupt
3846 * context, provide the next best thing, the user IP. 3845 * context, provide the next best thing, the user IP.
3847 */ 3846 */
3848 if ((counter->attr.exclude_kernel || !regs) && 3847 if ((event->attr.exclude_kernel || !regs) &&
3849 !counter->attr.exclude_user) 3848 !event->attr.exclude_user)
3850 regs = task_pt_regs(current); 3849 regs = task_pt_regs(current);
3851 3850
3852 if (regs) { 3851 if (regs) {
3853 if (perf_counter_overflow(counter, 0, &data, regs)) 3852 if (perf_event_overflow(event, 0, &data, regs))
3854 ret = HRTIMER_NORESTART; 3853 ret = HRTIMER_NORESTART;
3855 } 3854 }
3856 3855
3857 period = max_t(u64, 10000, counter->hw.sample_period); 3856 period = max_t(u64, 10000, event->hw.sample_period);
3858 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 3857 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3859 3858
3860 return ret; 3859 return ret;
3861} 3860}
3862 3861
3863/* 3862/*
3864 * Software counter: cpu wall time clock 3863 * Software event: cpu wall time clock
3865 */ 3864 */
3866 3865
3867static void cpu_clock_perf_counter_update(struct perf_counter *counter) 3866static void cpu_clock_perf_event_update(struct perf_event *event)
3868{ 3867{
3869 int cpu = raw_smp_processor_id(); 3868 int cpu = raw_smp_processor_id();
3870 s64 prev; 3869 s64 prev;
3871 u64 now; 3870 u64 now;
3872 3871
3873 now = cpu_clock(cpu); 3872 now = cpu_clock(cpu);
3874 prev = atomic64_read(&counter->hw.prev_count); 3873 prev = atomic64_read(&event->hw.prev_count);
3875 atomic64_set(&counter->hw.prev_count, now); 3874 atomic64_set(&event->hw.prev_count, now);
3876 atomic64_add(now - prev, &counter->count); 3875 atomic64_add(now - prev, &event->count);
3877} 3876}
3878 3877
3879static int cpu_clock_perf_counter_enable(struct perf_counter *counter) 3878static int cpu_clock_perf_event_enable(struct perf_event *event)
3880{ 3879{
3881 struct hw_perf_counter *hwc = &counter->hw; 3880 struct hw_perf_event *hwc = &event->hw;
3882 int cpu = raw_smp_processor_id(); 3881 int cpu = raw_smp_processor_id();
3883 3882
3884 atomic64_set(&hwc->prev_count, cpu_clock(cpu)); 3883 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3885 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3884 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3886 hwc->hrtimer.function = perf_swcounter_hrtimer; 3885 hwc->hrtimer.function = perf_swevent_hrtimer;
3887 if (hwc->sample_period) { 3886 if (hwc->sample_period) {
3888 u64 period = max_t(u64, 10000, hwc->sample_period); 3887 u64 period = max_t(u64, 10000, hwc->sample_period);
3889 __hrtimer_start_range_ns(&hwc->hrtimer, 3888 __hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3894,48 +3893,48 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3894 return 0; 3893 return 0;
3895} 3894}
3896 3895
3897static void cpu_clock_perf_counter_disable(struct perf_counter *counter) 3896static void cpu_clock_perf_event_disable(struct perf_event *event)
3898{ 3897{
3899 if (counter->hw.sample_period) 3898 if (event->hw.sample_period)
3900 hrtimer_cancel(&counter->hw.hrtimer); 3899 hrtimer_cancel(&event->hw.hrtimer);
3901 cpu_clock_perf_counter_update(counter); 3900 cpu_clock_perf_event_update(event);
3902} 3901}
3903 3902
3904static void cpu_clock_perf_counter_read(struct perf_counter *counter) 3903static void cpu_clock_perf_event_read(struct perf_event *event)
3905{ 3904{
3906 cpu_clock_perf_counter_update(counter); 3905 cpu_clock_perf_event_update(event);
3907} 3906}
3908 3907
3909static const struct pmu perf_ops_cpu_clock = { 3908static const struct pmu perf_ops_cpu_clock = {
3910 .enable = cpu_clock_perf_counter_enable, 3909 .enable = cpu_clock_perf_event_enable,
3911 .disable = cpu_clock_perf_counter_disable, 3910 .disable = cpu_clock_perf_event_disable,
3912 .read = cpu_clock_perf_counter_read, 3911 .read = cpu_clock_perf_event_read,
3913}; 3912};
3914 3913
3915/* 3914/*
3916 * Software counter: task time clock 3915 * Software event: task time clock
3917 */ 3916 */
3918 3917
3919static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) 3918static void task_clock_perf_event_update(struct perf_event *event, u64 now)
3920{ 3919{
3921 u64 prev; 3920 u64 prev;
3922 s64 delta; 3921 s64 delta;
3923 3922
3924 prev = atomic64_xchg(&counter->hw.prev_count, now); 3923 prev = atomic64_xchg(&event->hw.prev_count, now);
3925 delta = now - prev; 3924 delta = now - prev;
3926 atomic64_add(delta, &counter->count); 3925 atomic64_add(delta, &event->count);
3927} 3926}
3928 3927
3929static int task_clock_perf_counter_enable(struct perf_counter *counter) 3928static int task_clock_perf_event_enable(struct perf_event *event)
3930{ 3929{
3931 struct hw_perf_counter *hwc = &counter->hw; 3930 struct hw_perf_event *hwc = &event->hw;
3932 u64 now; 3931 u64 now;
3933 3932
3934 now = counter->ctx->time; 3933 now = event->ctx->time;
3935 3934
3936 atomic64_set(&hwc->prev_count, now); 3935 atomic64_set(&hwc->prev_count, now);
3937 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3936 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3938 hwc->hrtimer.function = perf_swcounter_hrtimer; 3937 hwc->hrtimer.function = perf_swevent_hrtimer;
3939 if (hwc->sample_period) { 3938 if (hwc->sample_period) {
3940 u64 period = max_t(u64, 10000, hwc->sample_period); 3939 u64 period = max_t(u64, 10000, hwc->sample_period);
3941 __hrtimer_start_range_ns(&hwc->hrtimer, 3940 __hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3946,38 +3945,38 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
3946 return 0; 3945 return 0;
3947} 3946}
3948 3947
3949static void task_clock_perf_counter_disable(struct perf_counter *counter) 3948static void task_clock_perf_event_disable(struct perf_event *event)
3950{ 3949{
3951 if (counter->hw.sample_period) 3950 if (event->hw.sample_period)
3952 hrtimer_cancel(&counter->hw.hrtimer); 3951 hrtimer_cancel(&event->hw.hrtimer);
3953 task_clock_perf_counter_update(counter, counter->ctx->time); 3952 task_clock_perf_event_update(event, event->ctx->time);
3954 3953
3955} 3954}
3956 3955
3957static void task_clock_perf_counter_read(struct perf_counter *counter) 3956static void task_clock_perf_event_read(struct perf_event *event)
3958{ 3957{
3959 u64 time; 3958 u64 time;
3960 3959
3961 if (!in_nmi()) { 3960 if (!in_nmi()) {
3962 update_context_time(counter->ctx); 3961 update_context_time(event->ctx);
3963 time = counter->ctx->time; 3962 time = event->ctx->time;
3964 } else { 3963 } else {
3965 u64 now = perf_clock(); 3964 u64 now = perf_clock();
3966 u64 delta = now - counter->ctx->timestamp; 3965 u64 delta = now - event->ctx->timestamp;
3967 time = counter->ctx->time + delta; 3966 time = event->ctx->time + delta;
3968 } 3967 }
3969 3968
3970 task_clock_perf_counter_update(counter, time); 3969 task_clock_perf_event_update(event, time);
3971} 3970}
3972 3971
3973static const struct pmu perf_ops_task_clock = { 3972static const struct pmu perf_ops_task_clock = {
3974 .enable = task_clock_perf_counter_enable, 3973 .enable = task_clock_perf_event_enable,
3975 .disable = task_clock_perf_counter_disable, 3974 .disable = task_clock_perf_event_disable,
3976 .read = task_clock_perf_counter_read, 3975 .read = task_clock_perf_event_read,
3977}; 3976};
3978 3977
3979#ifdef CONFIG_EVENT_PROFILE 3978#ifdef CONFIG_EVENT_PROFILE
3980void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, 3979void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
3981 int entry_size) 3980 int entry_size)
3982{ 3981{
3983 struct perf_raw_record raw = { 3982 struct perf_raw_record raw = {
@@ -3995,78 +3994,78 @@ void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
3995 if (!regs) 3994 if (!regs)
3996 regs = task_pt_regs(current); 3995 regs = task_pt_regs(current);
3997 3996
3998 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 3997 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
3999 &data, regs); 3998 &data, regs);
4000} 3999}
4001EXPORT_SYMBOL_GPL(perf_tpcounter_event); 4000EXPORT_SYMBOL_GPL(perf_tp_event);
4002 4001
4003extern int ftrace_profile_enable(int); 4002extern int ftrace_profile_enable(int);
4004extern void ftrace_profile_disable(int); 4003extern void ftrace_profile_disable(int);
4005 4004
4006static void tp_perf_counter_destroy(struct perf_counter *counter) 4005static void tp_perf_event_destroy(struct perf_event *event)
4007{ 4006{
4008 ftrace_profile_disable(counter->attr.config); 4007 ftrace_profile_disable(event->attr.config);
4009} 4008}
4010 4009
4011static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4010static const struct pmu *tp_perf_event_init(struct perf_event *event)
4012{ 4011{
4013 /* 4012 /*
4014 * Raw tracepoint data is a severe data leak, only allow root to 4013 * Raw tracepoint data is a severe data leak, only allow root to
4015 * have these. 4014 * have these.
4016 */ 4015 */
4017 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && 4016 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4018 perf_paranoid_tracepoint_raw() && 4017 perf_paranoid_tracepoint_raw() &&
4019 !capable(CAP_SYS_ADMIN)) 4018 !capable(CAP_SYS_ADMIN))
4020 return ERR_PTR(-EPERM); 4019 return ERR_PTR(-EPERM);
4021 4020
4022 if (ftrace_profile_enable(counter->attr.config)) 4021 if (ftrace_profile_enable(event->attr.config))
4023 return NULL; 4022 return NULL;
4024 4023
4025 counter->destroy = tp_perf_counter_destroy; 4024 event->destroy = tp_perf_event_destroy;
4026 4025
4027 return &perf_ops_generic; 4026 return &perf_ops_generic;
4028} 4027}
4029#else 4028#else
4030static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4029static const struct pmu *tp_perf_event_init(struct perf_event *event)
4031{ 4030{
4032 return NULL; 4031 return NULL;
4033} 4032}
4034#endif 4033#endif
4035 4034
4036atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; 4035atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4037 4036
4038static void sw_perf_counter_destroy(struct perf_counter *counter) 4037static void sw_perf_event_destroy(struct perf_event *event)
4039{ 4038{
4040 u64 event = counter->attr.config; 4039 u64 event_id = event->attr.config;
4041 4040
4042 WARN_ON(counter->parent); 4041 WARN_ON(event->parent);
4043 4042
4044 atomic_dec(&perf_swcounter_enabled[event]); 4043 atomic_dec(&perf_swevent_enabled[event_id]);
4045} 4044}
4046 4045
4047static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) 4046static const struct pmu *sw_perf_event_init(struct perf_event *event)
4048{ 4047{
4049 const struct pmu *pmu = NULL; 4048 const struct pmu *pmu = NULL;
4050 u64 event = counter->attr.config; 4049 u64 event_id = event->attr.config;
4051 4050
4052 /* 4051 /*
4053 * Software counters (currently) can't in general distinguish 4052 * Software events (currently) can't in general distinguish
4054 * between user, kernel and hypervisor events. 4053 * between user, kernel and hypervisor events.
4055 * However, context switches and cpu migrations are considered 4054 * However, context switches and cpu migrations are considered
4056 * to be kernel events, and page faults are never hypervisor 4055 * to be kernel events, and page faults are never hypervisor
4057 * events. 4056 * events.
4058 */ 4057 */
4059 switch (event) { 4058 switch (event_id) {
4060 case PERF_COUNT_SW_CPU_CLOCK: 4059 case PERF_COUNT_SW_CPU_CLOCK:
4061 pmu = &perf_ops_cpu_clock; 4060 pmu = &perf_ops_cpu_clock;
4062 4061
4063 break; 4062 break;
4064 case PERF_COUNT_SW_TASK_CLOCK: 4063 case PERF_COUNT_SW_TASK_CLOCK:
4065 /* 4064 /*
4066 * If the user instantiates this as a per-cpu counter, 4065 * If the user instantiates this as a per-cpu event,
4067 * use the cpu_clock counter instead. 4066 * use the cpu_clock event instead.
4068 */ 4067 */
4069 if (counter->ctx->task) 4068 if (event->ctx->task)
4070 pmu = &perf_ops_task_clock; 4069 pmu = &perf_ops_task_clock;
4071 else 4070 else
4072 pmu = &perf_ops_cpu_clock; 4071 pmu = &perf_ops_cpu_clock;
@@ -4077,9 +4076,9 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
4077 case PERF_COUNT_SW_PAGE_FAULTS_MAJ: 4076 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4078 case PERF_COUNT_SW_CONTEXT_SWITCHES: 4077 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4079 case PERF_COUNT_SW_CPU_MIGRATIONS: 4078 case PERF_COUNT_SW_CPU_MIGRATIONS:
4080 if (!counter->parent) { 4079 if (!event->parent) {
4081 atomic_inc(&perf_swcounter_enabled[event]); 4080 atomic_inc(&perf_swevent_enabled[event_id]);
4082 counter->destroy = sw_perf_counter_destroy; 4081 event->destroy = sw_perf_event_destroy;
4083 } 4082 }
4084 pmu = &perf_ops_generic; 4083 pmu = &perf_ops_generic;
4085 break; 4084 break;
@@ -4089,62 +4088,62 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
4089} 4088}
4090 4089
4091/* 4090/*
4092 * Allocate and initialize a counter structure 4091 * Allocate and initialize an event structure
4093 */ 4092 */
4094static struct perf_counter * 4093static struct perf_event *
4095perf_counter_alloc(struct perf_counter_attr *attr, 4094perf_event_alloc(struct perf_event_attr *attr,
4096 int cpu, 4095 int cpu,
4097 struct perf_counter_context *ctx, 4096 struct perf_event_context *ctx,
4098 struct perf_counter *group_leader, 4097 struct perf_event *group_leader,
4099 struct perf_counter *parent_counter, 4098 struct perf_event *parent_event,
4100 gfp_t gfpflags) 4099 gfp_t gfpflags)
4101{ 4100{
4102 const struct pmu *pmu; 4101 const struct pmu *pmu;
4103 struct perf_counter *counter; 4102 struct perf_event *event;
4104 struct hw_perf_counter *hwc; 4103 struct hw_perf_event *hwc;
4105 long err; 4104 long err;
4106 4105
4107 counter = kzalloc(sizeof(*counter), gfpflags); 4106 event = kzalloc(sizeof(*event), gfpflags);
4108 if (!counter) 4107 if (!event)
4109 return ERR_PTR(-ENOMEM); 4108 return ERR_PTR(-ENOMEM);
4110 4109
4111 /* 4110 /*
4112 * Single counters are their own group leaders, with an 4111 * Single events are their own group leaders, with an
4113 * empty sibling list: 4112 * empty sibling list:
4114 */ 4113 */
4115 if (!group_leader) 4114 if (!group_leader)
4116 group_leader = counter; 4115 group_leader = event;
4117 4116
4118 mutex_init(&counter->child_mutex); 4117 mutex_init(&event->child_mutex);
4119 INIT_LIST_HEAD(&counter->child_list); 4118 INIT_LIST_HEAD(&event->child_list);
4120 4119
4121 INIT_LIST_HEAD(&counter->list_entry); 4120 INIT_LIST_HEAD(&event->group_entry);
4122 INIT_LIST_HEAD(&counter->event_entry); 4121 INIT_LIST_HEAD(&event->event_entry);
4123 INIT_LIST_HEAD(&counter->sibling_list); 4122 INIT_LIST_HEAD(&event->sibling_list);
4124 init_waitqueue_head(&counter->waitq); 4123 init_waitqueue_head(&event->waitq);
4125 4124
4126 mutex_init(&counter->mmap_mutex); 4125 mutex_init(&event->mmap_mutex);
4127 4126
4128 counter->cpu = cpu; 4127 event->cpu = cpu;
4129 counter->attr = *attr; 4128 event->attr = *attr;
4130 counter->group_leader = group_leader; 4129 event->group_leader = group_leader;
4131 counter->pmu = NULL; 4130 event->pmu = NULL;
4132 counter->ctx = ctx; 4131 event->ctx = ctx;
4133 counter->oncpu = -1; 4132 event->oncpu = -1;
4134 4133
4135 counter->parent = parent_counter; 4134 event->parent = parent_event;
4136 4135
4137 counter->ns = get_pid_ns(current->nsproxy->pid_ns); 4136 event->ns = get_pid_ns(current->nsproxy->pid_ns);
4138 counter->id = atomic64_inc_return(&perf_counter_id); 4137 event->id = atomic64_inc_return(&perf_event_id);
4139 4138
4140 counter->state = PERF_COUNTER_STATE_INACTIVE; 4139 event->state = PERF_EVENT_STATE_INACTIVE;
4141 4140
4142 if (attr->disabled) 4141 if (attr->disabled)
4143 counter->state = PERF_COUNTER_STATE_OFF; 4142 event->state = PERF_EVENT_STATE_OFF;
4144 4143
4145 pmu = NULL; 4144 pmu = NULL;
4146 4145
4147 hwc = &counter->hw; 4146 hwc = &event->hw;
4148 hwc->sample_period = attr->sample_period; 4147 hwc->sample_period = attr->sample_period;
4149 if (attr->freq && attr->sample_freq) 4148 if (attr->freq && attr->sample_freq)
4150 hwc->sample_period = 1; 4149 hwc->sample_period = 1;
@@ -4153,7 +4152,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
4153 atomic64_set(&hwc->period_left, hwc->sample_period); 4152 atomic64_set(&hwc->period_left, hwc->sample_period);
4154 4153
4155 /* 4154 /*
4156 * we currently do not support PERF_FORMAT_GROUP on inherited counters 4155 * we currently do not support PERF_FORMAT_GROUP on inherited events
4157 */ 4156 */
4158 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 4157 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4159 goto done; 4158 goto done;
@@ -4162,15 +4161,15 @@ perf_counter_alloc(struct perf_counter_attr *attr,
4162 case PERF_TYPE_RAW: 4161 case PERF_TYPE_RAW:
4163 case PERF_TYPE_HARDWARE: 4162 case PERF_TYPE_HARDWARE:
4164 case PERF_TYPE_HW_CACHE: 4163 case PERF_TYPE_HW_CACHE:
4165 pmu = hw_perf_counter_init(counter); 4164 pmu = hw_perf_event_init(event);
4166 break; 4165 break;
4167 4166
4168 case PERF_TYPE_SOFTWARE: 4167 case PERF_TYPE_SOFTWARE:
4169 pmu = sw_perf_counter_init(counter); 4168 pmu = sw_perf_event_init(event);
4170 break; 4169 break;
4171 4170
4172 case PERF_TYPE_TRACEPOINT: 4171 case PERF_TYPE_TRACEPOINT:
4173 pmu = tp_perf_counter_init(counter); 4172 pmu = tp_perf_event_init(event);
4174 break; 4173 break;
4175 4174
4176 default: 4175 default:
@@ -4184,29 +4183,29 @@ done:
4184 err = PTR_ERR(pmu); 4183 err = PTR_ERR(pmu);
4185 4184
4186 if (err) { 4185 if (err) {
4187 if (counter->ns) 4186 if (event->ns)
4188 put_pid_ns(counter->ns); 4187 put_pid_ns(event->ns);
4189 kfree(counter); 4188 kfree(event);
4190 return ERR_PTR(err); 4189 return ERR_PTR(err);
4191 } 4190 }
4192 4191
4193 counter->pmu = pmu; 4192 event->pmu = pmu;
4194 4193
4195 if (!counter->parent) { 4194 if (!event->parent) {
4196 atomic_inc(&nr_counters); 4195 atomic_inc(&nr_events);
4197 if (counter->attr.mmap) 4196 if (event->attr.mmap)
4198 atomic_inc(&nr_mmap_counters); 4197 atomic_inc(&nr_mmap_events);
4199 if (counter->attr.comm) 4198 if (event->attr.comm)
4200 atomic_inc(&nr_comm_counters); 4199 atomic_inc(&nr_comm_events);
4201 if (counter->attr.task) 4200 if (event->attr.task)
4202 atomic_inc(&nr_task_counters); 4201 atomic_inc(&nr_task_events);
4203 } 4202 }
4204 4203
4205 return counter; 4204 return event;
4206} 4205}
4207 4206
4208static int perf_copy_attr(struct perf_counter_attr __user *uattr, 4207static int perf_copy_attr(struct perf_event_attr __user *uattr,
4209 struct perf_counter_attr *attr) 4208 struct perf_event_attr *attr)
4210{ 4209{
4211 u32 size; 4210 u32 size;
4212 int ret; 4211 int ret;
@@ -4285,11 +4284,11 @@ err_size:
4285 goto out; 4284 goto out;
4286} 4285}
4287 4286
4288int perf_counter_set_output(struct perf_counter *counter, int output_fd) 4287int perf_event_set_output(struct perf_event *event, int output_fd)
4289{ 4288{
4290 struct perf_counter *output_counter = NULL; 4289 struct perf_event *output_event = NULL;
4291 struct file *output_file = NULL; 4290 struct file *output_file = NULL;
4292 struct perf_counter *old_output; 4291 struct perf_event *old_output;
4293 int fput_needed = 0; 4292 int fput_needed = 0;
4294 int ret = -EINVAL; 4293 int ret = -EINVAL;
4295 4294
@@ -4303,28 +4302,28 @@ int perf_counter_set_output(struct perf_counter *counter, int output_fd)
4303 if (output_file->f_op != &perf_fops) 4302 if (output_file->f_op != &perf_fops)
4304 goto out; 4303 goto out;
4305 4304
4306 output_counter = output_file->private_data; 4305 output_event = output_file->private_data;
4307 4306
4308 /* Don't chain output fds */ 4307 /* Don't chain output fds */
4309 if (output_counter->output) 4308 if (output_event->output)
4310 goto out; 4309 goto out;
4311 4310
4312 /* Don't set an output fd when we already have an output channel */ 4311 /* Don't set an output fd when we already have an output channel */
4313 if (counter->data) 4312 if (event->data)
4314 goto out; 4313 goto out;
4315 4314
4316 atomic_long_inc(&output_file->f_count); 4315 atomic_long_inc(&output_file->f_count);
4317 4316
4318set: 4317set:
4319 mutex_lock(&counter->mmap_mutex); 4318 mutex_lock(&event->mmap_mutex);
4320 old_output = counter->output; 4319 old_output = event->output;
4321 rcu_assign_pointer(counter->output, output_counter); 4320 rcu_assign_pointer(event->output, output_event);
4322 mutex_unlock(&counter->mmap_mutex); 4321 mutex_unlock(&event->mmap_mutex);
4323 4322
4324 if (old_output) { 4323 if (old_output) {
4325 /* 4324 /*
4326 * we need to make sure no existing perf_output_*() 4325 * we need to make sure no existing perf_output_*()
4327 * is still referencing this counter. 4326 * is still referencing this event.
4328 */ 4327 */
4329 synchronize_rcu(); 4328 synchronize_rcu();
4330 fput(old_output->filp); 4329 fput(old_output->filp);
@@ -4337,21 +4336,21 @@ out:
4337} 4336}
4338 4337
4339/** 4338/**
4340 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu 4339 * sys_perf_event_open - open a performance event, associate it to a task/cpu
4341 * 4340 *
4342 * @attr_uptr: event type attributes for monitoring/sampling 4341 * @attr_uptr: event_id type attributes for monitoring/sampling
4343 * @pid: target pid 4342 * @pid: target pid
4344 * @cpu: target cpu 4343 * @cpu: target cpu
4345 * @group_fd: group leader counter fd 4344 * @group_fd: group leader event fd
4346 */ 4345 */
4347SYSCALL_DEFINE5(perf_counter_open, 4346SYSCALL_DEFINE5(perf_event_open,
4348 struct perf_counter_attr __user *, attr_uptr, 4347 struct perf_event_attr __user *, attr_uptr,
4349 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 4348 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4350{ 4349{
4351 struct perf_counter *counter, *group_leader; 4350 struct perf_event *event, *group_leader;
4352 struct perf_counter_attr attr; 4351 struct perf_event_attr attr;
4353 struct perf_counter_context *ctx; 4352 struct perf_event_context *ctx;
4354 struct file *counter_file = NULL; 4353 struct file *event_file = NULL;
4355 struct file *group_file = NULL; 4354 struct file *group_file = NULL;
4356 int fput_needed = 0; 4355 int fput_needed = 0;
4357 int fput_needed2 = 0; 4356 int fput_needed2 = 0;
@@ -4371,7 +4370,7 @@ SYSCALL_DEFINE5(perf_counter_open,
4371 } 4370 }
4372 4371
4373 if (attr.freq) { 4372 if (attr.freq) {
4374 if (attr.sample_freq > sysctl_perf_counter_sample_rate) 4373 if (attr.sample_freq > sysctl_perf_event_sample_rate)
4375 return -EINVAL; 4374 return -EINVAL;
4376 } 4375 }
4377 4376
@@ -4383,7 +4382,7 @@ SYSCALL_DEFINE5(perf_counter_open,
4383 return PTR_ERR(ctx); 4382 return PTR_ERR(ctx);
4384 4383
4385 /* 4384 /*
4386 * Look up the group leader (we will attach this counter to it): 4385 * Look up the group leader (we will attach this event to it):
4387 */ 4386 */
4388 group_leader = NULL; 4387 group_leader = NULL;
4389 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { 4388 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
@@ -4414,45 +4413,45 @@ SYSCALL_DEFINE5(perf_counter_open,
4414 goto err_put_context; 4413 goto err_put_context;
4415 } 4414 }
4416 4415
4417 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, 4416 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
4418 NULL, GFP_KERNEL); 4417 NULL, GFP_KERNEL);
4419 err = PTR_ERR(counter); 4418 err = PTR_ERR(event);
4420 if (IS_ERR(counter)) 4419 if (IS_ERR(event))
4421 goto err_put_context; 4420 goto err_put_context;
4422 4421
4423 err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); 4422 err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
4424 if (err < 0) 4423 if (err < 0)
4425 goto err_free_put_context; 4424 goto err_free_put_context;
4426 4425
4427 counter_file = fget_light(err, &fput_needed2); 4426 event_file = fget_light(err, &fput_needed2);
4428 if (!counter_file) 4427 if (!event_file)
4429 goto err_free_put_context; 4428 goto err_free_put_context;
4430 4429
4431 if (flags & PERF_FLAG_FD_OUTPUT) { 4430 if (flags & PERF_FLAG_FD_OUTPUT) {
4432 err = perf_counter_set_output(counter, group_fd); 4431 err = perf_event_set_output(event, group_fd);
4433 if (err) 4432 if (err)
4434 goto err_fput_free_put_context; 4433 goto err_fput_free_put_context;
4435 } 4434 }
4436 4435
4437 counter->filp = counter_file; 4436 event->filp = event_file;
4438 WARN_ON_ONCE(ctx->parent_ctx); 4437 WARN_ON_ONCE(ctx->parent_ctx);
4439 mutex_lock(&ctx->mutex); 4438 mutex_lock(&ctx->mutex);
4440 perf_install_in_context(ctx, counter, cpu); 4439 perf_install_in_context(ctx, event, cpu);
4441 ++ctx->generation; 4440 ++ctx->generation;
4442 mutex_unlock(&ctx->mutex); 4441 mutex_unlock(&ctx->mutex);
4443 4442
4444 counter->owner = current; 4443 event->owner = current;
4445 get_task_struct(current); 4444 get_task_struct(current);
4446 mutex_lock(&current->perf_counter_mutex); 4445 mutex_lock(&current->perf_event_mutex);
4447 list_add_tail(&counter->owner_entry, &current->perf_counter_list); 4446 list_add_tail(&event->owner_entry, &current->perf_event_list);
4448 mutex_unlock(&current->perf_counter_mutex); 4447 mutex_unlock(&current->perf_event_mutex);
4449 4448
4450err_fput_free_put_context: 4449err_fput_free_put_context:
4451 fput_light(counter_file, fput_needed2); 4450 fput_light(event_file, fput_needed2);
4452 4451
4453err_free_put_context: 4452err_free_put_context:
4454 if (err < 0) 4453 if (err < 0)
4455 kfree(counter); 4454 kfree(event);
4456 4455
4457err_put_context: 4456err_put_context:
4458 if (err < 0) 4457 if (err < 0)
@@ -4464,88 +4463,88 @@ err_put_context:
4464} 4463}
4465 4464
4466/* 4465/*
4467 * inherit a counter from parent task to child task: 4466 * inherit an event from parent task to child task:
4468 */ 4467 */
4469static struct perf_counter * 4468static struct perf_event *
4470inherit_counter(struct perf_counter *parent_counter, 4469inherit_event(struct perf_event *parent_event,
4471 struct task_struct *parent, 4470 struct task_struct *parent,
4472 struct perf_counter_context *parent_ctx, 4471 struct perf_event_context *parent_ctx,
4473 struct task_struct *child, 4472 struct task_struct *child,
4474 struct perf_counter *group_leader, 4473 struct perf_event *group_leader,
4475 struct perf_counter_context *child_ctx) 4474 struct perf_event_context *child_ctx)
4476{ 4475{
4477 struct perf_counter *child_counter; 4476 struct perf_event *child_event;
4478 4477
4479 /* 4478 /*
4480 * Instead of creating recursive hierarchies of counters, 4479 * Instead of creating recursive hierarchies of events,
4481 * we link inherited counters back to the original parent, 4480 * we link inherited events back to the original parent,
4482 * which has a filp for sure, which we use as the reference 4481 * which has a filp for sure, which we use as the reference
4483 * count: 4482 * count:
4484 */ 4483 */
4485 if (parent_counter->parent) 4484 if (parent_event->parent)
4486 parent_counter = parent_counter->parent; 4485 parent_event = parent_event->parent;
4487 4486
4488 child_counter = perf_counter_alloc(&parent_counter->attr, 4487 child_event = perf_event_alloc(&parent_event->attr,
4489 parent_counter->cpu, child_ctx, 4488 parent_event->cpu, child_ctx,
4490 group_leader, parent_counter, 4489 group_leader, parent_event,
4491 GFP_KERNEL); 4490 GFP_KERNEL);
4492 if (IS_ERR(child_counter)) 4491 if (IS_ERR(child_event))
4493 return child_counter; 4492 return child_event;
4494 get_ctx(child_ctx); 4493 get_ctx(child_ctx);
4495 4494
4496 /* 4495 /*
4497 * Make the child state follow the state of the parent counter, 4496 * Make the child state follow the state of the parent event,
4498 * not its attr.disabled bit. We hold the parent's mutex, 4497 * not its attr.disabled bit. We hold the parent's mutex,
4499 * so we won't race with perf_counter_{en, dis}able_family. 4498 * so we won't race with perf_event_{en, dis}able_family.
4500 */ 4499 */
4501 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) 4500 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
4502 child_counter->state = PERF_COUNTER_STATE_INACTIVE; 4501 child_event->state = PERF_EVENT_STATE_INACTIVE;
4503 else 4502 else
4504 child_counter->state = PERF_COUNTER_STATE_OFF; 4503 child_event->state = PERF_EVENT_STATE_OFF;
4505 4504
4506 if (parent_counter->attr.freq) 4505 if (parent_event->attr.freq)
4507 child_counter->hw.sample_period = parent_counter->hw.sample_period; 4506 child_event->hw.sample_period = parent_event->hw.sample_period;
4508 4507
4509 /* 4508 /*
4510 * Link it up in the child's context: 4509 * Link it up in the child's context:
4511 */ 4510 */
4512 add_counter_to_ctx(child_counter, child_ctx); 4511 add_event_to_ctx(child_event, child_ctx);
4513 4512
4514 /* 4513 /*
4515 * Get a reference to the parent filp - we will fput it 4514 * Get a reference to the parent filp - we will fput it
4516 * when the child counter exits. This is safe to do because 4515 * when the child event exits. This is safe to do because
4517 * we are in the parent and we know that the filp still 4516 * we are in the parent and we know that the filp still
4518 * exists and has a nonzero count: 4517 * exists and has a nonzero count:
4519 */ 4518 */
4520 atomic_long_inc(&parent_counter->filp->f_count); 4519 atomic_long_inc(&parent_event->filp->f_count);
4521 4520
4522 /* 4521 /*
4523 * Link this into the parent counter's child list 4522 * Link this into the parent event's child list
4524 */ 4523 */
4525 WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4524 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4526 mutex_lock(&parent_counter->child_mutex); 4525 mutex_lock(&parent_event->child_mutex);
4527 list_add_tail(&child_counter->child_list, &parent_counter->child_list); 4526 list_add_tail(&child_event->child_list, &parent_event->child_list);
4528 mutex_unlock(&parent_counter->child_mutex); 4527 mutex_unlock(&parent_event->child_mutex);
4529 4528
4530 return child_counter; 4529 return child_event;
4531} 4530}
4532 4531
4533static int inherit_group(struct perf_counter *parent_counter, 4532static int inherit_group(struct perf_event *parent_event,
4534 struct task_struct *parent, 4533 struct task_struct *parent,
4535 struct perf_counter_context *parent_ctx, 4534 struct perf_event_context *parent_ctx,
4536 struct task_struct *child, 4535 struct task_struct *child,
4537 struct perf_counter_context *child_ctx) 4536 struct perf_event_context *child_ctx)
4538{ 4537{
4539 struct perf_counter *leader; 4538 struct perf_event *leader;
4540 struct perf_counter *sub; 4539 struct perf_event *sub;
4541 struct perf_counter *child_ctr; 4540 struct perf_event *child_ctr;
4542 4541
4543 leader = inherit_counter(parent_counter, parent, parent_ctx, 4542 leader = inherit_event(parent_event, parent, parent_ctx,
4544 child, NULL, child_ctx); 4543 child, NULL, child_ctx);
4545 if (IS_ERR(leader)) 4544 if (IS_ERR(leader))
4546 return PTR_ERR(leader); 4545 return PTR_ERR(leader);
4547 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { 4546 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
4548 child_ctr = inherit_counter(sub, parent, parent_ctx, 4547 child_ctr = inherit_event(sub, parent, parent_ctx,
4549 child, leader, child_ctx); 4548 child, leader, child_ctx);
4550 if (IS_ERR(child_ctr)) 4549 if (IS_ERR(child_ctr))
4551 return PTR_ERR(child_ctr); 4550 return PTR_ERR(child_ctr);
@@ -4553,74 +4552,74 @@ static int inherit_group(struct perf_counter *parent_counter,
4553 return 0; 4552 return 0;
4554} 4553}
4555 4554
4556static void sync_child_counter(struct perf_counter *child_counter, 4555static void sync_child_event(struct perf_event *child_event,
4557 struct task_struct *child) 4556 struct task_struct *child)
4558{ 4557{
4559 struct perf_counter *parent_counter = child_counter->parent; 4558 struct perf_event *parent_event = child_event->parent;
4560 u64 child_val; 4559 u64 child_val;
4561 4560
4562 if (child_counter->attr.inherit_stat) 4561 if (child_event->attr.inherit_stat)
4563 perf_counter_read_event(child_counter, child); 4562 perf_event_read_event(child_event, child);
4564 4563
4565 child_val = atomic64_read(&child_counter->count); 4564 child_val = atomic64_read(&child_event->count);
4566 4565
4567 /* 4566 /*
4568 * Add back the child's count to the parent's count: 4567 * Add back the child's count to the parent's count:
4569 */ 4568 */
4570 atomic64_add(child_val, &parent_counter->count); 4569 atomic64_add(child_val, &parent_event->count);
4571 atomic64_add(child_counter->total_time_enabled, 4570 atomic64_add(child_event->total_time_enabled,
4572 &parent_counter->child_total_time_enabled); 4571 &parent_event->child_total_time_enabled);
4573 atomic64_add(child_counter->total_time_running, 4572 atomic64_add(child_event->total_time_running,
4574 &parent_counter->child_total_time_running); 4573 &parent_event->child_total_time_running);
4575 4574
4576 /* 4575 /*
4577 * Remove this counter from the parent's list 4576 * Remove this event from the parent's list
4578 */ 4577 */
4579 WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4578 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4580 mutex_lock(&parent_counter->child_mutex); 4579 mutex_lock(&parent_event->child_mutex);
4581 list_del_init(&child_counter->child_list); 4580 list_del_init(&child_event->child_list);
4582 mutex_unlock(&parent_counter->child_mutex); 4581 mutex_unlock(&parent_event->child_mutex);
4583 4582
4584 /* 4583 /*
4585 * Release the parent counter, if this was the last 4584 * Release the parent event, if this was the last
4586 * reference to it. 4585 * reference to it.
4587 */ 4586 */
4588 fput(parent_counter->filp); 4587 fput(parent_event->filp);
4589} 4588}
4590 4589
4591static void 4590static void
4592__perf_counter_exit_task(struct perf_counter *child_counter, 4591__perf_event_exit_task(struct perf_event *child_event,
4593 struct perf_counter_context *child_ctx, 4592 struct perf_event_context *child_ctx,
4594 struct task_struct *child) 4593 struct task_struct *child)
4595{ 4594{
4596 struct perf_counter *parent_counter; 4595 struct perf_event *parent_event;
4597 4596
4598 update_counter_times(child_counter); 4597 update_event_times(child_event);
4599 perf_counter_remove_from_context(child_counter); 4598 perf_event_remove_from_context(child_event);
4600 4599
4601 parent_counter = child_counter->parent; 4600 parent_event = child_event->parent;
4602 /* 4601 /*
4603 * It can happen that parent exits first, and has counters 4602 * It can happen that parent exits first, and has events
4604 * that are still around due to the child reference. These 4603 * that are still around due to the child reference. These
4605 * counters need to be zapped - but otherwise linger. 4604 * events need to be zapped - but otherwise linger.
4606 */ 4605 */
4607 if (parent_counter) { 4606 if (parent_event) {
4608 sync_child_counter(child_counter, child); 4607 sync_child_event(child_event, child);
4609 free_counter(child_counter); 4608 free_event(child_event);
4610 } 4609 }
4611} 4610}
4612 4611
4613/* 4612/*
4614 * When a child task exits, feed back counter values to parent counters. 4613 * When a child task exits, feed back event values to parent events.
4615 */ 4614 */
4616void perf_counter_exit_task(struct task_struct *child) 4615void perf_event_exit_task(struct task_struct *child)
4617{ 4616{
4618 struct perf_counter *child_counter, *tmp; 4617 struct perf_event *child_event, *tmp;
4619 struct perf_counter_context *child_ctx; 4618 struct perf_event_context *child_ctx;
4620 unsigned long flags; 4619 unsigned long flags;
4621 4620
4622 if (likely(!child->perf_counter_ctxp)) { 4621 if (likely(!child->perf_event_ctxp)) {
4623 perf_counter_task(child, NULL, 0); 4622 perf_event_task(child, NULL, 0);
4624 return; 4623 return;
4625 } 4624 }
4626 4625
@@ -4631,37 +4630,37 @@ void perf_counter_exit_task(struct task_struct *child)
4631 * scheduled, so we are now safe from rescheduling changing 4630 * scheduled, so we are now safe from rescheduling changing
4632 * our context. 4631 * our context.
4633 */ 4632 */
4634 child_ctx = child->perf_counter_ctxp; 4633 child_ctx = child->perf_event_ctxp;
4635 __perf_counter_task_sched_out(child_ctx); 4634 __perf_event_task_sched_out(child_ctx);
4636 4635
4637 /* 4636 /*
4638 * Take the context lock here so that if find_get_context is 4637 * Take the context lock here so that if find_get_context is
4639 * reading child->perf_counter_ctxp, we wait until it has 4638 * reading child->perf_event_ctxp, we wait until it has
4640 * incremented the context's refcount before we do put_ctx below. 4639 * incremented the context's refcount before we do put_ctx below.
4641 */ 4640 */
4642 spin_lock(&child_ctx->lock); 4641 spin_lock(&child_ctx->lock);
4643 child->perf_counter_ctxp = NULL; 4642 child->perf_event_ctxp = NULL;
4644 /* 4643 /*
4645 * If this context is a clone; unclone it so it can't get 4644 * If this context is a clone; unclone it so it can't get
4646 * swapped to another process while we're removing all 4645 * swapped to another process while we're removing all
4647 * the counters from it. 4646 * the events from it.
4648 */ 4647 */
4649 unclone_ctx(child_ctx); 4648 unclone_ctx(child_ctx);
4650 spin_unlock_irqrestore(&child_ctx->lock, flags); 4649 spin_unlock_irqrestore(&child_ctx->lock, flags);
4651 4650
4652 /* 4651 /*
4653 * Report the task dead after unscheduling the counters so that we 4652 * Report the task dead after unscheduling the events so that we
4654 * won't get any samples after PERF_EVENT_EXIT. We can however still 4653 * won't get any samples after PERF_RECORD_EXIT. We can however still
4655 * get a few PERF_EVENT_READ events. 4654 * get a few PERF_RECORD_READ events.
4656 */ 4655 */
4657 perf_counter_task(child, child_ctx, 0); 4656 perf_event_task(child, child_ctx, 0);
4658 4657
4659 /* 4658 /*
4660 * We can recurse on the same lock type through: 4659 * We can recurse on the same lock type through:
4661 * 4660 *
4662 * __perf_counter_exit_task() 4661 * __perf_event_exit_task()
4663 * sync_child_counter() 4662 * sync_child_event()
4664 * fput(parent_counter->filp) 4663 * fput(parent_event->filp)
4665 * perf_release() 4664 * perf_release()
4666 * mutex_lock(&ctx->mutex) 4665 * mutex_lock(&ctx->mutex)
4667 * 4666 *
@@ -4670,16 +4669,16 @@ void perf_counter_exit_task(struct task_struct *child)
4670 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); 4669 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
4671 4670
4672again: 4671again:
4673 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, 4672 list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list,
4674 list_entry) 4673 group_entry)
4675 __perf_counter_exit_task(child_counter, child_ctx, child); 4674 __perf_event_exit_task(child_event, child_ctx, child);
4676 4675
4677 /* 4676 /*
4678 * If the last counter was a group counter, it will have appended all 4677 * If the last event was a group event, it will have appended all
4679 * its siblings to the list, but we obtained 'tmp' before that which 4678 * its siblings to the list, but we obtained 'tmp' before that which
4680 * will still point to the list head terminating the iteration. 4679 * will still point to the list head terminating the iteration.
4681 */ 4680 */
4682 if (!list_empty(&child_ctx->counter_list)) 4681 if (!list_empty(&child_ctx->group_list))
4683 goto again; 4682 goto again;
4684 4683
4685 mutex_unlock(&child_ctx->mutex); 4684 mutex_unlock(&child_ctx->mutex);
@@ -4691,33 +4690,33 @@ again:
4691 * free an unexposed, unused context as created by inheritance by 4690 * free an unexposed, unused context as created by inheritance by
4692 * init_task below, used by fork() in case of fail. 4691 * init_task below, used by fork() in case of fail.
4693 */ 4692 */
4694void perf_counter_free_task(struct task_struct *task) 4693void perf_event_free_task(struct task_struct *task)
4695{ 4694{
4696 struct perf_counter_context *ctx = task->perf_counter_ctxp; 4695 struct perf_event_context *ctx = task->perf_event_ctxp;
4697 struct perf_counter *counter, *tmp; 4696 struct perf_event *event, *tmp;
4698 4697
4699 if (!ctx) 4698 if (!ctx)
4700 return; 4699 return;
4701 4700
4702 mutex_lock(&ctx->mutex); 4701 mutex_lock(&ctx->mutex);
4703again: 4702again:
4704 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) { 4703 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) {
4705 struct perf_counter *parent = counter->parent; 4704 struct perf_event *parent = event->parent;
4706 4705
4707 if (WARN_ON_ONCE(!parent)) 4706 if (WARN_ON_ONCE(!parent))
4708 continue; 4707 continue;
4709 4708
4710 mutex_lock(&parent->child_mutex); 4709 mutex_lock(&parent->child_mutex);
4711 list_del_init(&counter->child_list); 4710 list_del_init(&event->child_list);
4712 mutex_unlock(&parent->child_mutex); 4711 mutex_unlock(&parent->child_mutex);
4713 4712
4714 fput(parent->filp); 4713 fput(parent->filp);
4715 4714
4716 list_del_counter(counter, ctx); 4715 list_del_event(event, ctx);
4717 free_counter(counter); 4716 free_event(event);
4718 } 4717 }
4719 4718
4720 if (!list_empty(&ctx->counter_list)) 4719 if (!list_empty(&ctx->group_list))
4721 goto again; 4720 goto again;
4722 4721
4723 mutex_unlock(&ctx->mutex); 4722 mutex_unlock(&ctx->mutex);
@@ -4726,37 +4725,37 @@ again:
4726} 4725}
4727 4726
4728/* 4727/*
4729 * Initialize the perf_counter context in task_struct 4728 * Initialize the perf_event context in task_struct
4730 */ 4729 */
4731int perf_counter_init_task(struct task_struct *child) 4730int perf_event_init_task(struct task_struct *child)
4732{ 4731{
4733 struct perf_counter_context *child_ctx, *parent_ctx; 4732 struct perf_event_context *child_ctx, *parent_ctx;
4734 struct perf_counter_context *cloned_ctx; 4733 struct perf_event_context *cloned_ctx;
4735 struct perf_counter *counter; 4734 struct perf_event *event;
4736 struct task_struct *parent = current; 4735 struct task_struct *parent = current;
4737 int inherited_all = 1; 4736 int inherited_all = 1;
4738 int ret = 0; 4737 int ret = 0;
4739 4738
4740 child->perf_counter_ctxp = NULL; 4739 child->perf_event_ctxp = NULL;
4741 4740
4742 mutex_init(&child->perf_counter_mutex); 4741 mutex_init(&child->perf_event_mutex);
4743 INIT_LIST_HEAD(&child->perf_counter_list); 4742 INIT_LIST_HEAD(&child->perf_event_list);
4744 4743
4745 if (likely(!parent->perf_counter_ctxp)) 4744 if (likely(!parent->perf_event_ctxp))
4746 return 0; 4745 return 0;
4747 4746
4748 /* 4747 /*
4749 * This is executed from the parent task context, so inherit 4748 * This is executed from the parent task context, so inherit
4750 * counters that have been marked for cloning. 4749 * events that have been marked for cloning.
4751 * First allocate and initialize a context for the child. 4750 * First allocate and initialize a context for the child.
4752 */ 4751 */
4753 4752
4754 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 4753 child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4755 if (!child_ctx) 4754 if (!child_ctx)
4756 return -ENOMEM; 4755 return -ENOMEM;
4757 4756
4758 __perf_counter_init_context(child_ctx, child); 4757 __perf_event_init_context(child_ctx, child);
4759 child->perf_counter_ctxp = child_ctx; 4758 child->perf_event_ctxp = child_ctx;
4760 get_task_struct(child); 4759 get_task_struct(child);
4761 4760
4762 /* 4761 /*
@@ -4782,16 +4781,16 @@ int perf_counter_init_task(struct task_struct *child)
4782 * We dont have to disable NMIs - we are only looking at 4781 * We dont have to disable NMIs - we are only looking at
4783 * the list, not manipulating it: 4782 * the list, not manipulating it:
4784 */ 4783 */
4785 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { 4784 list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) {
4786 if (counter != counter->group_leader) 4785 if (event != event->group_leader)
4787 continue; 4786 continue;
4788 4787
4789 if (!counter->attr.inherit) { 4788 if (!event->attr.inherit) {
4790 inherited_all = 0; 4789 inherited_all = 0;
4791 continue; 4790 continue;
4792 } 4791 }
4793 4792
4794 ret = inherit_group(counter, parent, parent_ctx, 4793 ret = inherit_group(event, parent, parent_ctx,
4795 child, child_ctx); 4794 child, child_ctx);
4796 if (ret) { 4795 if (ret) {
4797 inherited_all = 0; 4796 inherited_all = 0;
@@ -4805,7 +4804,7 @@ int perf_counter_init_task(struct task_struct *child)
4805 * context, or of whatever the parent is a clone of. 4804 * context, or of whatever the parent is a clone of.
4806 * Note that if the parent is a clone, it could get 4805 * Note that if the parent is a clone, it could get
4807 * uncloned at any point, but that doesn't matter 4806 * uncloned at any point, but that doesn't matter
4808 * because the list of counters and the generation 4807 * because the list of events and the generation
4809 * count can't have changed since we took the mutex. 4808 * count can't have changed since we took the mutex.
4810 */ 4809 */
4811 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); 4810 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
@@ -4826,41 +4825,41 @@ int perf_counter_init_task(struct task_struct *child)
4826 return ret; 4825 return ret;
4827} 4826}
4828 4827
4829static void __cpuinit perf_counter_init_cpu(int cpu) 4828static void __cpuinit perf_event_init_cpu(int cpu)
4830{ 4829{
4831 struct perf_cpu_context *cpuctx; 4830 struct perf_cpu_context *cpuctx;
4832 4831
4833 cpuctx = &per_cpu(perf_cpu_context, cpu); 4832 cpuctx = &per_cpu(perf_cpu_context, cpu);
4834 __perf_counter_init_context(&cpuctx->ctx, NULL); 4833 __perf_event_init_context(&cpuctx->ctx, NULL);
4835 4834
4836 spin_lock(&perf_resource_lock); 4835 spin_lock(&perf_resource_lock);
4837 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; 4836 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
4838 spin_unlock(&perf_resource_lock); 4837 spin_unlock(&perf_resource_lock);
4839 4838
4840 hw_perf_counter_setup(cpu); 4839 hw_perf_event_setup(cpu);
4841} 4840}
4842 4841
4843#ifdef CONFIG_HOTPLUG_CPU 4842#ifdef CONFIG_HOTPLUG_CPU
4844static void __perf_counter_exit_cpu(void *info) 4843static void __perf_event_exit_cpu(void *info)
4845{ 4844{
4846 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 4845 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4847 struct perf_counter_context *ctx = &cpuctx->ctx; 4846 struct perf_event_context *ctx = &cpuctx->ctx;
4848 struct perf_counter *counter, *tmp; 4847 struct perf_event *event, *tmp;
4849 4848
4850 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) 4849 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry)
4851 __perf_counter_remove_from_context(counter); 4850 __perf_event_remove_from_context(event);
4852} 4851}
4853static void perf_counter_exit_cpu(int cpu) 4852static void perf_event_exit_cpu(int cpu)
4854{ 4853{
4855 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 4854 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4856 struct perf_counter_context *ctx = &cpuctx->ctx; 4855 struct perf_event_context *ctx = &cpuctx->ctx;
4857 4856
4858 mutex_lock(&ctx->mutex); 4857 mutex_lock(&ctx->mutex);
4859 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); 4858 smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
4860 mutex_unlock(&ctx->mutex); 4859 mutex_unlock(&ctx->mutex);
4861} 4860}
4862#else 4861#else
4863static inline void perf_counter_exit_cpu(int cpu) { } 4862static inline void perf_event_exit_cpu(int cpu) { }
4864#endif 4863#endif
4865 4864
4866static int __cpuinit 4865static int __cpuinit
@@ -4872,17 +4871,17 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4872 4871
4873 case CPU_UP_PREPARE: 4872 case CPU_UP_PREPARE:
4874 case CPU_UP_PREPARE_FROZEN: 4873 case CPU_UP_PREPARE_FROZEN:
4875 perf_counter_init_cpu(cpu); 4874 perf_event_init_cpu(cpu);
4876 break; 4875 break;
4877 4876
4878 case CPU_ONLINE: 4877 case CPU_ONLINE:
4879 case CPU_ONLINE_FROZEN: 4878 case CPU_ONLINE_FROZEN:
4880 hw_perf_counter_setup_online(cpu); 4879 hw_perf_event_setup_online(cpu);
4881 break; 4880 break;
4882 4881
4883 case CPU_DOWN_PREPARE: 4882 case CPU_DOWN_PREPARE:
4884 case CPU_DOWN_PREPARE_FROZEN: 4883 case CPU_DOWN_PREPARE_FROZEN:
4885 perf_counter_exit_cpu(cpu); 4884 perf_event_exit_cpu(cpu);
4886 break; 4885 break;
4887 4886
4888 default: 4887 default:
@@ -4900,7 +4899,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
4900 .priority = 20, 4899 .priority = 20,
4901}; 4900};
4902 4901
4903void __init perf_counter_init(void) 4902void __init perf_event_init(void)
4904{ 4903{
4905 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, 4904 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4906 (void *)(long)smp_processor_id()); 4905 (void *)(long)smp_processor_id());
@@ -4926,7 +4925,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
4926 err = strict_strtoul(buf, 10, &val); 4925 err = strict_strtoul(buf, 10, &val);
4927 if (err) 4926 if (err)
4928 return err; 4927 return err;
4929 if (val > perf_max_counters) 4928 if (val > perf_max_events)
4930 return -EINVAL; 4929 return -EINVAL;
4931 4930
4932 spin_lock(&perf_resource_lock); 4931 spin_lock(&perf_resource_lock);
@@ -4934,8 +4933,8 @@ perf_set_reserve_percpu(struct sysdev_class *class,
4934 for_each_online_cpu(cpu) { 4933 for_each_online_cpu(cpu) {
4935 cpuctx = &per_cpu(perf_cpu_context, cpu); 4934 cpuctx = &per_cpu(perf_cpu_context, cpu);
4936 spin_lock_irq(&cpuctx->ctx.lock); 4935 spin_lock_irq(&cpuctx->ctx.lock);
4937 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, 4936 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
4938 perf_max_counters - perf_reserved_percpu); 4937 perf_max_events - perf_reserved_percpu);
4939 cpuctx->max_pertask = mpt; 4938 cpuctx->max_pertask = mpt;
4940 spin_unlock_irq(&cpuctx->ctx.lock); 4939 spin_unlock_irq(&cpuctx->ctx.lock);
4941 } 4940 }
@@ -4990,12 +4989,12 @@ static struct attribute *perfclass_attrs[] = {
4990 4989
4991static struct attribute_group perfclass_attr_group = { 4990static struct attribute_group perfclass_attr_group = {
4992 .attrs = perfclass_attrs, 4991 .attrs = perfclass_attrs,
4993 .name = "perf_counters", 4992 .name = "perf_events",
4994}; 4993};
4995 4994
4996static int __init perf_counter_sysfs_init(void) 4995static int __init perf_event_sysfs_init(void)
4997{ 4996{
4998 return sysfs_create_group(&cpu_sysdev_class.kset.kobj, 4997 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4999 &perfclass_attr_group); 4998 &perfclass_attr_group);
5000} 4999}
5001device_initcall(perf_counter_sysfs_init); 5000device_initcall(perf_event_sysfs_init);
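perf_set_reserve_percpu() above is the sysfs store handler for the per-cpu reservation knob, and the attribute-group rename moves that knob from /sys/devices/system/cpu/perf_counters/ to /sys/devices/system/cpu/perf_events/. A minimal userspace sketch (not part of the patch) of poking it follows; the "reserve_percpu" file name is an assumption inferred from the handler name and is not shown in this hunk.

/* Sets the per-cpu reservation; the kernel side rejects values above
 * perf_max_events and recomputes max_pertask as shown above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/perf_events/reserve_percpu", "w");
	int ok;

	if (!f) {
		perror("fopen");
		return 1;
	}
	ok = fprintf(f, "1\n") > 0;	/* reserve one event slot per CPU */
	if (fclose(f) != 0)
		ok = 0;
	if (!ok) {
		perror("reserve_percpu");
		return 1;
	}
	return 0;
}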
diff --git a/kernel/sched.c b/kernel/sched.c
index 830967e18285..91843ba7f237 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
39#include <linux/completion.h> 39#include <linux/completion.h>
40#include <linux/kernel_stat.h> 40#include <linux/kernel_stat.h>
41#include <linux/debug_locks.h> 41#include <linux/debug_locks.h>
42#include <linux/perf_counter.h> 42#include <linux/perf_event.h>
43#include <linux/security.h> 43#include <linux/security.h>
44#include <linux/notifier.h> 44#include <linux/notifier.h>
45#include <linux/profile.h> 45#include <linux/profile.h>
@@ -2053,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2053 if (task_hot(p, old_rq->clock, NULL)) 2053 if (task_hot(p, old_rq->clock, NULL))
2054 schedstat_inc(p, se.nr_forced2_migrations); 2054 schedstat_inc(p, se.nr_forced2_migrations);
2055#endif 2055#endif
2056 perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, 2056 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
2057 1, 1, NULL, 0); 2057 1, 1, NULL, 0);
2058 } 2058 }
2059 p->se.vruntime -= old_cfsrq->min_vruntime - 2059 p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2718,7 +2718,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2718 */ 2718 */
2719 prev_state = prev->state; 2719 prev_state = prev->state;
2720 finish_arch_switch(prev); 2720 finish_arch_switch(prev);
2721 perf_counter_task_sched_in(current, cpu_of(rq)); 2721 perf_event_task_sched_in(current, cpu_of(rq));
2722 finish_lock_switch(rq, prev); 2722 finish_lock_switch(rq, prev);
2723 2723
2724 fire_sched_in_preempt_notifiers(current); 2724 fire_sched_in_preempt_notifiers(current);
@@ -5193,7 +5193,7 @@ void scheduler_tick(void)
5193 curr->sched_class->task_tick(rq, curr, 0); 5193 curr->sched_class->task_tick(rq, curr, 0);
5194 spin_unlock(&rq->lock); 5194 spin_unlock(&rq->lock);
5195 5195
5196 perf_counter_task_tick(curr, cpu); 5196 perf_event_task_tick(curr, cpu);
5197 5197
5198#ifdef CONFIG_SMP 5198#ifdef CONFIG_SMP
5199 rq->idle_at_tick = idle_cpu(cpu); 5199 rq->idle_at_tick = idle_cpu(cpu);
@@ -5409,7 +5409,7 @@ need_resched_nonpreemptible:
5409 5409
5410 if (likely(prev != next)) { 5410 if (likely(prev != next)) {
5411 sched_info_switch(prev, next); 5411 sched_info_switch(prev, next);
5412 perf_counter_task_sched_out(prev, next, cpu); 5412 perf_event_task_sched_out(prev, next, cpu);
5413 5413
5414 rq->nr_switches++; 5414 rq->nr_switches++;
5415 rq->curr = next; 5415 rq->curr = next;
@@ -7671,7 +7671,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7671/* 7671/*
7672 * Register at high priority so that task migration (migrate_all_tasks) 7672 * Register at high priority so that task migration (migrate_all_tasks)
7673 * happens before everything else. This has to be lower priority than 7673 * happens before everything else. This has to be lower priority than
7674 * the notifier in the perf_counter subsystem, though. 7674 * the notifier in the perf_event subsystem, though.
7675 */ 7675 */
7676static struct notifier_block __cpuinitdata migration_notifier = { 7676static struct notifier_block __cpuinitdata migration_notifier = {
7677 .notifier_call = migration_call, 7677 .notifier_call = migration_call,
@@ -9528,7 +9528,7 @@ void __init sched_init(void)
9528 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 9528 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
9529#endif /* SMP */ 9529#endif /* SMP */
9530 9530
9531 perf_counter_init(); 9531 perf_event_init();
9532 9532
9533 scheduler_running = 1; 9533 scheduler_running = 1;
9534} 9534}
diff --git a/kernel/sys.c b/kernel/sys.c
index b3f1097c76fa..ea5c3bcac881 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -14,7 +14,7 @@
14#include <linux/prctl.h> 14#include <linux/prctl.h>
15#include <linux/highuid.h> 15#include <linux/highuid.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/perf_counter.h> 17#include <linux/perf_event.h>
18#include <linux/resource.h> 18#include <linux/resource.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/kexec.h> 20#include <linux/kexec.h>
@@ -1511,11 +1511,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1511 case PR_SET_TSC: 1511 case PR_SET_TSC:
1512 error = SET_TSC_CTL(arg2); 1512 error = SET_TSC_CTL(arg2);
1513 break; 1513 break;
1514 case PR_TASK_PERF_COUNTERS_DISABLE: 1514 case PR_TASK_PERF_EVENTS_DISABLE:
1515 error = perf_counter_task_disable(); 1515 error = perf_event_task_disable();
1516 break; 1516 break;
1517 case PR_TASK_PERF_COUNTERS_ENABLE: 1517 case PR_TASK_PERF_EVENTS_ENABLE:
1518 error = perf_counter_task_enable(); 1518 error = perf_event_task_enable();
1519 break; 1519 break;
1520 case PR_GET_TIMERSLACK: 1520 case PR_GET_TIMERSLACK:
1521 error = current->timer_slack_ns; 1521 error = current->timer_slack_ns;
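The prctl commands are another userspace-visible piece of the rename. A minimal sketch (not part of the patch) of driving them; it assumes post-rename headers, with fallback constants carried over from the old PR_TASK_PERF_COUNTERS_* values.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_TASK_PERF_EVENTS_DISABLE
#define PR_TASK_PERF_EVENTS_DISABLE	31	/* same values as the old *_COUNTERS_ names */
#define PR_TASK_PERF_EVENTS_ENABLE	32
#endif

int main(void)
{
	/* stop counting on the current task ... */
	if (prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0))
		perror("PR_TASK_PERF_EVENTS_DISABLE");

	/* ... run a section that should not perturb the counts ... */

	/* ... and resume counting */
	if (prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0))
		perror("PR_TASK_PERF_EVENTS_ENABLE");

	return 0;
}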
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 68320f6b07b5..515bc230ac2a 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -177,4 +177,4 @@ cond_syscall(sys_eventfd);
177cond_syscall(sys_eventfd2); 177cond_syscall(sys_eventfd2);
178 178
179/* performance counters: */ 179/* performance counters: */
180cond_syscall(sys_perf_counter_open); 180cond_syscall(sys_perf_event_open);
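sys_perf_event_open is the renamed syscall entry point. A minimal userspace sketch (not part of the patch) of opening one hardware counter through it: there is no libc wrapper, so the call goes through syscall(2), with __NR_perf_event_open and struct perf_event_attr taken from the renamed <linux/perf_event.h>.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	/* current task, any CPU, no group leader, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to measure ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %lld\n", count);

	close(fd);
	return 0;
}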
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1a631ba684a4..6ba49c7cb128 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -50,7 +50,7 @@
50#include <linux/reboot.h> 50#include <linux/reboot.h>
51#include <linux/ftrace.h> 51#include <linux/ftrace.h>
52#include <linux/slow-work.h> 52#include <linux/slow-work.h>
53#include <linux/perf_counter.h> 53#include <linux/perf_event.h>
54 54
55#include <asm/uaccess.h> 55#include <asm/uaccess.h>
56#include <asm/processor.h> 56#include <asm/processor.h>
@@ -964,28 +964,28 @@ static struct ctl_table kern_table[] = {
964 .child = slow_work_sysctls, 964 .child = slow_work_sysctls,
965 }, 965 },
966#endif 966#endif
967#ifdef CONFIG_PERF_COUNTERS 967#ifdef CONFIG_PERF_EVENTS
968 { 968 {
969 .ctl_name = CTL_UNNUMBERED, 969 .ctl_name = CTL_UNNUMBERED,
970 .procname = "perf_counter_paranoid", 970 .procname = "perf_event_paranoid",
971 .data = &sysctl_perf_counter_paranoid, 971 .data = &sysctl_perf_event_paranoid,
972 .maxlen = sizeof(sysctl_perf_counter_paranoid), 972 .maxlen = sizeof(sysctl_perf_event_paranoid),
973 .mode = 0644, 973 .mode = 0644,
974 .proc_handler = &proc_dointvec, 974 .proc_handler = &proc_dointvec,
975 }, 975 },
976 { 976 {
977 .ctl_name = CTL_UNNUMBERED, 977 .ctl_name = CTL_UNNUMBERED,
978 .procname = "perf_counter_mlock_kb", 978 .procname = "perf_event_mlock_kb",
979 .data = &sysctl_perf_counter_mlock, 979 .data = &sysctl_perf_event_mlock,
980 .maxlen = sizeof(sysctl_perf_counter_mlock), 980 .maxlen = sizeof(sysctl_perf_event_mlock),
981 .mode = 0644, 981 .mode = 0644,
982 .proc_handler = &proc_dointvec, 982 .proc_handler = &proc_dointvec,
983 }, 983 },
984 { 984 {
985 .ctl_name = CTL_UNNUMBERED, 985 .ctl_name = CTL_UNNUMBERED,
986 .procname = "perf_counter_max_sample_rate", 986 .procname = "perf_event_max_sample_rate",
987 .data = &sysctl_perf_counter_sample_rate, 987 .data = &sysctl_perf_event_sample_rate,
988 .maxlen = sizeof(sysctl_perf_counter_sample_rate), 988 .maxlen = sizeof(sysctl_perf_event_sample_rate),
989 .mode = 0644, 989 .mode = 0644,
990 .proc_handler = &proc_dointvec, 990 .proc_handler = &proc_dointvec,
991 }, 991 },
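These ctl_table entries sit in kern_table, so the files simply change name under /proc/sys/kernel/ (perf_counter_paranoid becomes perf_event_paranoid, and so on). A minimal sketch (not part of the patch) that reads the paranoia setting:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &level) == 1)
		printf("perf_event_paranoid = %d\n", level);
	fclose(f);
	return 0;
}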
diff --git a/kernel/timer.c b/kernel/timer.c
index bbb51074680e..811e5c391456 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,7 +37,7 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/tick.h> 38#include <linux/tick.h>
39#include <linux/kallsyms.h> 39#include <linux/kallsyms.h>
40#include <linux/perf_counter.h> 40#include <linux/perf_event.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42 42
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -1187,7 +1187,7 @@ static void run_timer_softirq(struct softirq_action *h)
1187{ 1187{
1188 struct tvec_base *base = __get_cpu_var(tvec_bases); 1188 struct tvec_base *base = __get_cpu_var(tvec_bases);
1189 1189
1190 perf_counter_do_pending(); 1190 perf_event_do_pending();
1191 1191
1192 hrtimer_run_pending(); 1192 hrtimer_run_pending();
1193 1193
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7a3550cf2597..9fbce6c9d2e1 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -2,7 +2,7 @@
2#include <trace/events/syscalls.h> 2#include <trace/events/syscalls.h>
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/ftrace.h> 4#include <linux/ftrace.h>
5#include <linux/perf_counter.h> 5#include <linux/perf_event.h>
6#include <asm/syscall.h> 6#include <asm/syscall.h>
7 7
8#include "trace_output.h" 8#include "trace_output.h"
@@ -433,7 +433,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
433 rec->nr = syscall_nr; 433 rec->nr = syscall_nr;
434 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 434 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
435 (unsigned long *)&rec->args); 435 (unsigned long *)&rec->args);
436 perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); 436 perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
437 437
438end: 438end:
439 local_irq_restore(flags); 439 local_irq_restore(flags);
@@ -532,7 +532,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
532 rec->nr = syscall_nr; 532 rec->nr = syscall_nr;
533 rec->ret = syscall_get_return_value(current, regs); 533 rec->ret = syscall_get_return_value(current, regs);
534 534
535 perf_tpcounter_event(sys_data->exit_id, 0, 1, rec, size); 535 perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
536 536
537end: 537end:
538 local_irq_restore(flags); 538 local_irq_restore(flags);