path: root/include
diff options
context:
space:
mode:
author    Ingo Molnar <mingo@elte.hu>  2009-09-21 06:02:48 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-09-21 08:28:04 -0400
commit    cdd6c482c9ff9c55475ee7392ec8f672eddb7be6 (patch)
tree      81f98a3ab46c589792057fe2392c1e10f8ad7893 /include
parent    dfc65094d0313cc48969fa60bcf33d693aeb05a7 (diff)
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!

In the past few months the perfcounters subsystem has grown out its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.

Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and less
appropriate.

All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables and
API names. (in an ABI compatible fashion)

The word 'event' is also a bit shorter than 'counter' - which makes it
slightly more convenient to write/handle as well.

Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.

User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)

This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES

... to keep it as correct as possible. This script can also be used by
anyone who has pending perfcounters patches - it converts a Linux
kernel tree over to the new naming. We tried to time this change to
the point in time where the amount of pending patches is the smallest:
the end of the merge window.

Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.

( NOTE: 'counters' are still the proper terminology when we deal with
  hardware registers - and these sed scripts are a bit over-eager in
  renaming them. I've undone some of that, but in case there's
  something left where 'counter' would be better than 'event' we can
  undo that on an individual basis instead of touching an otherwise
  nicely automated patch. )

Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
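To make the "function-invariant" claim concrete, here is a minimal
user-space sketch (not part of the patch; error handling is elided and
the busy loop stands in for a real workload). The syscall keeps its
number and struct perf_event_attr keeps the old perf_counter_attr
layout - only the identifiers are new: sys_perf_event_open(),
struct perf_event_attr and the PERF_EVENT_IOC_* ioctls:

  /* Count CPU cycles for a small workload via the renamed syscall.
   * There is no glibc wrapper, so invoke it with syscall(2) directly. */
  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                              int cpu, int group_fd, unsigned long flags)
  {
          return syscall(__NR_perf_event_open, attr, pid, cpu,
                         group_fd, flags);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          uint64_t count;
          volatile int i;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          attr.disabled = 1;              /* start stopped, enable via ioctl */

          fd = perf_event_open(&attr, 0, -1, -1, 0); /* this task, any CPU */
          if (fd < 0)
                  return 1;

          ioctl(fd, PERF_EVENT_IOC_ENABLE, 0); /* was PERF_COUNTER_IOC_ENABLE */
          for (i = 0; i < 1000000; i++)
                  ;                       /* the measured workload */
          ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

          /* With the default read_format, read() returns one u64 count. */
          if (read(fd, &count, sizeof(count)) == sizeof(count))
                  printf("cycles: %llu\n", (unsigned long long)count);
          close(fd);
          return 0;
  }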
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/unistd.h                                   4
-rw-r--r--  include/linux/init_task.h                                     14
-rw-r--r--  include/linux/perf_event.h (renamed from include/linux/perf_counter.h)  306
-rw-r--r--  include/linux/prctl.h                                          4
-rw-r--r--  include/linux/sched.h                                         12
-rw-r--r--  include/linux/syscalls.h                                       6
-rw-r--r--  include/trace/ftrace.h                                        10
7 files changed, 178 insertions, 178 deletions
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 1125e5a1ee5d..d76b66acea95 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -620,8 +620,8 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
 
 #define __NR_rt_tgsigqueueinfo 240
 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_counter_open 241
-__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+#define __NR_perf_event_open 241
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
 
 #undef __NR_syscalls
 #define __NR_syscalls 242
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9e7f2e8fc66e..21a6f5d9af22 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -106,13 +106,13 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
-#ifdef CONFIG_PERF_COUNTERS
-# define INIT_PERF_COUNTERS(tsk) \
-	.perf_counter_mutex = \
-		__MUTEX_INITIALIZER(tsk.perf_counter_mutex), \
-	.perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
+#ifdef CONFIG_PERF_EVENTS
+# define INIT_PERF_EVENTS(tsk) \
+	.perf_event_mutex = \
+		__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
+	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
 #else
-# define INIT_PERF_COUNTERS(tsk)
+# define INIT_PERF_EVENTS(tsk)
 #endif
 
 /*
@@ -178,7 +178,7 @@ extern struct cred init_cred;
 	}, \
 	.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
 	INIT_IDS \
-	INIT_PERF_COUNTERS(tsk) \
+	INIT_PERF_EVENTS(tsk) \
 	INIT_TRACE_IRQFLAGS \
 	INIT_LOCKDEP \
 	INIT_FTRACE_GRAPH \
diff --git a/include/linux/perf_counter.h b/include/linux/perf_event.h
index f64862732673..ae9d9ed6df2a 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_event.h
@@ -1,5 +1,5 @@
 /*
- * Performance counters:
+ * Performance events:
  *
  * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
@@ -11,8 +11,8 @@
  *
  * For licencing details see kernel-base/COPYING
  */
-#ifndef _LINUX_PERF_COUNTER_H
-#define _LINUX_PERF_COUNTER_H
+#ifndef _LINUX_PERF_EVENT_H
+#define _LINUX_PERF_EVENT_H
 
 #include <linux/types.h>
 #include <linux/ioctl.h>
@@ -36,8 +36,8 @@ enum perf_type_id {
 };
 
 /*
- * Generalized performance counter event types, used by the
- * attr.event_id parameter of the sys_perf_counter_open()
+ * Generalized performance event event_id types, used by the
+ * attr.event_id parameter of the sys_perf_event_open()
  * syscall:
  */
 enum perf_hw_id {
@@ -56,7 +56,7 @@ enum perf_hw_id {
 };
 
 /*
- * Generalized hardware cache counters:
+ * Generalized hardware cache events:
  *
  * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
  * { read, write, prefetch } x
@@ -89,8 +89,8 @@ enum perf_hw_cache_op_result_id {
 };
 
 /*
- * Special "software" counters provided by the kernel, even if the hardware
- * does not support performance counters. These counters measure various
+ * Special "software" events provided by the kernel, even if the hardware
+ * does not support performance events. These events measure various
  * physical and sw events of the kernel (and allow the profiling of them as
  * well):
  */
@@ -110,7 +110,7 @@ enum perf_sw_ids {
  * Bits that can be set in attr.sample_type to request information
  * in the overflow packets.
  */
-enum perf_counter_sample_format {
+enum perf_event_sample_format {
 	PERF_SAMPLE_IP = 1U << 0,
 	PERF_SAMPLE_TID = 1U << 1,
 	PERF_SAMPLE_TIME = 1U << 2,
@@ -127,7 +127,7 @@ enum perf_counter_sample_format {
 };
 
 /*
- * The format of the data returned by read() on a perf counter fd,
+ * The format of the data returned by read() on a perf event fd,
  * as specified by attr.read_format:
  *
  * struct read_format {
@@ -146,7 +146,7 @@ enum perf_counter_sample_format {
  * } && PERF_FORMAT_GROUP
  * };
  */
-enum perf_counter_read_format {
+enum perf_event_read_format {
 	PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
 	PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
 	PERF_FORMAT_ID = 1U << 2,
@@ -158,9 +158,9 @@ enum perf_counter_read_format {
 #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
 
 /*
- * Hardware event to monitor via a performance monitoring counter:
+ * Hardware event_id to monitor via a performance monitoring event:
  */
-struct perf_counter_attr {
+struct perf_event_attr {
 
 	/*
 	 * Major type: hardware/software/tracepoint/etc.
@@ -213,28 +213,28 @@ struct perf_counter_attr {
 };
 
 /*
- * Ioctls that can be done on a perf counter fd:
+ * Ioctls that can be done on a perf event fd:
  */
-#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
-#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
-#define PERF_COUNTER_IOC_REFRESH	_IO ('$', 2)
-#define PERF_COUNTER_IOC_RESET		_IO ('$', 3)
-#define PERF_COUNTER_IOC_PERIOD		_IOW('$', 4, u64)
-#define PERF_COUNTER_IOC_SET_OUTPUT	_IO ('$', 5)
+#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
+#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, u64)
+#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
 
-enum perf_counter_ioc_flags {
+enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP = 1U << 0,
 };
 
 /*
  * Structure of the page that can be mapped via mmap
  */
-struct perf_counter_mmap_page {
+struct perf_event_mmap_page {
 	__u32 version;		/* version number of this structure */
 	__u32 compat_version;	/* lowest version this is compat with */
 
 	/*
-	 * Bits needed to read the hw counters in user-space.
+	 * Bits needed to read the hw events in user-space.
 	 *
 	 * u32 seq;
 	 * s64 count;
@@ -256,10 +256,10 @@ struct perf_counter_mmap_page {
 	 * processes.
 	 */
 	__u32 lock;		/* seqlock for synchronization */
-	__u32 index;		/* hardware counter identifier */
-	__s64 offset;		/* add to hardware counter value */
-	__u64 time_enabled;	/* time counter active */
-	__u64 time_running;	/* time counter on cpu */
+	__u32 index;		/* hardware event identifier */
+	__s64 offset;		/* add to hardware event value */
+	__u64 time_enabled;	/* time event active */
+	__u64 time_running;	/* time event on cpu */
 
 	/*
 	 * Hole for extension of the self monitor capabilities
@@ -272,7 +272,7 @@ struct perf_counter_mmap_page {
 	 *
 	 * User-space reading the @data_head value should issue an rmb(), on
 	 * SMP capable platforms, after reading this value -- see
-	 * perf_counter_wakeup().
+	 * perf_event_wakeup().
 	 *
 	 * When the mapping is PROT_WRITE the @data_tail value should be
 	 * written by userspace to reflect the last read data. In this case
@@ -282,11 +282,11 @@ struct perf_counter_mmap_page {
 	__u64 data_tail;	/* user-space written tail */
 };
 
-#define PERF_EVENT_MISC_CPUMODE_MASK	(3 << 0)
-#define PERF_EVENT_MISC_CPUMODE_UNKNOWN	(0 << 0)
-#define PERF_EVENT_MISC_KERNEL		(1 << 0)
-#define PERF_EVENT_MISC_USER		(2 << 0)
-#define PERF_EVENT_MISC_HYPERVISOR	(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK	(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_RECORD_MISC_KERNEL		(1 << 0)
+#define PERF_RECORD_MISC_USER		(2 << 0)
+#define PERF_RECORD_MISC_HYPERVISOR	(3 << 0)
 
 struct perf_event_header {
 	__u32 type;
@@ -310,7 +310,7 @@ enum perf_event_type {
 	 * char filename[];
 	 * };
 	 */
-	PERF_EVENT_MMAP		= 1,
+	PERF_RECORD_MMAP	= 1,
 
 	/*
 	 * struct {
@@ -319,7 +319,7 @@ enum perf_event_type {
 	 * u64 lost;
 	 * };
 	 */
-	PERF_EVENT_LOST		= 2,
+	PERF_RECORD_LOST	= 2,
 
 	/*
 	 * struct {
@@ -329,7 +329,7 @@ enum perf_event_type {
 	 * char comm[];
 	 * };
 	 */
-	PERF_EVENT_COMM		= 3,
+	PERF_RECORD_COMM	= 3,
 
 	/*
 	 * struct {
@@ -339,7 +339,7 @@ enum perf_event_type {
 	 * u64 time;
 	 * };
 	 */
-	PERF_EVENT_EXIT		= 4,
+	PERF_RECORD_EXIT	= 4,
 
 	/*
 	 * struct {
@@ -349,8 +349,8 @@ enum perf_event_type {
 	 * u64 stream_id;
 	 * };
 	 */
-	PERF_EVENT_THROTTLE	= 5,
-	PERF_EVENT_UNTHROTTLE	= 6,
+	PERF_RECORD_THROTTLE	= 5,
+	PERF_RECORD_UNTHROTTLE	= 6,
 
 	/*
 	 * struct {
@@ -360,7 +360,7 @@ enum perf_event_type {
 	 * { u64 time; } && PERF_SAMPLE_TIME
 	 * };
 	 */
-	PERF_EVENT_FORK		= 7,
+	PERF_RECORD_FORK	= 7,
 
 	/*
 	 * struct {
@@ -370,7 +370,7 @@ enum perf_event_type {
 	 * struct read_format values;
 	 * };
 	 */
-	PERF_EVENT_READ		= 8,
+	PERF_RECORD_READ	= 8,
 
 	/*
 	 * struct {
@@ -395,7 +395,7 @@ enum perf_event_type {
 	 * #
 	 * # That is, the ABI doesn't make any promises wrt to
 	 * # the stability of its content, it may vary depending
-	 * # on event, hardware, kernel version and phase of
+	 * # on event_id, hardware, kernel version and phase of
 	 * # the moon.
 	 * #
 	 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
@@ -405,9 +405,9 @@ enum perf_event_type {
 	 * char data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
-	PERF_EVENT_SAMPLE	= 9,
+	PERF_RECORD_SAMPLE	= 9,
 
-	PERF_EVENT_MAX,		/* non-ABI */
+	PERF_RECORD_MAX,	/* non-ABI */
 };
 
 enum perf_callchain_context {
@@ -430,8 +430,8 @@ enum perf_callchain_context {
  * Kernel-internal data types and definitions:
  */
 
-#ifdef CONFIG_PERF_COUNTERS
-# include <asm/perf_counter.h>
+#ifdef CONFIG_PERF_EVENTS
+# include <asm/perf_event.h>
 #endif
 
 #include <linux/list.h>
@@ -459,15 +459,15 @@ struct perf_raw_record {
 struct task_struct;
 
 /**
- * struct hw_perf_counter - performance counter hardware details:
+ * struct hw_perf_event - performance event hardware details:
  */
-struct hw_perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
+struct hw_perf_event {
+#ifdef CONFIG_PERF_EVENTS
 	union {
 		struct { /* hardware */
 			u64 config;
 			unsigned long config_base;
-			unsigned long counter_base;
+			unsigned long event_base;
 			int idx;
 		};
 		union { /* software */
@@ -487,26 +487,26 @@ struct hw_perf_counter {
 #endif
 };
 
-struct perf_counter;
+struct perf_event;
 
 /**
  * struct pmu - generic performance monitoring unit
  */
 struct pmu {
-	int (*enable)		(struct perf_counter *counter);
-	void (*disable)		(struct perf_counter *counter);
-	void (*read)		(struct perf_counter *counter);
-	void (*unthrottle)	(struct perf_counter *counter);
+	int (*enable)		(struct perf_event *event);
+	void (*disable)		(struct perf_event *event);
+	void (*read)		(struct perf_event *event);
+	void (*unthrottle)	(struct perf_event *event);
 };
 
 /**
- * enum perf_counter_active_state - the states of a counter
+ * enum perf_event_active_state - the states of a event
  */
-enum perf_counter_active_state {
-	PERF_COUNTER_STATE_ERROR	= -2,
-	PERF_COUNTER_STATE_OFF		= -1,
-	PERF_COUNTER_STATE_INACTIVE	= 0,
-	PERF_COUNTER_STATE_ACTIVE	= 1,
+enum perf_event_active_state {
+	PERF_EVENT_STATE_ERROR		= -2,
+	PERF_EVENT_STATE_OFF		= -1,
+	PERF_EVENT_STATE_INACTIVE	= 0,
+	PERF_EVENT_STATE_ACTIVE		= 1,
 };
 
 struct file;
@@ -518,7 +518,7 @@ struct perf_mmap_data {
 	int nr_locked;			/* nr pages mlocked */
 
 	atomic_t poll;			/* POLL_ for wakeups */
-	atomic_t events;		/* event limit */
+	atomic_t events;		/* event_id limit */
 
 	atomic_long_t head;		/* write position */
 	atomic_long_t done_head;	/* completed head */
@@ -529,7 +529,7 @@ struct perf_mmap_data {
 
 	long watermark;			/* wakeup watermark */
 
-	struct perf_counter_mmap_page *user_page;
+	struct perf_event_mmap_page *user_page;
 	void *data_pages[0];
 };
 
@@ -539,56 +539,56 @@ struct perf_pending_entry {
 };
 
 /**
- * struct perf_counter - performance counter kernel representation:
+ * struct perf_event - performance event kernel representation:
  */
-struct perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
+struct perf_event {
+#ifdef CONFIG_PERF_EVENTS
 	struct list_head	group_entry;
 	struct list_head	event_entry;
 	struct list_head	sibling_list;
 	int			nr_siblings;
-	struct perf_counter	*group_leader;
-	struct perf_counter	*output;
+	struct perf_event	*group_leader;
+	struct perf_event	*output;
 	const struct pmu	*pmu;
 
-	enum perf_counter_active_state	state;
+	enum perf_event_active_state	state;
 	atomic64_t		count;
 
 	/*
-	 * These are the total time in nanoseconds that the counter
+	 * These are the total time in nanoseconds that the event
 	 * has been enabled (i.e. eligible to run, and the task has
-	 * been scheduled in, if this is a per-task counter)
+	 * been scheduled in, if this is a per-task event)
 	 * and running (scheduled onto the CPU), respectively.
 	 *
 	 * They are computed from tstamp_enabled, tstamp_running and
-	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
+	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
 	 */
 	u64			total_time_enabled;
 	u64			total_time_running;
 
 	/*
 	 * These are timestamps used for computing total_time_enabled
-	 * and total_time_running when the counter is in INACTIVE or
+	 * and total_time_running when the event is in INACTIVE or
 	 * ACTIVE state, measured in nanoseconds from an arbitrary point
 	 * in time.
-	 * tstamp_enabled: the notional time when the counter was enabled
-	 * tstamp_running: the notional time when the counter was scheduled on
+	 * tstamp_enabled: the notional time when the event was enabled
+	 * tstamp_running: the notional time when the event was scheduled on
 	 * tstamp_stopped: in INACTIVE state, the notional time when the
-	 *	counter was scheduled off.
+	 *	event was scheduled off.
 	 */
 	u64			tstamp_enabled;
 	u64			tstamp_running;
 	u64			tstamp_stopped;
 
-	struct perf_counter_attr	attr;
-	struct hw_perf_counter	hw;
+	struct perf_event_attr	attr;
+	struct hw_perf_event	hw;
 
-	struct perf_counter_context	*ctx;
+	struct perf_event_context	*ctx;
 	struct file		*filp;
 
 	/*
 	 * These accumulate total time (in nanoseconds) that children
-	 * counters have been enabled and running, respectively.
+	 * events have been enabled and running, respectively.
 	 */
 	atomic64_t		child_total_time_enabled;
 	atomic64_t		child_total_time_running;
@@ -598,7 +598,7 @@ struct perf_counter {
 	 */
 	struct mutex		child_mutex;
 	struct list_head	child_list;
-	struct perf_counter	*parent;
+	struct perf_event	*parent;
 
 	int			oncpu;
 	int			cpu;
@@ -623,7 +623,7 @@ struct perf_counter {
 
 	atomic_t		event_limit;
 
-	void (*destroy)(struct perf_counter *);
+	void (*destroy)(struct perf_event *);
 	struct rcu_head		rcu_head;
 
 	struct pid_namespace	*ns;
@@ -632,18 +632,18 @@ struct perf_counter {
 };
 
 /**
- * struct perf_counter_context - counter context structure
+ * struct perf_event_context - event context structure
  *
- * Used as a container for task counters and CPU counters as well:
+ * Used as a container for task events and CPU events as well:
  */
-struct perf_counter_context {
+struct perf_event_context {
 	/*
-	 * Protect the states of the counters in the list,
+	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
 	spinlock_t		lock;
 	/*
-	 * Protect the list of counters. Locking either mutex or lock
+	 * Protect the list of events. Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
 	 * the list you need to lock both the mutex and the spinlock.
 	 */
@@ -651,7 +651,7 @@ struct perf_counter_context {
 
 	struct list_head	group_list;
 	struct list_head	event_list;
-	int			nr_counters;
+	int			nr_events;
 	int			nr_active;
 	int			is_active;
 	int			nr_stat;
@@ -668,7 +668,7 @@ struct perf_counter_context {
 	 * These fields let us detect when two contexts have both
 	 * been cloned (inherited) from a common ancestor.
 	 */
-	struct perf_counter_context *parent_ctx;
+	struct perf_event_context *parent_ctx;
 	u64			parent_gen;
 	u64			generation;
 	int			pin_count;
@@ -676,11 +676,11 @@ struct perf_counter_context {
 };
 
 /**
- * struct perf_counter_cpu_context - per cpu counter context structure
+ * struct perf_event_cpu_context - per cpu event context structure
  */
 struct perf_cpu_context {
-	struct perf_counter_context ctx;
-	struct perf_counter_context *task_ctx;
+	struct perf_event_context ctx;
+	struct perf_event_context *task_ctx;
 	int			active_oncpu;
 	int			max_pertask;
 	int			exclusive;
@@ -694,7 +694,7 @@ struct perf_cpu_context {
 };
 
 struct perf_output_handle {
-	struct perf_counter	*counter;
+	struct perf_event	*event;
 	struct perf_mmap_data	*data;
 	unsigned long		head;
 	unsigned long		offset;
@@ -704,35 +704,35 @@ struct perf_output_handle {
 	unsigned long		flags;
 };
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 
 /*
  * Set by architecture code:
  */
-extern int perf_max_counters;
+extern int perf_max_events;
 
-extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
+extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_counter_task_sched_out(struct task_struct *task,
+extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_out(struct task_struct *task,
 					struct task_struct *next, int cpu);
-extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern int perf_counter_init_task(struct task_struct *child);
-extern void perf_counter_exit_task(struct task_struct *child);
-extern void perf_counter_free_task(struct task_struct *task);
-extern void set_perf_counter_pending(void);
-extern void perf_counter_do_pending(void);
-extern void perf_counter_print_debug(void);
+extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern int perf_event_init_task(struct task_struct *child);
+extern void perf_event_exit_task(struct task_struct *child);
+extern void perf_event_free_task(struct task_struct *task);
+extern void set_perf_event_pending(void);
+extern void perf_event_do_pending(void);
+extern void perf_event_print_debug(void);
 extern void __perf_disable(void);
 extern bool __perf_enable(void);
 extern void perf_disable(void);
 extern void perf_enable(void);
-extern int perf_counter_task_disable(void);
-extern int perf_counter_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
+extern int perf_event_task_disable(void);
+extern int perf_event_task_enable(void);
+extern int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_counter_context *ctx, int cpu);
-extern void perf_counter_update_userpage(struct perf_counter *counter);
+	       struct perf_event_context *ctx, int cpu);
+extern void perf_event_update_userpage(struct perf_event *event);
 
 struct perf_sample_data {
 	u64 type;
@@ -758,96 +758,96 @@ struct perf_sample_data {
 extern void perf_output_sample(struct perf_output_handle *handle,
 			       struct perf_event_header *header,
 			       struct perf_sample_data *data,
-			       struct perf_counter *counter);
+			       struct perf_event *event);
 extern void perf_prepare_sample(struct perf_event_header *header,
 				struct perf_sample_data *data,
-				struct perf_counter *counter,
+				struct perf_event *event,
 				struct pt_regs *regs);
 
-extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+extern int perf_event_overflow(struct perf_event *event, int nmi,
 				 struct perf_sample_data *data,
 				 struct pt_regs *regs);
 
 /*
- * Return 1 for a software counter, 0 for a hardware counter
+ * Return 1 for a software event, 0 for a hardware event
  */
-static inline int is_software_counter(struct perf_counter *counter)
+static inline int is_software_event(struct perf_event *event)
 {
-	return (counter->attr.type != PERF_TYPE_RAW) &&
-		(counter->attr.type != PERF_TYPE_HARDWARE) &&
-		(counter->attr.type != PERF_TYPE_HW_CACHE);
+	return (event->attr.type != PERF_TYPE_RAW) &&
+		(event->attr.type != PERF_TYPE_HARDWARE) &&
+		(event->attr.type != PERF_TYPE_HW_CACHE);
 }
 
-extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
-extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
 static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swcounter_enabled[event]))
-		__perf_swcounter_event(event, nr, nmi, regs, addr);
+	if (atomic_read(&perf_swevent_enabled[event_id]))
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
-extern void __perf_counter_mmap(struct vm_area_struct *vma);
+extern void __perf_event_mmap(struct vm_area_struct *vma);
 
-static inline void perf_counter_mmap(struct vm_area_struct *vma)
+static inline void perf_event_mmap(struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & VM_EXEC)
-		__perf_counter_mmap(vma);
+		__perf_event_mmap(vma);
 }
 
-extern void perf_counter_comm(struct task_struct *tsk);
-extern void perf_counter_fork(struct task_struct *tsk);
+extern void perf_event_comm(struct task_struct *tsk);
+extern void perf_event_fork(struct task_struct *tsk);
 
 extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
-extern int sysctl_perf_counter_paranoid;
-extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_sample_rate;
+extern int sysctl_perf_event_paranoid;
+extern int sysctl_perf_event_mlock;
+extern int sysctl_perf_event_sample_rate;
 
-extern void perf_counter_init(void);
-extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
+extern void perf_event_init(void);
+extern void perf_tp_event(int event_id, u64 addr, u64 count,
 				 void *record, int entry_size);
 
 #ifndef perf_misc_flags
-#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_EVENT_MISC_USER : \
-				 PERF_EVENT_MISC_KERNEL)
+#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
+				 PERF_RECORD_MISC_KERNEL)
 #define perf_instruction_pointer(regs)	instruction_pointer(regs)
 #endif
 
 extern int perf_output_begin(struct perf_output_handle *handle,
-			     struct perf_counter *counter, unsigned int size,
+			     struct perf_event *event, unsigned int size,
 			     int nmi, int sample);
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
 			     const void *buf, unsigned int len);
 #else
 static inline void
-perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
+perf_event_task_sched_in(struct task_struct *task, int cpu) { }
 static inline void
-perf_counter_task_sched_out(struct task_struct *task,
+perf_event_task_sched_out(struct task_struct *task,
 			    struct task_struct *next, int cpu) { }
 static inline void
-perf_counter_task_tick(struct task_struct *task, int cpu) { }
-static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
-static inline void perf_counter_exit_task(struct task_struct *child) { }
-static inline void perf_counter_free_task(struct task_struct *task) { }
-static inline void perf_counter_do_pending(void) { }
-static inline void perf_counter_print_debug(void) { }
+perf_event_task_tick(struct task_struct *task, int cpu) { }
+static inline int perf_event_init_task(struct task_struct *child) { return 0; }
+static inline void perf_event_exit_task(struct task_struct *child) { }
+static inline void perf_event_free_task(struct task_struct *task) { }
+static inline void perf_event_do_pending(void) { }
+static inline void perf_event_print_debug(void) { }
 static inline void perf_disable(void) { }
 static inline void perf_enable(void) { }
-static inline int perf_counter_task_disable(void) { return -EINVAL; }
-static inline int perf_counter_task_enable(void) { return -EINVAL; }
+static inline int perf_event_task_disable(void) { return -EINVAL; }
+static inline int perf_event_task_enable(void) { return -EINVAL; }
 
 static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi,
+perf_sw_event(u32 event_id, u64 nr, int nmi,
 		     struct pt_regs *regs, u64 addr) { }
 
-static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
-static inline void perf_counter_comm(struct task_struct *tsk) { }
-static inline void perf_counter_fork(struct task_struct *tsk) { }
-static inline void perf_counter_init(void) { }
+static inline void perf_event_mmap(struct vm_area_struct *vma) { }
+static inline void perf_event_comm(struct task_struct *tsk) { }
+static inline void perf_event_fork(struct task_struct *tsk) { }
+static inline void perf_event_init(void) { }
 
 #endif
 
@@ -855,4 +855,4 @@ static inline void perf_counter_init(void) { }
 	perf_output_copy((handle), &(x), sizeof(x))
 
 #endif /* __KERNEL__ */
-#endif /* _LINUX_PERF_COUNTER_H */
+#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index b00df4c79c63..07bff666e65b 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -85,7 +85,7 @@
 #define PR_SET_TIMERSLACK 29
 #define PR_GET_TIMERSLACK 30
 
-#define PR_TASK_PERF_COUNTERS_DISABLE 31
-#define PR_TASK_PERF_COUNTERS_ENABLE 32
+#define PR_TASK_PERF_EVENTS_DISABLE 31
+#define PR_TASK_PERF_EVENTS_ENABLE 32
 
 #endif /* _LINUX_PRCTL_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8af3d249170e..8b265a8986d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -100,7 +100,7 @@ struct robust_list_head;
 struct bio;
 struct fs_struct;
 struct bts_context;
-struct perf_counter_context;
+struct perf_event_context;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -701,7 +701,7 @@ struct user_struct {
 #endif
 #endif
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
 #endif
 };
@@ -1449,10 +1449,10 @@ struct task_struct {
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
 #endif
-#ifdef CONFIG_PERF_COUNTERS
-	struct perf_counter_context *perf_counter_ctxp;
-	struct mutex perf_counter_mutex;
-	struct list_head perf_counter_list;
+#ifdef CONFIG_PERF_EVENTS
+	struct perf_event_context *perf_event_ctxp;
+	struct mutex perf_event_mutex;
+	struct list_head perf_event_list;
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy; /* Protected by alloc_lock */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a8e37821cc60..02f19f9a76c6 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -55,7 +55,7 @@ struct compat_timeval;
 struct robust_list_head;
 struct getcpu_cache;
 struct old_linux_dirent;
-struct perf_counter_attr;
+struct perf_event_attr;
 
 #include <linux/types.h>
 #include <linux/aio_abi.h>
@@ -885,7 +885,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
 int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
 
 
-asmlinkage long sys_perf_counter_open(
-		struct perf_counter_attr __user *attr_uptr,
+asmlinkage long sys_perf_event_open(
+		struct perf_event_attr __user *attr_uptr,
 		pid_t pid, int cpu, int group_fd, unsigned long flags);
 #endif
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 72a3b437b829..ec91e78244f0 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -378,7 +378,7 @@ static inline int ftrace_get_offsets_##call( \
 #ifdef CONFIG_EVENT_PROFILE
 
 /*
- * Generate the functions needed for tracepoint perf_counter support.
+ * Generate the functions needed for tracepoint perf_event support.
  *
  * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
  *
@@ -656,7 +656,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * {
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	extern void perf_tp_event(int, u64, u64, void *, int);
  *	struct ftrace_raw_##call *entry;
  *	u64 __addr = 0, __count = 1;
  *	unsigned long irq_flags;
@@ -691,7 +691,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *
  *	<assign>  <- affect our values
  *
- *	perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *	perf_tp_event(event_call->id, __addr, __count, entry,
  *		__entry_size);  <- submit them to perf counter
  *	} while (0);
  *
@@ -712,7 +712,7 @@ static void ftrace_profile_##call(proto) \
 { \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_event_call *event_call = &event_##call; \
-	extern void perf_tpcounter_event(int, u64, u64, void *, int); \
+	extern void perf_tp_event(int, u64, u64, void *, int); \
 	struct ftrace_raw_##call *entry; \
 	u64 __addr = 0, __count = 1; \
 	unsigned long irq_flags; \
@@ -742,7 +742,7 @@ static void ftrace_profile_##call(proto) \
 	\
 	{ assign; } \
 	\
-	perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+	perf_tp_event(event_call->id, __addr, __count, entry,\
 		__entry_size); \
 	} while (0); \
 	\