author     Linus Torvalds <torvalds@linux-foundation.org>    2009-09-21 12:15:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-09-21 12:15:07 -0400
commit     43c1266ce4dc06bfd236cec31e11e9ecd69c0bef (patch)
tree       40a86739ca4c36200f447f655b01c57cfe646e26
parent     b8c7f1dc5ca4e0d10709182233cdab932cef593d (diff)
parent     57c0c15b5244320065374ad2c54f4fbec77a6428 (diff)
Merge branch 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Tidy up after the big rename
  perf: Do the big rename: Performance Counters -> Performance Events
  perf_counter: Rename 'event' to event_id/hw_event
  perf_counter: Rename list_entry -> group_entry, counter_list -> group_list

Manually resolved some fairly trivial conflicts with the tracing tree in
include/trace/ftrace.h and kernel/trace/trace_syscalls.c.
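For orientation, the user-visible effect of the rename is that the syscall is now exposed as
perf_event_open() (__NR_perf_event_open) and the userspace header becomes <linux/perf_event.h>.
The sketch below is not part of this merge; it is a minimal, illustrative consumer of the renamed
interface that counts CPU cycles across a busy loop, assuming a post-rename kernel and headers.

/*
 * Minimal sketch (illustrative, not from this merge): count CPU cycles
 * for a short busy loop via the renamed perf_event_open() syscall.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	/* glibc provides no wrapper; invoke the renamed syscall directly. */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;					/* work being measured */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
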
-rw-r--r--  MAINTAINERS  2
-rw-r--r--  arch/arm/include/asm/unistd.h  2
-rw-r--r--  arch/arm/kernel/calls.S  2
-rw-r--r--  arch/blackfin/include/asm/unistd.h  2
-rw-r--r--  arch/blackfin/mach-common/entry.S  2
-rw-r--r--  arch/frv/Kconfig  2
-rw-r--r--  arch/frv/include/asm/perf_event.h (renamed from arch/frv/include/asm/perf_counter.h)  10
-rw-r--r--  arch/frv/include/asm/unistd.h  2
-rw-r--r--  arch/frv/kernel/entry.S  2
-rw-r--r--  arch/frv/lib/Makefile  2
-rw-r--r--  arch/frv/lib/perf_event.c (renamed from arch/frv/lib/perf_counter.c)  8
-rw-r--r--  arch/m68k/include/asm/unistd.h  2
-rw-r--r--  arch/m68k/kernel/entry.S  2
-rw-r--r--  arch/m68knommu/kernel/syscalltable.S  2
-rw-r--r--  arch/microblaze/include/asm/unistd.h  2
-rw-r--r--  arch/microblaze/kernel/syscall_table.S  2
-rw-r--r--  arch/mips/include/asm/unistd.h  6
-rw-r--r--  arch/mips/kernel/scall32-o32.S  2
-rw-r--r--  arch/mips/kernel/scall64-64.S  2
-rw-r--r--  arch/mips/kernel/scall64-n32.S  2
-rw-r--r--  arch/mips/kernel/scall64-o32.S  2
-rw-r--r--  arch/mn10300/include/asm/unistd.h  2
-rw-r--r--  arch/mn10300/kernel/entry.S  2
-rw-r--r--  arch/parisc/Kconfig  2
-rw-r--r--  arch/parisc/include/asm/perf_counter.h  7
-rw-r--r--  arch/parisc/include/asm/perf_event.h  7
-rw-r--r--  arch/parisc/include/asm/unistd.h  4
-rw-r--r--  arch/parisc/kernel/syscall_table.S  2
-rw-r--r--  arch/powerpc/Kconfig  2
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h  22
-rw-r--r--  arch/powerpc/include/asm/paca.h  2
-rw-r--r--  arch/powerpc/include/asm/perf_event.h (renamed from arch/powerpc/include/asm/perf_counter.h)  26
-rw-r--r--  arch/powerpc/include/asm/systbl.h  2
-rw-r--r--  arch/powerpc/include/asm/unistd.h  2
-rw-r--r--  arch/powerpc/kernel/Makefile  2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c  2
-rw-r--r--  arch/powerpc/kernel/entry_64.S  8
-rw-r--r--  arch/powerpc/kernel/irq.c  8
-rw-r--r--  arch/powerpc/kernel/mpc7450-pmu.c  2
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c  2
-rw-r--r--  arch/powerpc/kernel/perf_event.c (renamed from arch/powerpc/kernel/perf_counter.c)  582
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c  2
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c  2
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c  2
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c  2
-rw-r--r--  arch/powerpc/kernel/power7-pmu.c  2
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c  2
-rw-r--r--  arch/powerpc/kernel/time.c  30
-rw-r--r--  arch/powerpc/mm/fault.c  8
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype  4
-rw-r--r--  arch/s390/Kconfig  2
-rw-r--r--  arch/s390/include/asm/perf_counter.h  10
-rw-r--r--  arch/s390/include/asm/perf_event.h  10
-rw-r--r--  arch/s390/include/asm/unistd.h  2
-rw-r--r--  arch/s390/kernel/compat_wrapper.S  8
-rw-r--r--  arch/s390/kernel/syscalls.S  2
-rw-r--r--  arch/s390/mm/fault.c  8
-rw-r--r--  arch/sh/Kconfig  2
-rw-r--r--  arch/sh/include/asm/perf_counter.h  9
-rw-r--r--  arch/sh/include/asm/perf_event.h  9
-rw-r--r--  arch/sh/include/asm/unistd_32.h  2
-rw-r--r--  arch/sh/include/asm/unistd_64.h  2
-rw-r--r--  arch/sh/kernel/syscalls_32.S  2
-rw-r--r--  arch/sh/kernel/syscalls_64.S  2
-rw-r--r--  arch/sh/mm/fault_32.c  8
-rw-r--r--  arch/sh/mm/tlbflush_64.c  8
-rw-r--r--  arch/sparc/Kconfig  4
-rw-r--r--  arch/sparc/include/asm/perf_counter.h  14
-rw-r--r--  arch/sparc/include/asm/perf_event.h  14
-rw-r--r--  arch/sparc/include/asm/unistd.h  2
-rw-r--r--  arch/sparc/kernel/Makefile  2
-rw-r--r--  arch/sparc/kernel/nmi.c  4
-rw-r--r--  arch/sparc/kernel/pcr.c  10
-rw-r--r--  arch/sparc/kernel/perf_event.c (renamed from arch/sparc/kernel/perf_counter.c)  178
-rw-r--r--  arch/sparc/kernel/systbls_32.S  2
-rw-r--r--  arch/sparc/kernel/systbls_64.S  4
-rw-r--r--  arch/x86/Kconfig  2
-rw-r--r--  arch/x86/ia32/ia32entry.S  2
-rw-r--r--  arch/x86/include/asm/entry_arch.h  2
-rw-r--r--  arch/x86/include/asm/perf_event.h (renamed from arch/x86/include/asm/perf_counter.h)  30
-rw-r--r--  arch/x86/include/asm/unistd_32.h  2
-rw-r--r--  arch/x86/include/asm/unistd_64.h  4
-rw-r--r--  arch/x86/kernel/apic/apic.c  6
-rw-r--r--  arch/x86/kernel/cpu/Makefile  2
-rw-r--r--  arch/x86/kernel/cpu/common.c  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c (renamed from arch/x86/kernel/cpu/perf_counter.c)  556
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c  2
-rw-r--r--  arch/x86/kernel/entry_64.S  2
-rw-r--r--  arch/x86/kernel/irqinit.c  2
-rw-r--r--  arch/x86/kernel/syscall_table_32.S  2
-rw-r--r--  arch/x86/mm/fault.c  8
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c  4
-rw-r--r--  arch/x86/oprofile/op_x86_model.h  2
-rw-r--r--  drivers/char/sysrq.c  4
-rw-r--r--  fs/exec.c  6
-rw-r--r--  include/asm-generic/unistd.h  4
-rw-r--r--  include/linux/init_task.h  14
-rw-r--r--  include/linux/perf_counter.h  497
-rw-r--r--  include/linux/perf_event.h  858
-rw-r--r--  include/linux/prctl.h  4
-rw-r--r--  include/linux/sched.h  12
-rw-r--r--  include/linux/syscalls.h  6
-rw-r--r--  include/trace/ftrace.h  10
-rw-r--r--  init/Kconfig  45
-rw-r--r--  kernel/Makefile  2
-rw-r--r--  kernel/exit.c  8
-rw-r--r--  kernel/fork.c  8
-rw-r--r--  kernel/perf_event.c (renamed from kernel/perf_counter.c)  2449
-rw-r--r--  kernel/sched.c  14
-rw-r--r--  kernel/sys.c  10
-rw-r--r--  kernel/sys_ni.c  2
-rw-r--r--  kernel/sysctl.c  22
-rw-r--r--  kernel/timer.c  4
-rw-r--r--  kernel/trace/trace_syscalls.c  6
-rw-r--r--  mm/mmap.c  6
-rw-r--r--  mm/mprotect.c  4
-rw-r--r--  tools/perf/Makefile  2
-rw-r--r--  tools/perf/builtin-annotate.c  28
-rw-r--r--  tools/perf/builtin-record.c  22
-rw-r--r--  tools/perf/builtin-report.c  48
-rw-r--r--  tools/perf/builtin-sched.c  20
-rw-r--r--  tools/perf/builtin-stat.c  10
-rw-r--r--  tools/perf/builtin-timechart.c  14
-rw-r--r--  tools/perf/builtin-top.c  12
-rw-r--r--  tools/perf/builtin-trace.c  22
-rw-r--r--  tools/perf/design.txt  58
-rw-r--r--  tools/perf/perf.h  12
-rw-r--r--  tools/perf/util/event.h  4
-rw-r--r--  tools/perf/util/header.c  6
-rw-r--r--  tools/perf/util/header.h  8
-rw-r--r--  tools/perf/util/parse-events.c  32
-rw-r--r--  tools/perf/util/parse-events.h  2
-rw-r--r--  tools/perf/util/trace-event-info.c  8
-rw-r--r--  tools/perf/util/trace-event.h  2
134 files changed, 3258 insertions, 2801 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 43761a00e3f1..751a307dc44e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4000,7 +4000,7 @@ S: Maintained
 F:	include/linux/delayacct.h
 F:	kernel/delayacct.c
 
-PERFORMANCE COUNTER SUBSYSTEM
+PERFORMANCE EVENTS SUBSYSTEM
 M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 9122c9ee18fb..89f7eade20af 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -390,7 +390,7 @@
 #define __NR_preadv			(__NR_SYSCALL_BASE+361)
 #define __NR_pwritev			(__NR_SYSCALL_BASE+362)
 #define __NR_rt_tgsigqueueinfo		(__NR_SYSCALL_BASE+363)
-#define __NR_perf_counter_open		(__NR_SYSCALL_BASE+364)
+#define __NR_perf_event_open		(__NR_SYSCALL_BASE+364)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index ecfa98954d1d..fafce1b5c69f 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -373,7 +373,7 @@
 		CALL(sys_preadv)
 		CALL(sys_pwritev)
 		CALL(sys_rt_tgsigqueueinfo)
-		CALL(sys_perf_counter_open)
+		CALL(sys_perf_event_open)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index c8e7ee4768cd..02b1529dad57 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -381,7 +381,7 @@
 #define __NR_preadv		366
 #define __NR_pwritev		367
 #define __NR_rt_tgsigqueueinfo	368
-#define __NR_perf_counter_open	369
+#define __NR_perf_event_open	369
 
 #define __NR_syscall		370
 #define NR_syscalls		__NR_syscall
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 01af24cde362..1e7cac23e25f 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1620,7 +1620,7 @@ ENTRY(_sys_call_table)
 	.long _sys_preadv
 	.long _sys_pwritev
 	.long _sys_rt_tgsigqueueinfo
-	.long _sys_perf_counter_open
+	.long _sys_perf_event_open
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index b86e19c9b5b0..4b5830bcbe2e 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -7,7 +7,7 @@ config FRV
 	default y
 	select HAVE_IDE
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 
 config ZONE_DMA
 	bool
diff --git a/arch/frv/include/asm/perf_counter.h b/arch/frv/include/asm/perf_event.h
index ccf726e61b2e..a69e0155d146 100644
--- a/arch/frv/include/asm/perf_counter.h
+++ b/arch/frv/include/asm/perf_event.h
@@ -1,4 +1,4 @@
-/* FRV performance counter support
+/* FRV performance event support
  *
  * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -9,9 +9,9 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#ifndef _ASM_PERF_COUNTER_H
-#define _ASM_PERF_COUNTER_H
+#ifndef _ASM_PERF_EVENT_H
+#define _ASM_PERF_EVENT_H
 
-#define PERF_COUNTER_INDEX_OFFSET	0
+#define PERF_EVENT_INDEX_OFFSET	0
 
-#endif /* _ASM_PERF_COUNTER_H */
+#endif /* _ASM_PERF_EVENT_H */
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index 4a8fb427ce0a..be6ef0f5cd42 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -342,7 +342,7 @@
 #define __NR_preadv		333
 #define __NR_pwritev		334
 #define __NR_rt_tgsigqueueinfo	335
-#define __NR_perf_counter_open	336
+#define __NR_perf_event_open	336
 
 #ifdef __KERNEL__
 
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index fde1e446b440..189397ec012a 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1525,6 +1525,6 @@ sys_call_table:
 	.long sys_preadv
 	.long sys_pwritev
 	.long sys_rt_tgsigqueueinfo	/* 335 */
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
 syscall_table_size = (. - sys_call_table)
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 0a377210c89b..f4709756d0d9 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
diff --git a/arch/frv/lib/perf_counter.c b/arch/frv/lib/perf_event.c
index 2000feecd571..9ac5acfd2e91 100644
--- a/arch/frv/lib/perf_counter.c
+++ b/arch/frv/lib/perf_event.c
@@ -1,4 +1,4 @@
-/* Performance counter handling
+/* Performance event handling
  *
  * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -9,11 +9,11 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 /*
- * mark the performance counter as pending
+ * mark the performance event as pending
  */
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
 }
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 946d8691f2b0..48b87f5ced50 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -335,7 +335,7 @@
 #define __NR_preadv		329
 #define __NR_pwritev		330
 #define __NR_rt_tgsigqueueinfo	331
-#define __NR_perf_counter_open	332
+#define __NR_perf_event_open	332
 
 #ifdef __KERNEL__
 
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 922f52e7ed1a..c5b33634c980 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -756,5 +756,5 @@ sys_call_table:
 	.long sys_preadv
 	.long sys_pwritev		/* 330 */
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 0ae123e08985..23535cc415ae 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -350,7 +350,7 @@ ENTRY(sys_call_table)
 	.long sys_preadv
 	.long sys_pwritev		/* 330 */
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
 	.rept NR_syscalls-(.-sys_call_table)/4
 	.long sys_ni_syscall
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 0b852327c0e7..cb05a07e55e9 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -381,7 +381,7 @@
 #define __NR_preadv		363 /* new */
 #define __NR_pwritev		364 /* new */
 #define __NR_rt_tgsigqueueinfo	365 /* new */
-#define __NR_perf_counter_open	366 /* new */
+#define __NR_perf_event_open	366 /* new */
 
 #define __NR_syscalls		367
 
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 457216097dfd..ecec19155135 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -370,4 +370,4 @@ ENTRY(sys_call_table)
 	.long sys_ni_syscall
 	.long sys_ni_syscall
 	.long sys_rt_tgsigqueueinfo	/* 365 */
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index e753a777949b..8c9dfa9e9018 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -353,7 +353,7 @@
 #define __NR_preadv			(__NR_Linux + 330)
 #define __NR_pwritev			(__NR_Linux + 331)
 #define __NR_rt_tgsigqueueinfo		(__NR_Linux + 332)
-#define __NR_perf_counter_open		(__NR_Linux + 333)
+#define __NR_perf_event_open		(__NR_Linux + 333)
 #define __NR_accept4			(__NR_Linux + 334)
 
 /*
@@ -664,7 +664,7 @@
 #define __NR_preadv			(__NR_Linux + 289)
 #define __NR_pwritev			(__NR_Linux + 290)
 #define __NR_rt_tgsigqueueinfo		(__NR_Linux + 291)
-#define __NR_perf_counter_open		(__NR_Linux + 292)
+#define __NR_perf_event_open		(__NR_Linux + 292)
 #define __NR_accept4			(__NR_Linux + 293)
 
 /*
@@ -979,7 +979,7 @@
 #define __NR_preadv			(__NR_Linux + 293)
 #define __NR_pwritev			(__NR_Linux + 294)
 #define __NR_rt_tgsigqueueinfo		(__NR_Linux + 295)
-#define __NR_perf_counter_open		(__NR_Linux + 296)
+#define __NR_perf_event_open		(__NR_Linux + 296)
 #define __NR_accept4			(__NR_Linux + 297)
 
 /*
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7c2de4f091c4..fd2a9bb620d6 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -581,7 +581,7 @@ einval: li v0, -ENOSYS
 	sys	sys_preadv		6	/* 4330 */
 	sys	sys_pwritev		6
 	sys	sys_rt_tgsigqueueinfo	4
-	sys	sys_perf_counter_open	5
+	sys	sys_perf_event_open	5
 	sys	sys_accept4		4
 	.endm
 
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index b97b993846d6..18bf7f32c5e4 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -418,6 +418,6 @@ sys_call_table:
 	PTR	sys_preadv
 	PTR	sys_pwritev			/* 5390 */
 	PTR	sys_rt_tgsigqueueinfo
-	PTR	sys_perf_counter_open
+	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 1a6ae124635b..6ebc07976694 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -416,6 +416,6 @@ EXPORT(sysn32_call_table)
 	PTR	sys_preadv
 	PTR	sys_pwritev
 	PTR	compat_sys_rt_tgsigqueueinfo	/* 5295 */
-	PTR	sys_perf_counter_open
+	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index cd31087a651f..9bbf9775e0bd 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -536,6 +536,6 @@ sys_call_table:
 	PTR	compat_sys_preadv		/* 4330 */
 	PTR	compat_sys_pwritev
 	PTR	compat_sys_rt_tgsigqueueinfo
-	PTR	sys_perf_counter_open
+	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index fad68616af32..2a983931c11f 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -347,7 +347,7 @@
 #define __NR_preadv		334
 #define __NR_pwritev		335
 #define __NR_rt_tgsigqueueinfo	336
-#define __NR_perf_counter_open	337
+#define __NR_perf_event_open	337
 
 #ifdef __KERNEL__
 
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index e0d2563af4f2..a94e7ea3faa6 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -723,7 +723,7 @@ ENTRY(sys_call_table)
 	.long sys_preadv
 	.long sys_pwritev		/* 335 */
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
 
 nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 06f8d5b5b0f9..f388dc68f605 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -16,7 +16,7 @@ config PARISC
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE
 	select BUG
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/perf_counter.h b/arch/parisc/include/asm/perf_counter.h
deleted file mode 100644
index dc9e829f7013..000000000000
--- a/arch/parisc/include/asm/perf_counter.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_PARISC_PERF_COUNTER_H
-#define __ASM_PARISC_PERF_COUNTER_H
-
-/* parisc only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) { }
-
-#endif /* __ASM_PARISC_PERF_COUNTER_H */
diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h
new file mode 100644
index 000000000000..cc146427d8f9
--- /dev/null
+++ b/arch/parisc/include/asm/perf_event.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_PARISC_PERF_EVENT_H
+#define __ASM_PARISC_PERF_EVENT_H
+
+/* parisc only supports software events through this interface. */
+static inline void set_perf_event_pending(void) { }
+
+#endif /* __ASM_PARISC_PERF_EVENT_H */
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index f3d3b8b012c4..cda158318c62 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -810,9 +810,9 @@
 #define __NR_preadv		(__NR_Linux + 315)
 #define __NR_pwritev		(__NR_Linux + 316)
 #define __NR_rt_tgsigqueueinfo	(__NR_Linux + 317)
-#define __NR_perf_counter_open	(__NR_Linux + 318)
+#define __NR_perf_event_open	(__NR_Linux + 318)
 
-#define __NR_Linux_syscalls	(__NR_perf_counter_open + 1)
+#define __NR_Linux_syscalls	(__NR_perf_event_open + 1)
 
 
 #define __IGNORE_select		/* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index cf145eb026b3..843f423dec67 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -416,7 +416,7 @@
 	ENTRY_COMP(preadv)		/* 315 */
 	ENTRY_COMP(pwritev)
 	ENTRY_COMP(rt_tgsigqueueinfo)
-	ENTRY_SAME(perf_counter_open)
+	ENTRY_SAME(perf_event_open)
 
 	/* Nothing yet */
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8250902265c6..4fd479059d65 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -129,7 +129,7 @@ config PPC
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS if PPC64
 	select GENERIC_ATOMIC64 if PPC32
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 
 config EARLY_PRINTK
 	bool
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e73d554538dd..abbc2aaaced5 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -135,43 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct irq_chip;
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	unsigned long x;
 
 	asm volatile("lbz %0,%1(13)"
 		: "=r" (x)
-		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
+		: "i" (offsetof(struct paca_struct, perf_event_pending)));
 	return x;
 }
 
-static inline void set_perf_counter_pending(void)
+static inline void set_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (1),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 
-static inline void clear_perf_counter_pending(void)
+static inline void clear_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (0),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 #endif /* CONFIG_PPC64 */
 
-#else /* CONFIG_PERF_COUNTERS */
+#else /* CONFIG_PERF_EVENTS */
 
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	return 0;
 }
 
-static inline void clear_perf_counter_pending(void) {}
-#endif /* CONFIG_PERF_COUNTERS */
+static inline void clear_perf_event_pending(void) {}
+#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index b634456ea893..7d8514ceceae 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -122,7 +122,7 @@ struct paca_struct {
 	u8 soft_enabled;		/* irq soft-enable flag */
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
-	u8 perf_counter_pending;	/* PM interrupt while soft-disabled */
+	u8 perf_event_pending;		/* PM interrupt while soft-disabled */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_event.h
index 0ea0639fcf75..2499aaadaeb9 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -1,5 +1,5 @@
 /*
- * Performance counter support - PowerPC-specific definitions.
+ * Performance event support - PowerPC-specific definitions.
  *
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  *
@@ -12,9 +12,9 @@
 
 #include <asm/hw_irq.h>
 
-#define MAX_HWCOUNTERS		8
+#define MAX_HWEVENTS		8
 #define MAX_EVENT_ALTERNATIVES	8
-#define MAX_LIMITED_HWCOUNTERS	2
+#define MAX_LIMITED_HWEVENTS	2
 
 /*
  * This struct provides the constants and functions needed to
@@ -22,18 +22,18 @@
  */
 struct power_pmu {
 	const char	*name;
-	int		n_counter;
+	int		n_event;
 	int		max_alternatives;
 	unsigned long	add_fields;
 	unsigned long	test_adder;
 	int		(*compute_mmcr)(u64 events[], int n_ev,
 				unsigned int hwc[], unsigned long mmcr[]);
-	int		(*get_constraint)(u64 event, unsigned long *mskp,
+	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
 				unsigned long *valp);
-	int		(*get_alternatives)(u64 event, unsigned int flags,
+	int		(*get_alternatives)(u64 event_id, unsigned int flags,
 				u64 alt[]);
 	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
-	int		(*limited_pmc_event)(u64 event);
+	int		(*limited_pmc_event)(u64 event_id);
 	u32		flags;
 	int		n_generic;
 	int		*generic_events;
@@ -61,10 +61,10 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
-#define PERF_COUNTER_INDEX_OFFSET	1
+#define PERF_EVENT_INDEX_OFFSET	1
 
 /*
- * Only override the default definitions in include/linux/perf_counter.h
+ * Only override the default definitions in include/linux/perf_event.h
  * if we have hardware PMU support.
  */
 #ifdef CONFIG_PPC_PERF_CTRS
@@ -73,14 +73,14 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
 /*
  * The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event and
+ * a 32/64-bit mask that express the constraints between this event_id and
  * other events.
  *
  * The value and mask are divided up into (non-overlapping) bitfields
  * of three different types:
  *
  * Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event.  For a
+ * in MMCR* needs to be set to a specific value for this event_id.  For a
  * select field, the mask contains 1s in every bit of the field, and
  * the value contains a unique value for each possible setting of the
  * MMCR* bits.  The constraint checking code will ensure that two events
@@ -102,9 +102,9 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
  * possible.)  For N classes, the field is N+1 bits wide, and each class
  * is assigned one bit from the least-significant N bits.  The mask has
  * only the most-significant bit set, and the value has only the bit
- * for the event's class set.  The test_adder has the least significant
+ * for the event_id's class set.  The test_adder has the least significant
  * bit set in the field.
  *
- * If an event is not subject to the constraint expressed by a particular
+ * If an event_id is not subject to the constraint expressed by a particular
  * field, then it will have 0 in both the mask and value for that field.
  */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ed24bd92fe49..c7d671a7d9a1 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -322,7 +322,7 @@ SYSCALL_SPU(epoll_create1)
 SYSCALL_SPU(dup3)
 SYSCALL_SPU(pipe2)
 SYSCALL(inotify_init1)
-SYSCALL_SPU(perf_counter_open)
+SYSCALL_SPU(perf_event_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
 COMPAT_SYS(rt_tgsigqueueinfo)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index cef080bfc607..f6ca76176766 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -341,7 +341,7 @@
 #define __NR_dup3		316
 #define __NR_pipe2		317
 #define __NR_inotify_init1	318
-#define __NR_perf_counter_open	319
+#define __NR_perf_event_open	319
 #define __NR_preadv		320
 #define __NR_pwritev		321
 #define __NR_rt_tgsigqueueinfo	322
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 569f79ccd310..b23664a0b86c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
 
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o perf_callchain.o
+obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_event.o perf_callchain.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f0df285f0f87..0812b0f414bb 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -133,7 +133,7 @@ int main(void)
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
-	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
+	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 66bcda34a6bb..900e0eea0099 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -556,14 +556,14 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
 	TRACE_AND_RESTORE_IRQ(r5);
 
-#ifdef CONFIG_PERF_COUNTERS
-	/* check paca->perf_counter_pending if we're enabling ints */
+#ifdef CONFIG_PERF_EVENTS
+	/* check paca->perf_event_pending if we're enabling ints */
 	lbz	r3,PACAPERFPEND(r13)
 	and.	r3,r3,r5
 	beq	27f
-	bl	.perf_counter_do_pending
+	bl	.perf_event_do_pending
 27:
-#endif /* CONFIG_PERF_COUNTERS */
+#endif /* CONFIG_PERF_EVENTS */
 
 	/* extract EE bit and use it to restore paca->hard_enabled */
 	ld	r3,_MSR(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f7f376ea7b17..e5d121177984 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,7 +53,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -138,9 +138,9 @@ notrace void raw_local_irq_restore(unsigned long en)
 	}
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
-	if (test_perf_counter_pending()) {
-		clear_perf_counter_pending();
-		perf_counter_do_pending();
+	if (test_perf_event_pending()) {
+		clear_perf_event_pending();
+		perf_event_do_pending();
 	}
 
 	/*
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index cc466d039af6..09d72028f317 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>
 
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index f74b62c67511..0a03cf70d247 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -10,7 +10,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_event.c
index 5ccf9bca96c0..197b7d958796 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1,5 +1,5 @@
 /*
- * Performance counter support - powerpc architecture code
+ * Performance event support - powerpc architecture code
  *
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  *
@@ -10,7 +10,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <asm/reg.h>
@@ -19,24 +19,24 @@
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
 
-struct cpu_hw_counters {
-	int n_counters;
+struct cpu_hw_events {
+	int n_events;
 	int n_percpu;
 	int disabled;
 	int n_added;
 	int n_limited;
 	u8  pmcs_enabled;
-	struct perf_counter *counter[MAX_HWCOUNTERS];
-	u64 events[MAX_HWCOUNTERS];
-	unsigned int flags[MAX_HWCOUNTERS];
+	struct perf_event *event[MAX_HWEVENTS];
+	u64 events[MAX_HWEVENTS];
+	unsigned int flags[MAX_HWEVENTS];
 	unsigned long mmcr[3];
-	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
-	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
-	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
-	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+	struct perf_event *limited_event[MAX_LIMITED_HWEVENTS];
+	u8  limited_hwidx[MAX_LIMITED_HWEVENTS];
+	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 };
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 struct power_pmu *ppmu;
 
@@ -47,7 +47,7 @@ struct power_pmu *ppmu;
  * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
  * then we need to use the FCHV bit to ignore kernel events.
  */
-static unsigned int freeze_counters_kernel = MMCR0_FCS;
+static unsigned int freeze_events_kernel = MMCR0_FCS;
 
 /*
  * 32-bit doesn't have MMCRA but does have an MMCR2,
@@ -122,14 +122,14 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 
 	if (ppmu->flags & PPMU_ALT_SIPR) {
 		if (mmcra & POWER6_MMCRA_SIHV)
-			return PERF_EVENT_MISC_HYPERVISOR;
+			return PERF_RECORD_MISC_HYPERVISOR;
 		return (mmcra & POWER6_MMCRA_SIPR) ?
-			PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+			PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL;
 	}
 	if (mmcra & MMCRA_SIHV)
-		return PERF_EVENT_MISC_HYPERVISOR;
-	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
-		PERF_EVENT_MISC_KERNEL;
+		return PERF_RECORD_MISC_HYPERVISOR;
+	return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER :
+		PERF_RECORD_MISC_KERNEL;
 }
 
 /*
@@ -152,9 +152,9 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
 
 #endif /* CONFIG_PPC64 */
 
-static void perf_counter_interrupt(struct pt_regs *regs);
+static void perf_event_interrupt(struct pt_regs *regs);
 
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
 {
 }
 
@@ -240,31 +240,31 @@ static void write_pmc(int idx, unsigned long val)
  * Check if a set of events can all go on the PMU at once.
  * If they can't, this will look at alternative codes for the events
  * and see if any combination of alternative codes is feasible.
- * The feasible set is returned in event[].
+ * The feasible set is returned in event_id[].
  */
-static int power_check_constraints(struct cpu_hw_counters *cpuhw,
-				   u64 event[], unsigned int cflags[],
+static int power_check_constraints(struct cpu_hw_events *cpuhw,
+				   u64 event_id[], unsigned int cflags[],
 				   int n_ev)
 {
 	unsigned long mask, value, nv;
-	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
-	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
+	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
 	int i, j;
 	unsigned long addf = ppmu->add_fields;
 	unsigned long tadd = ppmu->test_adder;
 
-	if (n_ev > ppmu->n_counter)
+	if (n_ev > ppmu->n_event)
 		return -1;
 
 	/* First see if the events will go on as-is */
 	for (i = 0; i < n_ev; ++i) {
 		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
-		    && !ppmu->limited_pmc_event(event[i])) {
-			ppmu->get_alternatives(event[i], cflags[i],
+		    && !ppmu->limited_pmc_event(event_id[i])) {
+			ppmu->get_alternatives(event_id[i], cflags[i],
 					       cpuhw->alternatives[i]);
-			event[i] = cpuhw->alternatives[i][0];
+			event_id[i] = cpuhw->alternatives[i][0];
 		}
-		if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
+		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
 					 &cpuhw->avalues[i][0]))
 			return -1;
 	}
@@ -287,7 +287,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 		return -1;
 	for (i = 0; i < n_ev; ++i) {
 		choice[i] = 0;
-		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
+		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
 						  cpuhw->alternatives[i]);
 		for (j = 1; j < n_alt[i]; ++j)
 			ppmu->get_constraint(cpuhw->alternatives[i][j],
@@ -307,7 +307,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 			j = choice[i];
 		}
 		/*
-		 * See if any alternative k for event i,
+		 * See if any alternative k for event_id i,
 		 * where k > j, will satisfy the constraints.
 		 */
 		while (++j < n_alt[i]) {
@@ -321,16 +321,16 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 		if (j >= n_alt[i]) {
 			/*
 			 * No feasible alternative, backtrack
-			 * to event i-1 and continue enumerating its
+			 * to event_id i-1 and continue enumerating its
 			 * alternatives from where we got up to.
 			 */
 			if (--i < 0)
 				return -1;
 		} else {
 			/*
-			 * Found a feasible alternative for event i,
-			 * remember where we got up to with this event,
-			 * go on to the next event, and start with
+			 * Found a feasible alternative for event_id i,
+			 * remember where we got up to with this event_id,
+			 * go on to the next event_id, and start with
 			 * the first alternative for it.
 			 */
 			choice[i] = j;
@@ -345,21 +345,21 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
 
 	/* OK, we have a feasible combination, tell the caller the solution */
 	for (i = 0; i < n_ev; ++i)
-		event[i] = cpuhw->alternatives[i][choice[i]];
+		event_id[i] = cpuhw->alternatives[i][choice[i]];
 	return 0;
 }
 
 /*
- * Check if newly-added counters have consistent settings for
+ * Check if newly-added events have consistent settings for
  * exclude_{user,kernel,hv} with each other and any previously
- * added counters.
+ * added events.
  */
-static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
+static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 			  int n_prev, int n_new)
 {
 	int eu = 0, ek = 0, eh = 0;
 	int i, n, first;
-	struct perf_counter *counter;
+	struct perf_event *event;
 
 	n = n_prev + n_new;
 	if (n <= 1)
@@ -371,15 +371,15 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
 			continue;
 		}
-		counter = ctrs[i];
+		event = ctrs[i];
 		if (first) {
-			eu = counter->attr.exclude_user;
-			ek = counter->attr.exclude_kernel;
-			eh = counter->attr.exclude_hv;
+			eu = event->attr.exclude_user;
+			ek = event->attr.exclude_kernel;
+			eh = event->attr.exclude_hv;
 			first = 0;
-		} else if (counter->attr.exclude_user != eu ||
-			   counter->attr.exclude_kernel != ek ||
-			   counter->attr.exclude_hv != eh) {
+		} else if (event->attr.exclude_user != eu ||
+			   event->attr.exclude_kernel != ek ||
+			   event->attr.exclude_hv != eh) {
 			return -EAGAIN;
 		}
 	}
@@ -392,11 +392,11 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 	return 0;
 }
 
-static void power_pmu_read(struct perf_counter *counter)
+static void power_pmu_read(struct perf_event *event)
 {
 	s64 val, delta, prev;
 
-	if (!counter->hw.idx)
+	if (!event->hw.idx)
 		return;
 	/*
 	 * Performance monitor interrupts come even when interrupts
@@ -404,21 +404,21 @@ static void power_pmu_read(struct perf_counter *counter)
 	 * Therefore we treat them like NMIs.
 	 */
 	do {
-		prev = atomic64_read(&counter->hw.prev_count);
+		prev = atomic64_read(&event->hw.prev_count);
 		barrier();
-		val = read_pmc(counter->hw.idx);
-	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);
+		val = read_pmc(event->hw.idx);
+	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
 	/* The counters are only 32 bits wide */
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &counter->count);
-	atomic64_sub(delta, &counter->hw.period_left);
+	atomic64_add(delta, &event->count);
+	atomic64_sub(delta, &event->hw.period_left);
 }
 
 /*
  * On some machines, PMC5 and PMC6 can't be written, don't respect
  * the freeze conditions, and don't generate interrupts.  This tells
- * us if `counter' is using such a PMC.
+ * us if `event' is using such a PMC.
  */
 static int is_limited_pmc(int pmcnum)
 {
@@ -426,53 +426,53 @@ static int is_limited_pmc(int pmcnum)
 	    && (pmcnum == 5 || pmcnum == 6);
 }
 
-static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
+static void freeze_limited_events(struct cpu_hw_events *cpuhw,
 				    unsigned long pmc5, unsigned long pmc6)
 {
-	struct perf_counter *counter;
+	struct perf_event *event;
 	u64 val, prev, delta;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		counter = cpuhw->limited_counter[i];
-		if (!counter->hw.idx)
+		event = cpuhw->limited_event[i];
+		if (!event->hw.idx)
 			continue;
-		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
-		prev = atomic64_read(&counter->hw.prev_count);
-		counter->hw.idx = 0;
+		val = (event->hw.idx == 5) ? pmc5 : pmc6;
+		prev = atomic64_read(&event->hw.prev_count);
+		event->hw.idx = 0;
 		delta = (val - prev) & 0xfffffffful;
-		atomic64_add(delta, &counter->count);
+		atomic64_add(delta, &event->count);
 	}
 }
 
-static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
+static void thaw_limited_events(struct cpu_hw_events *cpuhw,
 				  unsigned long pmc5, unsigned long pmc6)
 {
-	struct perf_counter *counter;
+	struct perf_event *event;
 	u64 val;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		counter = cpuhw->limited_counter[i];
-		counter->hw.idx = cpuhw->limited_hwidx[i];
-		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
-		atomic64_set(&counter->hw.prev_count, val);
-		perf_counter_update_userpage(counter);
+		event = cpuhw->limited_event[i];
+		event->hw.idx = cpuhw->limited_hwidx[i];
+		val = (event->hw.idx == 5) ? pmc5 : pmc6;
+		atomic64_set(&event->hw.prev_count, val);
+		perf_event_update_userpage(event);
 	}
 }
 
 /*
- * Since limited counters don't respect the freeze conditions, we
+ * Since limited events don't respect the freeze conditions, we
  * have to read them immediately after freezing or unfreezing the
- * other counters.  We try to keep the values from the limited
- * counters as consistent as possible by keeping the delay (in
+ * other events.  We try to keep the values from the limited
+ * events as consistent as possible by keeping the delay (in
  * cycles and instructions) between freezing/unfreezing and reading
- * the limited counters as small and consistent as possible.
- * Therefore, if any limited counters are in use, we read them
+ * the limited events as small and consistent as possible.
+ * Therefore, if any limited events are in use, we read them
  * both, and always in the same order, to minimize variability,
  * and do it inside the same asm that writes MMCR0.
  */
-static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
+static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 {
 	unsigned long pmc5, pmc6;
 
@@ -485,7 +485,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
 	 * Write MMCR0, then read PMC5 and PMC6 immediately.
 	 * To ensure we don't get a performance monitor interrupt
 	 * between writing MMCR0 and freezing/thawing the limited
-	 * counters, we first write MMCR0 with the counter overflow
+	 * events, we first write MMCR0 with the event overflow
 	 * interrupt enable bits turned off.
 	 */
 	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
@@ -495,12 +495,12 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
495 "i" (SPRN_PMC5), "i" (SPRN_PMC6)); 495 "i" (SPRN_PMC5), "i" (SPRN_PMC6));
496 496
497 if (mmcr0 & MMCR0_FC) 497 if (mmcr0 & MMCR0_FC)
498 freeze_limited_counters(cpuhw, pmc5, pmc6); 498 freeze_limited_events(cpuhw, pmc5, pmc6);
499 else 499 else
500 thaw_limited_counters(cpuhw, pmc5, pmc6); 500 thaw_limited_events(cpuhw, pmc5, pmc6);
501 501
502 /* 502 /*
503 * Write the full MMCR0 including the counter overflow interrupt 503 * Write the full MMCR0 including the event overflow interrupt
504 * enable bits, if necessary. 504 * enable bits, if necessary.
505 */ 505 */
506 if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) 506 if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -508,18 +508,18 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
508} 508}
509 509
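(Illustrative aside, not part of this patch.) The single asm statement in write_mmcr0() above packs one SPR write and two SPR reads together so the compiler cannot schedule anything between them; with the operand mapping used there (%2 = the mmcr0 value, %3 = SPRN_MMCR0, %4/%5 = the two limited PMCs) it is roughly equivalent to the C-level sequence below, shown only to make the asm easier to read.

	/* Sketch of what the asm block does, assuming the usual mtspr()/mfspr()
	 * helpers from <asm/reg.h>; the real code keeps all three accesses in
	 * one asm statement so the read-back window stays as small as possible.
	 */
	mtspr(SPRN_MMCR0, mmcr0);	/* freeze or unfreeze the PMU */
	pmc5 = mfspr(SPRN_PMC5);	/* read limited PMC 5 immediately */
	pmc6 = mfspr(SPRN_PMC6);	/* read limited PMC 6 immediately */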
510/* 510/*
511 * Disable all counters to prevent PMU interrupts and to allow 511 * Disable all events to prevent PMU interrupts and to allow
512 * counters to be added or removed. 512 * events to be added or removed.
513 */ 513 */
514void hw_perf_disable(void) 514void hw_perf_disable(void)
515{ 515{
516 struct cpu_hw_counters *cpuhw; 516 struct cpu_hw_events *cpuhw;
517 unsigned long flags; 517 unsigned long flags;
518 518
519 if (!ppmu) 519 if (!ppmu)
520 return; 520 return;
521 local_irq_save(flags); 521 local_irq_save(flags);
522 cpuhw = &__get_cpu_var(cpu_hw_counters); 522 cpuhw = &__get_cpu_var(cpu_hw_events);
523 523
524 if (!cpuhw->disabled) { 524 if (!cpuhw->disabled) {
525 cpuhw->disabled = 1; 525 cpuhw->disabled = 1;
@@ -545,7 +545,7 @@ void hw_perf_disable(void)
545 /* 545 /*
546 * Set the 'freeze counters' bit. 546 * Set the 'freeze counters' bit.
547 * The barrier is to make sure the mtspr has been 547 * The barrier is to make sure the mtspr has been
548 * executed and the PMU has frozen the counters 548 * executed and the PMU has frozen the events
549 * before we return. 549 * before we return.
550 */ 550 */
551 write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); 551 write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
@@ -555,26 +555,26 @@ void hw_perf_disable(void)
555} 555}
556 556
557/* 557/*
558 * Re-enable all counters if disable == 0. 558 * Re-enable all events if disable == 0.
559 * If we were previously disabled and counters were added, then 559 * If we were previously disabled and events were added, then
560 * put the new config on the PMU. 560 * put the new config on the PMU.
561 */ 561 */
562void hw_perf_enable(void) 562void hw_perf_enable(void)
563{ 563{
564 struct perf_counter *counter; 564 struct perf_event *event;
565 struct cpu_hw_counters *cpuhw; 565 struct cpu_hw_events *cpuhw;
566 unsigned long flags; 566 unsigned long flags;
567 long i; 567 long i;
568 unsigned long val; 568 unsigned long val;
569 s64 left; 569 s64 left;
570 unsigned int hwc_index[MAX_HWCOUNTERS]; 570 unsigned int hwc_index[MAX_HWEVENTS];
571 int n_lim; 571 int n_lim;
572 int idx; 572 int idx;
573 573
574 if (!ppmu) 574 if (!ppmu)
575 return; 575 return;
576 local_irq_save(flags); 576 local_irq_save(flags);
577 cpuhw = &__get_cpu_var(cpu_hw_counters); 577 cpuhw = &__get_cpu_var(cpu_hw_events);
578 if (!cpuhw->disabled) { 578 if (!cpuhw->disabled) {
579 local_irq_restore(flags); 579 local_irq_restore(flags);
580 return; 580 return;
@@ -582,23 +582,23 @@ void hw_perf_enable(void)
582 cpuhw->disabled = 0; 582 cpuhw->disabled = 0;
583 583
584 /* 584 /*
585 * If we didn't change anything, or only removed counters, 585 * If we didn't change anything, or only removed events,
586 * no need to recalculate MMCR* settings and reset the PMCs. 586 * no need to recalculate MMCR* settings and reset the PMCs.
587 * Just reenable the PMU with the current MMCR* settings 587 * Just reenable the PMU with the current MMCR* settings
588 * (possibly updated for removal of counters). 588 * (possibly updated for removal of events).
589 */ 589 */
590 if (!cpuhw->n_added) { 590 if (!cpuhw->n_added) {
591 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); 591 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
592 mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); 592 mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
593 if (cpuhw->n_counters == 0) 593 if (cpuhw->n_events == 0)
594 ppc_set_pmu_inuse(0); 594 ppc_set_pmu_inuse(0);
595 goto out_enable; 595 goto out_enable;
596 } 596 }
597 597
598 /* 598 /*
599 * Compute MMCR* values for the new set of counters 599 * Compute MMCR* values for the new set of events
600 */ 600 */
601 if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index, 601 if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
602 cpuhw->mmcr)) { 602 cpuhw->mmcr)) {
603 /* shouldn't ever get here */ 603 /* shouldn't ever get here */
604 printk(KERN_ERR "oops compute_mmcr failed\n"); 604 printk(KERN_ERR "oops compute_mmcr failed\n");
@@ -607,22 +607,22 @@ void hw_perf_enable(void)
607 607
608 /* 608 /*
609 * Add in MMCR0 freeze bits corresponding to the 609 * Add in MMCR0 freeze bits corresponding to the
610 * attr.exclude_* bits for the first counter. 610 * attr.exclude_* bits for the first event.
611 * We have already checked that all counters have the 611 * We have already checked that all events have the
612 * same values for these bits as the first counter. 612 * same values for these bits as the first event.
613 */ 613 */
614 counter = cpuhw->counter[0]; 614 event = cpuhw->event[0];
615 if (counter->attr.exclude_user) 615 if (event->attr.exclude_user)
616 cpuhw->mmcr[0] |= MMCR0_FCP; 616 cpuhw->mmcr[0] |= MMCR0_FCP;
617 if (counter->attr.exclude_kernel) 617 if (event->attr.exclude_kernel)
618 cpuhw->mmcr[0] |= freeze_counters_kernel; 618 cpuhw->mmcr[0] |= freeze_events_kernel;
619 if (counter->attr.exclude_hv) 619 if (event->attr.exclude_hv)
620 cpuhw->mmcr[0] |= MMCR0_FCHV; 620 cpuhw->mmcr[0] |= MMCR0_FCHV;
621 621
622 /* 622 /*
623 * Write the new configuration to MMCR* with the freeze 623 * Write the new configuration to MMCR* with the freeze
624 * bit set and set the hardware counters to their initial values. 624 * bit set and set the hardware events to their initial values.
625 * Then unfreeze the counters. 625 * Then unfreeze the events.
626 */ 626 */
627 ppc_set_pmu_inuse(1); 627 ppc_set_pmu_inuse(1);
628 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); 628 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
@@ -631,43 +631,43 @@ void hw_perf_enable(void)
631 | MMCR0_FC); 631 | MMCR0_FC);
632 632
633 /* 633 /*
634 * Read off any pre-existing counters that need to move 634 * Read off any pre-existing events that need to move
635 * to another PMC. 635 * to another PMC.
636 */ 636 */
637 for (i = 0; i < cpuhw->n_counters; ++i) { 637 for (i = 0; i < cpuhw->n_events; ++i) {
638 counter = cpuhw->counter[i]; 638 event = cpuhw->event[i];
639 if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) { 639 if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
640 power_pmu_read(counter); 640 power_pmu_read(event);
641 write_pmc(counter->hw.idx, 0); 641 write_pmc(event->hw.idx, 0);
642 counter->hw.idx = 0; 642 event->hw.idx = 0;
643 } 643 }
644 } 644 }
645 645
646 /* 646 /*
647 * Initialize the PMCs for all the new and moved counters. 647 * Initialize the PMCs for all the new and moved events.
648 */ 648 */
649 cpuhw->n_limited = n_lim = 0; 649 cpuhw->n_limited = n_lim = 0;
650 for (i = 0; i < cpuhw->n_counters; ++i) { 650 for (i = 0; i < cpuhw->n_events; ++i) {
651 counter = cpuhw->counter[i]; 651 event = cpuhw->event[i];
652 if (counter->hw.idx) 652 if (event->hw.idx)
653 continue; 653 continue;
654 idx = hwc_index[i] + 1; 654 idx = hwc_index[i] + 1;
655 if (is_limited_pmc(idx)) { 655 if (is_limited_pmc(idx)) {
656 cpuhw->limited_counter[n_lim] = counter; 656 cpuhw->limited_event[n_lim] = event;
657 cpuhw->limited_hwidx[n_lim] = idx; 657 cpuhw->limited_hwidx[n_lim] = idx;
658 ++n_lim; 658 ++n_lim;
659 continue; 659 continue;
660 } 660 }
661 val = 0; 661 val = 0;
662 if (counter->hw.sample_period) { 662 if (event->hw.sample_period) {
663 left = atomic64_read(&counter->hw.period_left); 663 left = atomic64_read(&event->hw.period_left);
664 if (left < 0x80000000L) 664 if (left < 0x80000000L)
665 val = 0x80000000L - left; 665 val = 0x80000000L - left;
666 } 666 }
667 atomic64_set(&counter->hw.prev_count, val); 667 atomic64_set(&event->hw.prev_count, val);
668 counter->hw.idx = idx; 668 event->hw.idx = idx;
669 write_pmc(idx, val); 669 write_pmc(idx, val);
670 perf_counter_update_userpage(counter); 670 perf_event_update_userpage(event);
671 } 671 }
672 cpuhw->n_limited = n_lim; 672 cpuhw->n_limited = n_lim;
673 cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; 673 cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
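(Illustrative aside, not part of this patch.) The initialisation a few lines above, val = 0x80000000L - left, works because the PMCs are 32-bit counters that signal an overflow once bit 31 becomes set: loading the complement of the remaining period makes the next performance monitor exception arrive after exactly "left" more counts. A small worked example:

	/* Example with a remaining sample period of 100000 events:
	 * the PMC is loaded with 0x80000000 - 100000 = 0x7ffe7960, so after
	 * 100000 increments it reaches 0x80000000 (bit 31 set) and the
	 * overflow interrupt fires.
	 */
	s64 left = 100000;
	unsigned long val = 0x80000000L - left;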
@@ -688,85 +688,85 @@ void hw_perf_enable(void)
688 local_irq_restore(flags); 688 local_irq_restore(flags);
689} 689}
690 690
691static int collect_events(struct perf_counter *group, int max_count, 691static int collect_events(struct perf_event *group, int max_count,
692 struct perf_counter *ctrs[], u64 *events, 692 struct perf_event *ctrs[], u64 *events,
693 unsigned int *flags) 693 unsigned int *flags)
694{ 694{
695 int n = 0; 695 int n = 0;
696 struct perf_counter *counter; 696 struct perf_event *event;
697 697
698 if (!is_software_counter(group)) { 698 if (!is_software_event(group)) {
699 if (n >= max_count) 699 if (n >= max_count)
700 return -1; 700 return -1;
701 ctrs[n] = group; 701 ctrs[n] = group;
702 flags[n] = group->hw.counter_base; 702 flags[n] = group->hw.event_base;
703 events[n++] = group->hw.config; 703 events[n++] = group->hw.config;
704 } 704 }
705 list_for_each_entry(counter, &group->sibling_list, list_entry) { 705 list_for_each_entry(event, &group->sibling_list, list_entry) {
706 if (!is_software_counter(counter) && 706 if (!is_software_event(event) &&
707 counter->state != PERF_COUNTER_STATE_OFF) { 707 event->state != PERF_EVENT_STATE_OFF) {
708 if (n >= max_count) 708 if (n >= max_count)
709 return -1; 709 return -1;
710 ctrs[n] = counter; 710 ctrs[n] = event;
711 flags[n] = counter->hw.counter_base; 711 flags[n] = event->hw.event_base;
712 events[n++] = counter->hw.config; 712 events[n++] = event->hw.config;
713 } 713 }
714 } 714 }
715 return n; 715 return n;
716} 716}
717 717
718static void counter_sched_in(struct perf_counter *counter, int cpu) 718static void event_sched_in(struct perf_event *event, int cpu)
719{ 719{
720 counter->state = PERF_COUNTER_STATE_ACTIVE; 720 event->state = PERF_EVENT_STATE_ACTIVE;
721 counter->oncpu = cpu; 721 event->oncpu = cpu;
722 counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped; 722 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
723 if (is_software_counter(counter)) 723 if (is_software_event(event))
724 counter->pmu->enable(counter); 724 event->pmu->enable(event);
725} 725}
726 726
727/* 727/*
728 * Called to enable a whole group of counters. 728 * Called to enable a whole group of events.
729 * Returns 1 if the group was enabled, or -EAGAIN if it could not be. 729 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
730 * Assumes the caller has disabled interrupts and has 730 * Assumes the caller has disabled interrupts and has
731 * frozen the PMU with hw_perf_save_disable. 731 * frozen the PMU with hw_perf_save_disable.
732 */ 732 */
733int hw_perf_group_sched_in(struct perf_counter *group_leader, 733int hw_perf_group_sched_in(struct perf_event *group_leader,
734 struct perf_cpu_context *cpuctx, 734 struct perf_cpu_context *cpuctx,
735 struct perf_counter_context *ctx, int cpu) 735 struct perf_event_context *ctx, int cpu)
736{ 736{
737 struct cpu_hw_counters *cpuhw; 737 struct cpu_hw_events *cpuhw;
738 long i, n, n0; 738 long i, n, n0;
739 struct perf_counter *sub; 739 struct perf_event *sub;
740 740
741 if (!ppmu) 741 if (!ppmu)
742 return 0; 742 return 0;
743 cpuhw = &__get_cpu_var(cpu_hw_counters); 743 cpuhw = &__get_cpu_var(cpu_hw_events);
744 n0 = cpuhw->n_counters; 744 n0 = cpuhw->n_events;
745 n = collect_events(group_leader, ppmu->n_counter - n0, 745 n = collect_events(group_leader, ppmu->n_event - n0,
746 &cpuhw->counter[n0], &cpuhw->events[n0], 746 &cpuhw->event[n0], &cpuhw->events[n0],
747 &cpuhw->flags[n0]); 747 &cpuhw->flags[n0]);
748 if (n < 0) 748 if (n < 0)
749 return -EAGAIN; 749 return -EAGAIN;
750 if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n)) 750 if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
751 return -EAGAIN; 751 return -EAGAIN;
752 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0); 752 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
753 if (i < 0) 753 if (i < 0)
754 return -EAGAIN; 754 return -EAGAIN;
755 cpuhw->n_counters = n0 + n; 755 cpuhw->n_events = n0 + n;
756 cpuhw->n_added += n; 756 cpuhw->n_added += n;
757 757
758 /* 758 /*
759 * OK, this group can go on; update counter states etc., 759 * OK, this group can go on; update event states etc.,
760 * and enable any software counters 760 * and enable any software events
761 */ 761 */
762 for (i = n0; i < n0 + n; ++i) 762 for (i = n0; i < n0 + n; ++i)
763 cpuhw->counter[i]->hw.config = cpuhw->events[i]; 763 cpuhw->event[i]->hw.config = cpuhw->events[i];
764 cpuctx->active_oncpu += n; 764 cpuctx->active_oncpu += n;
765 n = 1; 765 n = 1;
766 counter_sched_in(group_leader, cpu); 766 event_sched_in(group_leader, cpu);
767 list_for_each_entry(sub, &group_leader->sibling_list, list_entry) { 767 list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
768 if (sub->state != PERF_COUNTER_STATE_OFF) { 768 if (sub->state != PERF_EVENT_STATE_OFF) {
769 counter_sched_in(sub, cpu); 769 event_sched_in(sub, cpu);
770 ++n; 770 ++n;
771 } 771 }
772 } 772 }
@@ -776,14 +776,14 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
776} 776}
777 777
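(For illustration only; not in the patch.) hw_perf_group_sched_in() above puts a group leader and all of its siblings onto the PMU as a single unit. From userspace such a group is created by passing the leader's file descriptor as group_fd to subsequent perf_event_open() calls; a minimal sketch, assuming the renamed userspace header and syscall introduced by this series:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>

	static int open_counter(struct perf_event_attr *attr, int group_fd)
	{
		/* pid = 0 (this task), cpu = -1 (any CPU), flags = 0 */
		return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		int leader, sibling;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;

		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		leader = open_counter(&attr, -1);	/* group leader */

		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		sibling = open_counter(&attr, leader);	/* joins the leader's group */

		return (leader < 0 || sibling < 0);
	}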
778/* 778/*
 779 * Add a counter to the PMU. 779 * Add an event to the PMU.
780 * If all counters are not already frozen, then we disable and 780 * If all events are not already frozen, then we disable and
781 * re-enable the PMU in order to get hw_perf_enable to do the 781 * re-enable the PMU in order to get hw_perf_enable to do the
782 * actual work of reconfiguring the PMU. 782 * actual work of reconfiguring the PMU.
783 */ 783 */
784static int power_pmu_enable(struct perf_counter *counter) 784static int power_pmu_enable(struct perf_event *event)
785{ 785{
786 struct cpu_hw_counters *cpuhw; 786 struct cpu_hw_events *cpuhw;
787 unsigned long flags; 787 unsigned long flags;
788 int n0; 788 int n0;
789 int ret = -EAGAIN; 789 int ret = -EAGAIN;
@@ -792,23 +792,23 @@ static int power_pmu_enable(struct perf_counter *counter)
792 perf_disable(); 792 perf_disable();
793 793
794 /* 794 /*
795 * Add the counter to the list (if there is room) 795 * Add the event to the list (if there is room)
796 * and check whether the total set is still feasible. 796 * and check whether the total set is still feasible.
797 */ 797 */
798 cpuhw = &__get_cpu_var(cpu_hw_counters); 798 cpuhw = &__get_cpu_var(cpu_hw_events);
799 n0 = cpuhw->n_counters; 799 n0 = cpuhw->n_events;
800 if (n0 >= ppmu->n_counter) 800 if (n0 >= ppmu->n_event)
801 goto out; 801 goto out;
802 cpuhw->counter[n0] = counter; 802 cpuhw->event[n0] = event;
803 cpuhw->events[n0] = counter->hw.config; 803 cpuhw->events[n0] = event->hw.config;
804 cpuhw->flags[n0] = counter->hw.counter_base; 804 cpuhw->flags[n0] = event->hw.event_base;
805 if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1)) 805 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
806 goto out; 806 goto out;
807 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) 807 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
808 goto out; 808 goto out;
809 809
810 counter->hw.config = cpuhw->events[n0]; 810 event->hw.config = cpuhw->events[n0];
811 ++cpuhw->n_counters; 811 ++cpuhw->n_events;
812 ++cpuhw->n_added; 812 ++cpuhw->n_added;
813 813
814 ret = 0; 814 ret = 0;
@@ -819,46 +819,46 @@ static int power_pmu_enable(struct perf_counter *counter)
819} 819}
820 820
821/* 821/*
 822 * Remove a counter from the PMU. 822 * Remove an event from the PMU.
823 */ 823 */
824static void power_pmu_disable(struct perf_counter *counter) 824static void power_pmu_disable(struct perf_event *event)
825{ 825{
826 struct cpu_hw_counters *cpuhw; 826 struct cpu_hw_events *cpuhw;
827 long i; 827 long i;
828 unsigned long flags; 828 unsigned long flags;
829 829
830 local_irq_save(flags); 830 local_irq_save(flags);
831 perf_disable(); 831 perf_disable();
832 832
833 power_pmu_read(counter); 833 power_pmu_read(event);
834 834
835 cpuhw = &__get_cpu_var(cpu_hw_counters); 835 cpuhw = &__get_cpu_var(cpu_hw_events);
836 for (i = 0; i < cpuhw->n_counters; ++i) { 836 for (i = 0; i < cpuhw->n_events; ++i) {
837 if (counter == cpuhw->counter[i]) { 837 if (event == cpuhw->event[i]) {
838 while (++i < cpuhw->n_counters) 838 while (++i < cpuhw->n_events)
839 cpuhw->counter[i-1] = cpuhw->counter[i]; 839 cpuhw->event[i-1] = cpuhw->event[i];
840 --cpuhw->n_counters; 840 --cpuhw->n_events;
841 ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); 841 ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
842 if (counter->hw.idx) { 842 if (event->hw.idx) {
843 write_pmc(counter->hw.idx, 0); 843 write_pmc(event->hw.idx, 0);
844 counter->hw.idx = 0; 844 event->hw.idx = 0;
845 } 845 }
846 perf_counter_update_userpage(counter); 846 perf_event_update_userpage(event);
847 break; 847 break;
848 } 848 }
849 } 849 }
850 for (i = 0; i < cpuhw->n_limited; ++i) 850 for (i = 0; i < cpuhw->n_limited; ++i)
851 if (counter == cpuhw->limited_counter[i]) 851 if (event == cpuhw->limited_event[i])
852 break; 852 break;
853 if (i < cpuhw->n_limited) { 853 if (i < cpuhw->n_limited) {
854 while (++i < cpuhw->n_limited) { 854 while (++i < cpuhw->n_limited) {
855 cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; 855 cpuhw->limited_event[i-1] = cpuhw->limited_event[i];
856 cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; 856 cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
857 } 857 }
858 --cpuhw->n_limited; 858 --cpuhw->n_limited;
859 } 859 }
860 if (cpuhw->n_counters == 0) { 860 if (cpuhw->n_events == 0) {
861 /* disable exceptions if no counters are running */ 861 /* disable exceptions if no events are running */
862 cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); 862 cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
863 } 863 }
864 864
@@ -867,28 +867,28 @@ static void power_pmu_disable(struct perf_counter *counter)
867} 867}
868 868
869/* 869/*
 870 * Re-enable interrupts on a counter after they were throttled 870 * Re-enable interrupts on an event after they were throttled
871 * because they were coming too fast. 871 * because they were coming too fast.
872 */ 872 */
873static void power_pmu_unthrottle(struct perf_counter *counter) 873static void power_pmu_unthrottle(struct perf_event *event)
874{ 874{
875 s64 val, left; 875 s64 val, left;
876 unsigned long flags; 876 unsigned long flags;
877 877
878 if (!counter->hw.idx || !counter->hw.sample_period) 878 if (!event->hw.idx || !event->hw.sample_period)
879 return; 879 return;
880 local_irq_save(flags); 880 local_irq_save(flags);
881 perf_disable(); 881 perf_disable();
882 power_pmu_read(counter); 882 power_pmu_read(event);
883 left = counter->hw.sample_period; 883 left = event->hw.sample_period;
884 counter->hw.last_period = left; 884 event->hw.last_period = left;
885 val = 0; 885 val = 0;
886 if (left < 0x80000000L) 886 if (left < 0x80000000L)
887 val = 0x80000000L - left; 887 val = 0x80000000L - left;
888 write_pmc(counter->hw.idx, val); 888 write_pmc(event->hw.idx, val);
889 atomic64_set(&counter->hw.prev_count, val); 889 atomic64_set(&event->hw.prev_count, val);
890 atomic64_set(&counter->hw.period_left, left); 890 atomic64_set(&event->hw.period_left, left);
891 perf_counter_update_userpage(counter); 891 perf_event_update_userpage(event);
892 perf_enable(); 892 perf_enable();
893 local_irq_restore(flags); 893 local_irq_restore(flags);
894} 894}
@@ -901,29 +901,29 @@ struct pmu power_pmu = {
901}; 901};
902 902
903/* 903/*
904 * Return 1 if we might be able to put counter on a limited PMC, 904 * Return 1 if we might be able to put event on a limited PMC,
905 * or 0 if not. 905 * or 0 if not.
 906 * A counter can only go on a limited PMC if it counts something 906 * An event can only go on a limited PMC if it counts something
907 * that a limited PMC can count, doesn't require interrupts, and 907 * that a limited PMC can count, doesn't require interrupts, and
908 * doesn't exclude any processor mode. 908 * doesn't exclude any processor mode.
909 */ 909 */
910static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev, 910static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
911 unsigned int flags) 911 unsigned int flags)
912{ 912{
913 int n; 913 int n;
914 u64 alt[MAX_EVENT_ALTERNATIVES]; 914 u64 alt[MAX_EVENT_ALTERNATIVES];
915 915
916 if (counter->attr.exclude_user 916 if (event->attr.exclude_user
917 || counter->attr.exclude_kernel 917 || event->attr.exclude_kernel
918 || counter->attr.exclude_hv 918 || event->attr.exclude_hv
919 || counter->attr.sample_period) 919 || event->attr.sample_period)
920 return 0; 920 return 0;
921 921
922 if (ppmu->limited_pmc_event(ev)) 922 if (ppmu->limited_pmc_event(ev))
923 return 1; 923 return 1;
924 924
925 /* 925 /*
926 * The requested event isn't on a limited PMC already; 926 * The requested event_id isn't on a limited PMC already;
927 * see if any alternative code goes on a limited PMC. 927 * see if any alternative code goes on a limited PMC.
928 */ 928 */
929 if (!ppmu->get_alternatives) 929 if (!ppmu->get_alternatives)
@@ -936,9 +936,9 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
936} 936}
937 937
938/* 938/*
939 * Find an alternative event that goes on a normal PMC, if possible, 939 * Find an alternative event_id that goes on a normal PMC, if possible,
940 * and return the event code, or 0 if there is no such alternative. 940 * and return the event_id code, or 0 if there is no such alternative.
941 * (Note: event code 0 is "don't count" on all machines.) 941 * (Note: event_id code 0 is "don't count" on all machines.)
942 */ 942 */
943static u64 normal_pmc_alternative(u64 ev, unsigned long flags) 943static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
944{ 944{
@@ -952,26 +952,26 @@ static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
952 return alt[0]; 952 return alt[0];
953} 953}
954 954
955/* Number of perf_counters counting hardware events */ 955/* Number of perf_events counting hardware events */
956static atomic_t num_counters; 956static atomic_t num_events;
957/* Used to avoid races in calling reserve/release_pmc_hardware */ 957/* Used to avoid races in calling reserve/release_pmc_hardware */
958static DEFINE_MUTEX(pmc_reserve_mutex); 958static DEFINE_MUTEX(pmc_reserve_mutex);
959 959
960/* 960/*
961 * Release the PMU if this is the last perf_counter. 961 * Release the PMU if this is the last perf_event.
962 */ 962 */
963static void hw_perf_counter_destroy(struct perf_counter *counter) 963static void hw_perf_event_destroy(struct perf_event *event)
964{ 964{
965 if (!atomic_add_unless(&num_counters, -1, 1)) { 965 if (!atomic_add_unless(&num_events, -1, 1)) {
966 mutex_lock(&pmc_reserve_mutex); 966 mutex_lock(&pmc_reserve_mutex);
967 if (atomic_dec_return(&num_counters) == 0) 967 if (atomic_dec_return(&num_events) == 0)
968 release_pmc_hardware(); 968 release_pmc_hardware();
969 mutex_unlock(&pmc_reserve_mutex); 969 mutex_unlock(&pmc_reserve_mutex);
970 } 970 }
971} 971}
972 972
973/* 973/*
974 * Translate a generic cache event config to a raw event code. 974 * Translate a generic cache event_id config to a raw event_id code.
975 */ 975 */
976static int hw_perf_cache_event(u64 config, u64 *eventp) 976static int hw_perf_cache_event(u64 config, u64 *eventp)
977{ 977{
@@ -1000,39 +1000,39 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
1000 return 0; 1000 return 0;
1001} 1001}
1002 1002
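(Illustrative aside, not part of this patch.) hw_perf_cache_event() above, whose body is elided from this hunk, translates the generic PERF_TYPE_HW_CACHE config layout into a raw PMU event code for this CPU. For reference, the generic encoding it has to decode packs three byte-wide fields, for example:

	/* Standard perf cache event_id encoding: cache id in bits 0-7,
	 * operation in bits 8-15, result in bits 16-23.
	 * Example: L1 data cache read misses.
	 */
	__u64 config = PERF_COUNT_HW_CACHE_L1D |
		       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);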
1003const struct pmu *hw_perf_counter_init(struct perf_counter *counter) 1003const struct pmu *hw_perf_event_init(struct perf_event *event)
1004{ 1004{
1005 u64 ev; 1005 u64 ev;
1006 unsigned long flags; 1006 unsigned long flags;
1007 struct perf_counter *ctrs[MAX_HWCOUNTERS]; 1007 struct perf_event *ctrs[MAX_HWEVENTS];
1008 u64 events[MAX_HWCOUNTERS]; 1008 u64 events[MAX_HWEVENTS];
1009 unsigned int cflags[MAX_HWCOUNTERS]; 1009 unsigned int cflags[MAX_HWEVENTS];
1010 int n; 1010 int n;
1011 int err; 1011 int err;
1012 struct cpu_hw_counters *cpuhw; 1012 struct cpu_hw_events *cpuhw;
1013 1013
1014 if (!ppmu) 1014 if (!ppmu)
1015 return ERR_PTR(-ENXIO); 1015 return ERR_PTR(-ENXIO);
1016 switch (counter->attr.type) { 1016 switch (event->attr.type) {
1017 case PERF_TYPE_HARDWARE: 1017 case PERF_TYPE_HARDWARE:
1018 ev = counter->attr.config; 1018 ev = event->attr.config;
1019 if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) 1019 if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
1020 return ERR_PTR(-EOPNOTSUPP); 1020 return ERR_PTR(-EOPNOTSUPP);
1021 ev = ppmu->generic_events[ev]; 1021 ev = ppmu->generic_events[ev];
1022 break; 1022 break;
1023 case PERF_TYPE_HW_CACHE: 1023 case PERF_TYPE_HW_CACHE:
1024 err = hw_perf_cache_event(counter->attr.config, &ev); 1024 err = hw_perf_cache_event(event->attr.config, &ev);
1025 if (err) 1025 if (err)
1026 return ERR_PTR(err); 1026 return ERR_PTR(err);
1027 break; 1027 break;
1028 case PERF_TYPE_RAW: 1028 case PERF_TYPE_RAW:
1029 ev = counter->attr.config; 1029 ev = event->attr.config;
1030 break; 1030 break;
1031 default: 1031 default:
1032 return ERR_PTR(-EINVAL); 1032 return ERR_PTR(-EINVAL);
1033 } 1033 }
1034 counter->hw.config_base = ev; 1034 event->hw.config_base = ev;
1035 counter->hw.idx = 0; 1035 event->hw.idx = 0;
1036 1036
1037 /* 1037 /*
1038 * If we are not running on a hypervisor, force the 1038 * If we are not running on a hypervisor, force the
@@ -1040,28 +1040,28 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
1040 * the user set it to. 1040 * the user set it to.
1041 */ 1041 */
1042 if (!firmware_has_feature(FW_FEATURE_LPAR)) 1042 if (!firmware_has_feature(FW_FEATURE_LPAR))
1043 counter->attr.exclude_hv = 0; 1043 event->attr.exclude_hv = 0;
1044 1044
1045 /* 1045 /*
1046 * If this is a per-task counter, then we can use 1046 * If this is a per-task event, then we can use
1047 * PM_RUN_* events interchangeably with their non RUN_* 1047 * PM_RUN_* events interchangeably with their non RUN_*
1048 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. 1048 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
1049 * XXX we should check if the task is an idle task. 1049 * XXX we should check if the task is an idle task.
1050 */ 1050 */
1051 flags = 0; 1051 flags = 0;
1052 if (counter->ctx->task) 1052 if (event->ctx->task)
1053 flags |= PPMU_ONLY_COUNT_RUN; 1053 flags |= PPMU_ONLY_COUNT_RUN;
1054 1054
1055 /* 1055 /*
1056 * If this machine has limited counters, check whether this 1056 * If this machine has limited events, check whether this
1057 * event could go on a limited counter. 1057 * event_id could go on a limited event.
1058 */ 1058 */
1059 if (ppmu->flags & PPMU_LIMITED_PMC5_6) { 1059 if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
1060 if (can_go_on_limited_pmc(counter, ev, flags)) { 1060 if (can_go_on_limited_pmc(event, ev, flags)) {
1061 flags |= PPMU_LIMITED_PMC_OK; 1061 flags |= PPMU_LIMITED_PMC_OK;
1062 } else if (ppmu->limited_pmc_event(ev)) { 1062 } else if (ppmu->limited_pmc_event(ev)) {
1063 /* 1063 /*
1064 * The requested event is on a limited PMC, 1064 * The requested event_id is on a limited PMC,
1065 * but we can't use a limited PMC; see if any 1065 * but we can't use a limited PMC; see if any
1066 * alternative goes on a normal PMC. 1066 * alternative goes on a normal PMC.
1067 */ 1067 */
@@ -1073,50 +1073,50 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
1073 1073
1074 /* 1074 /*
1075 * If this is in a group, check if it can go on with all the 1075 * If this is in a group, check if it can go on with all the
1076 * other hardware counters in the group. We assume the counter 1076 * other hardware events in the group. We assume the event
1077 * hasn't been linked into its leader's sibling list at this point. 1077 * hasn't been linked into its leader's sibling list at this point.
1078 */ 1078 */
1079 n = 0; 1079 n = 0;
1080 if (counter->group_leader != counter) { 1080 if (event->group_leader != event) {
1081 n = collect_events(counter->group_leader, ppmu->n_counter - 1, 1081 n = collect_events(event->group_leader, ppmu->n_event - 1,
1082 ctrs, events, cflags); 1082 ctrs, events, cflags);
1083 if (n < 0) 1083 if (n < 0)
1084 return ERR_PTR(-EINVAL); 1084 return ERR_PTR(-EINVAL);
1085 } 1085 }
1086 events[n] = ev; 1086 events[n] = ev;
1087 ctrs[n] = counter; 1087 ctrs[n] = event;
1088 cflags[n] = flags; 1088 cflags[n] = flags;
1089 if (check_excludes(ctrs, cflags, n, 1)) 1089 if (check_excludes(ctrs, cflags, n, 1))
1090 return ERR_PTR(-EINVAL); 1090 return ERR_PTR(-EINVAL);
1091 1091
1092 cpuhw = &get_cpu_var(cpu_hw_counters); 1092 cpuhw = &get_cpu_var(cpu_hw_events);
1093 err = power_check_constraints(cpuhw, events, cflags, n + 1); 1093 err = power_check_constraints(cpuhw, events, cflags, n + 1);
1094 put_cpu_var(cpu_hw_counters); 1094 put_cpu_var(cpu_hw_events);
1095 if (err) 1095 if (err)
1096 return ERR_PTR(-EINVAL); 1096 return ERR_PTR(-EINVAL);
1097 1097
1098 counter->hw.config = events[n]; 1098 event->hw.config = events[n];
1099 counter->hw.counter_base = cflags[n]; 1099 event->hw.event_base = cflags[n];
1100 counter->hw.last_period = counter->hw.sample_period; 1100 event->hw.last_period = event->hw.sample_period;
1101 atomic64_set(&counter->hw.period_left, counter->hw.last_period); 1101 atomic64_set(&event->hw.period_left, event->hw.last_period);
1102 1102
1103 /* 1103 /*
1104 * See if we need to reserve the PMU. 1104 * See if we need to reserve the PMU.
1105 * If no counters are currently in use, then we have to take a 1105 * If no events are currently in use, then we have to take a
1106 * mutex to ensure that we don't race with another task doing 1106 * mutex to ensure that we don't race with another task doing
1107 * reserve_pmc_hardware or release_pmc_hardware. 1107 * reserve_pmc_hardware or release_pmc_hardware.
1108 */ 1108 */
1109 err = 0; 1109 err = 0;
1110 if (!atomic_inc_not_zero(&num_counters)) { 1110 if (!atomic_inc_not_zero(&num_events)) {
1111 mutex_lock(&pmc_reserve_mutex); 1111 mutex_lock(&pmc_reserve_mutex);
1112 if (atomic_read(&num_counters) == 0 && 1112 if (atomic_read(&num_events) == 0 &&
1113 reserve_pmc_hardware(perf_counter_interrupt)) 1113 reserve_pmc_hardware(perf_event_interrupt))
1114 err = -EBUSY; 1114 err = -EBUSY;
1115 else 1115 else
1116 atomic_inc(&num_counters); 1116 atomic_inc(&num_events);
1117 mutex_unlock(&pmc_reserve_mutex); 1117 mutex_unlock(&pmc_reserve_mutex);
1118 } 1118 }
1119 counter->destroy = hw_perf_counter_destroy; 1119 event->destroy = hw_perf_event_destroy;
1120 1120
1121 if (err) 1121 if (err)
1122 return ERR_PTR(err); 1122 return ERR_PTR(err);
@@ -1128,24 +1128,24 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
1128 * things if requested. Note that interrupts are hard-disabled 1128 * things if requested. Note that interrupts are hard-disabled
1129 * here so there is no possibility of being interrupted. 1129 * here so there is no possibility of being interrupted.
1130 */ 1130 */
1131static void record_and_restart(struct perf_counter *counter, unsigned long val, 1131static void record_and_restart(struct perf_event *event, unsigned long val,
1132 struct pt_regs *regs, int nmi) 1132 struct pt_regs *regs, int nmi)
1133{ 1133{
1134 u64 period = counter->hw.sample_period; 1134 u64 period = event->hw.sample_period;
1135 s64 prev, delta, left; 1135 s64 prev, delta, left;
1136 int record = 0; 1136 int record = 0;
1137 1137
1138 /* we don't have to worry about interrupts here */ 1138 /* we don't have to worry about interrupts here */
1139 prev = atomic64_read(&counter->hw.prev_count); 1139 prev = atomic64_read(&event->hw.prev_count);
1140 delta = (val - prev) & 0xfffffffful; 1140 delta = (val - prev) & 0xfffffffful;
1141 atomic64_add(delta, &counter->count); 1141 atomic64_add(delta, &event->count);
1142 1142
1143 /* 1143 /*
1144 * See if the total period for this counter has expired, 1144 * See if the total period for this event has expired,
1145 * and update for the next period. 1145 * and update for the next period.
1146 */ 1146 */
1147 val = 0; 1147 val = 0;
1148 left = atomic64_read(&counter->hw.period_left) - delta; 1148 left = atomic64_read(&event->hw.period_left) - delta;
1149 if (period) { 1149 if (period) {
1150 if (left <= 0) { 1150 if (left <= 0) {
1151 left += period; 1151 left += period;
@@ -1163,18 +1163,18 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
1163 if (record) { 1163 if (record) {
1164 struct perf_sample_data data = { 1164 struct perf_sample_data data = {
1165 .addr = 0, 1165 .addr = 0,
1166 .period = counter->hw.last_period, 1166 .period = event->hw.last_period,
1167 }; 1167 };
1168 1168
1169 if (counter->attr.sample_type & PERF_SAMPLE_ADDR) 1169 if (event->attr.sample_type & PERF_SAMPLE_ADDR)
1170 perf_get_data_addr(regs, &data.addr); 1170 perf_get_data_addr(regs, &data.addr);
1171 1171
1172 if (perf_counter_overflow(counter, nmi, &data, regs)) { 1172 if (perf_event_overflow(event, nmi, &data, regs)) {
1173 /* 1173 /*
1174 * Interrupts are coming too fast - throttle them 1174 * Interrupts are coming too fast - throttle them
1175 * by setting the counter to 0, so it will be 1175 * by setting the event to 0, so it will be
1176 * at least 2^30 cycles until the next interrupt 1176 * at least 2^30 cycles until the next interrupt
1177 * (assuming each counter counts at most 2 counts 1177 * (assuming each event counts at most 2 counts
1178 * per cycle). 1178 * per cycle).
1179 */ 1179 */
1180 val = 0; 1180 val = 0;
@@ -1182,15 +1182,15 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
1182 } 1182 }
1183 } 1183 }
1184 1184
1185 write_pmc(counter->hw.idx, val); 1185 write_pmc(event->hw.idx, val);
1186 atomic64_set(&counter->hw.prev_count, val); 1186 atomic64_set(&event->hw.prev_count, val);
1187 atomic64_set(&counter->hw.period_left, left); 1187 atomic64_set(&event->hw.period_left, left);
1188 perf_counter_update_userpage(counter); 1188 perf_event_update_userpage(event);
1189} 1189}
1190 1190
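(Illustrative aside, not part of this patch.) The throttling branch in record_and_restart() above writes 0 into the PMC; the "at least 2^30 cycles" in its comment follows directly from the counter width:

	/* Arithmetic behind the throttle comment above (not new code): a PMC
	 * that starts at 0 must count 2^31 events before bit 31 is set and
	 * another overflow interrupt can be taken; at no more than 2 counts
	 * per cycle that is at least 2^31 / 2 = 2^30 cycles of breathing room.
	 */
	unsigned long min_cycles = (1UL << 31) / 2;	/* == 1UL << 30 */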
1191/* 1191/*
1192 * Called from generic code to get the misc flags (i.e. processor mode) 1192 * Called from generic code to get the misc flags (i.e. processor mode)
1193 * for an event. 1193 * for an event_id.
1194 */ 1194 */
1195unsigned long perf_misc_flags(struct pt_regs *regs) 1195unsigned long perf_misc_flags(struct pt_regs *regs)
1196{ 1196{
@@ -1198,13 +1198,13 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
1198 1198
1199 if (flags) 1199 if (flags)
1200 return flags; 1200 return flags;
1201 return user_mode(regs) ? PERF_EVENT_MISC_USER : 1201 return user_mode(regs) ? PERF_RECORD_MISC_USER :
1202 PERF_EVENT_MISC_KERNEL; 1202 PERF_RECORD_MISC_KERNEL;
1203} 1203}
1204 1204
1205/* 1205/*
1206 * Called from generic code to get the instruction pointer 1206 * Called from generic code to get the instruction pointer
1207 * for an event. 1207 * for an event_id.
1208 */ 1208 */
1209unsigned long perf_instruction_pointer(struct pt_regs *regs) 1209unsigned long perf_instruction_pointer(struct pt_regs *regs)
1210{ 1210{
@@ -1220,17 +1220,17 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
1220/* 1220/*
1221 * Performance monitor interrupt stuff 1221 * Performance monitor interrupt stuff
1222 */ 1222 */
1223static void perf_counter_interrupt(struct pt_regs *regs) 1223static void perf_event_interrupt(struct pt_regs *regs)
1224{ 1224{
1225 int i; 1225 int i;
1226 struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); 1226 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1227 struct perf_counter *counter; 1227 struct perf_event *event;
1228 unsigned long val; 1228 unsigned long val;
1229 int found = 0; 1229 int found = 0;
1230 int nmi; 1230 int nmi;
1231 1231
1232 if (cpuhw->n_limited) 1232 if (cpuhw->n_limited)
1233 freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), 1233 freeze_limited_events(cpuhw, mfspr(SPRN_PMC5),
1234 mfspr(SPRN_PMC6)); 1234 mfspr(SPRN_PMC6));
1235 1235
1236 perf_read_regs(regs); 1236 perf_read_regs(regs);
@@ -1241,26 +1241,26 @@ static void perf_counter_interrupt(struct pt_regs *regs)
1241 else 1241 else
1242 irq_enter(); 1242 irq_enter();
1243 1243
1244 for (i = 0; i < cpuhw->n_counters; ++i) { 1244 for (i = 0; i < cpuhw->n_events; ++i) {
1245 counter = cpuhw->counter[i]; 1245 event = cpuhw->event[i];
1246 if (!counter->hw.idx || is_limited_pmc(counter->hw.idx)) 1246 if (!event->hw.idx || is_limited_pmc(event->hw.idx))
1247 continue; 1247 continue;
1248 val = read_pmc(counter->hw.idx); 1248 val = read_pmc(event->hw.idx);
1249 if ((int)val < 0) { 1249 if ((int)val < 0) {
1250 /* counter has overflowed */ 1250 /* event has overflowed */
1251 found = 1; 1251 found = 1;
1252 record_and_restart(counter, val, regs, nmi); 1252 record_and_restart(event, val, regs, nmi);
1253 } 1253 }
1254 } 1254 }
1255 1255
1256 /* 1256 /*
1257 * In case we didn't find and reset the counter that caused 1257 * In case we didn't find and reset the event that caused
1258 * the interrupt, scan all counters and reset any that are 1258 * the interrupt, scan all events and reset any that are
1259 * negative, to avoid getting continual interrupts. 1259 * negative, to avoid getting continual interrupts.
1260 * Any that we processed in the previous loop will not be negative. 1260 * Any that we processed in the previous loop will not be negative.
1261 */ 1261 */
1262 if (!found) { 1262 if (!found) {
1263 for (i = 0; i < ppmu->n_counter; ++i) { 1263 for (i = 0; i < ppmu->n_event; ++i) {
1264 if (is_limited_pmc(i + 1)) 1264 if (is_limited_pmc(i + 1))
1265 continue; 1265 continue;
1266 val = read_pmc(i + 1); 1266 val = read_pmc(i + 1);
@@ -1273,7 +1273,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
1273 * Reset MMCR0 to its normal value. This will set PMXE and 1273 * Reset MMCR0 to its normal value. This will set PMXE and
1274 * clear FC (freeze counters) and PMAO (perf mon alert occurred) 1274 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
1275 * and thus allow interrupts to occur again. 1275 * and thus allow interrupts to occur again.
1276 * XXX might want to use MSR.PM to keep the counters frozen until 1276 * XXX might want to use MSR.PM to keep the events frozen until
1277 * we get back out of this interrupt. 1277 * we get back out of this interrupt.
1278 */ 1278 */
1279 write_mmcr0(cpuhw, cpuhw->mmcr[0]); 1279 write_mmcr0(cpuhw, cpuhw->mmcr[0]);
@@ -1284,9 +1284,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
1284 irq_exit(); 1284 irq_exit();
1285} 1285}
1286 1286
1287void hw_perf_counter_setup(int cpu) 1287void hw_perf_event_setup(int cpu)
1288{ 1288{
1289 struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu); 1289 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
1290 1290
1291 if (!ppmu) 1291 if (!ppmu)
1292 return; 1292 return;
@@ -1308,7 +1308,7 @@ int register_power_pmu(struct power_pmu *pmu)
1308 * Use FCHV to ignore kernel events if MSR.HV is set. 1308 * Use FCHV to ignore kernel events if MSR.HV is set.
1309 */ 1309 */
1310 if (mfmsr() & MSR_HV) 1310 if (mfmsr() & MSR_HV)
1311 freeze_counters_kernel = MMCR0_FCHV; 1311 freeze_events_kernel = MMCR0_FCHV;
1312#endif /* CONFIG_PPC64 */ 1312#endif /* CONFIG_PPC64 */
1313 1313
1314 return 0; 1314 return 0;
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 3c90a3d9173e..2a361cdda635 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 31918af3e355..0f4c1c73a6ad 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 867f6f663963..c351b3a57fbb 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index fa21890531da..ca399ba5034c 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 018d094d92f9..28a4daacdc02 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <asm/reg.h> 14#include <asm/reg.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 75dccb71a043..479574413a93 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -9,7 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/perf_counter.h> 12#include <linux/perf_event.h>
13#include <asm/reg.h> 13#include <asm/reg.h>
14#include <asm/cputable.h> 14#include <asm/cputable.h>
15 15
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 465e498bcb33..df45a7449a66 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
53#include <linux/posix-timers.h> 53#include <linux/posix-timers.h>
54#include <linux/irq.h> 54#include <linux/irq.h>
55#include <linux/delay.h> 55#include <linux/delay.h>
56#include <linux/perf_counter.h> 56#include <linux/perf_event.h>
57 57
58#include <asm/io.h> 58#include <asm/io.h>
59#include <asm/processor.h> 59#include <asm/processor.h>
@@ -527,25 +527,25 @@ void __init iSeries_time_init_early(void)
527} 527}
528#endif /* CONFIG_PPC_ISERIES */ 528#endif /* CONFIG_PPC_ISERIES */
529 529
530#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32) 530#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
531DEFINE_PER_CPU(u8, perf_counter_pending); 531DEFINE_PER_CPU(u8, perf_event_pending);
532 532
533void set_perf_counter_pending(void) 533void set_perf_event_pending(void)
534{ 534{
535 get_cpu_var(perf_counter_pending) = 1; 535 get_cpu_var(perf_event_pending) = 1;
536 set_dec(1); 536 set_dec(1);
537 put_cpu_var(perf_counter_pending); 537 put_cpu_var(perf_event_pending);
538} 538}
539 539
540#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending) 540#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
541#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0 541#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
542 542
543#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ 543#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
544 544
545#define test_perf_counter_pending() 0 545#define test_perf_event_pending() 0
546#define clear_perf_counter_pending() 546#define clear_perf_event_pending()
547 547
548#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ 548#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
549 549
550/* 550/*
551 * For iSeries shared processors, we have to let the hypervisor 551 * For iSeries shared processors, we have to let the hypervisor
@@ -573,9 +573,9 @@ void timer_interrupt(struct pt_regs * regs)
573 set_dec(DECREMENTER_MAX); 573 set_dec(DECREMENTER_MAX);
574 574
575#ifdef CONFIG_PPC32 575#ifdef CONFIG_PPC32
576 if (test_perf_counter_pending()) { 576 if (test_perf_event_pending()) {
577 clear_perf_counter_pending(); 577 clear_perf_event_pending();
578 perf_counter_do_pending(); 578 perf_event_do_pending();
579 } 579 }
580 if (atomic_read(&ppc_n_lost_interrupts) != 0) 580 if (atomic_read(&ppc_n_lost_interrupts) != 0)
581 do_IRQ(regs); 581 do_IRQ(regs);
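(Illustrative aside, not part of this patch.) The PPC32 path above delivers deferred perf work by setting a per-CPU flag and then calling set_dec(1), which makes the decrementer expire almost immediately; the next timer_interrupt() notices the flag and runs perf_event_do_pending() in interrupt context:

	/* Sketch of the flow implemented above (not new code):
	 *   set_perf_event_pending()       caller wants work done soon
	 *     -> per-CPU perf_event_pending = 1
	 *     -> set_dec(1)                decrementer fires almost at once
	 *   timer_interrupt()
	 *     -> test_perf_event_pending() is true
	 *     -> clear_perf_event_pending()
	 *     -> perf_event_do_pending()   run the deferred perf work
	 */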
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 830bef0a1131..e7dae82c1285 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -29,7 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/kprobes.h> 30#include <linux/kprobes.h>
31#include <linux/kdebug.h> 31#include <linux/kdebug.h>
32#include <linux/perf_counter.h> 32#include <linux/perf_event.h>
33 33
34#include <asm/firmware.h> 34#include <asm/firmware.h>
35#include <asm/page.h> 35#include <asm/page.h>
@@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
171 die("Weird page fault", regs, SIGSEGV); 171 die("Weird page fault", regs, SIGSEGV);
172 } 172 }
173 173
174 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 174 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
175 175
176 /* When running in the kernel we expect faults to occur only to 176 /* When running in the kernel we expect faults to occur only to
177 * addresses in user space. All other faults represent errors in the 177 * addresses in user space. All other faults represent errors in the
@@ -312,7 +312,7 @@ good_area:
312 } 312 }
313 if (ret & VM_FAULT_MAJOR) { 313 if (ret & VM_FAULT_MAJOR) {
314 current->maj_flt++; 314 current->maj_flt++;
315 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 315 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
316 regs, address); 316 regs, address);
317#ifdef CONFIG_PPC_SMLPAR 317#ifdef CONFIG_PPC_SMLPAR
318 if (firmware_has_feature(FW_FEATURE_CMO)) { 318 if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -323,7 +323,7 @@ good_area:
323#endif 323#endif
324 } else { 324 } else {
325 current->min_flt++; 325 current->min_flt++;
326 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 326 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
327 regs, address); 327 regs, address);
328 } 328 }
329 up_read(&mm->mmap_sem); 329 up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 9efc8bda01b4..e382cae678b8 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -280,9 +280,9 @@ config PPC_HAVE_PMU_SUPPORT
280 280
281config PPC_PERF_CTRS 281config PPC_PERF_CTRS
282 def_bool y 282 def_bool y
283 depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT 283 depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT
284 help 284 help
285 This enables the powerpc-specific perf_counter back-end. 285 This enables the powerpc-specific perf_event back-end.
286 286
287config SMP 287config SMP
288 depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE 288 depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1c866efd217d..43c0acad7160 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -94,7 +94,7 @@ config S390
94 select HAVE_KVM if 64BIT 94 select HAVE_KVM if 64BIT
95 select HAVE_ARCH_TRACEHOOK 95 select HAVE_ARCH_TRACEHOOK
96 select INIT_ALL_POSSIBLE 96 select INIT_ALL_POSSIBLE
97 select HAVE_PERF_COUNTERS 97 select HAVE_PERF_EVENTS
98 98
99config SCHED_OMIT_FRAME_POINTER 99config SCHED_OMIT_FRAME_POINTER
100 bool 100 bool
diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h
deleted file mode 100644
index 7015188c2cc2..000000000000
--- a/arch/s390/include/asm/perf_counter.h
+++ /dev/null
@@ -1,10 +0,0 @@
1/*
2 * Performance counter support - s390 specific definitions.
3 *
4 * Copyright 2009 Martin Schwidefsky, IBM Corporation.
5 */
6
7static inline void set_perf_counter_pending(void) {}
8static inline void clear_perf_counter_pending(void) {}
9
10#define PERF_COUNTER_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
new file mode 100644
index 000000000000..3840cbe77637
--- /dev/null
+++ b/arch/s390/include/asm/perf_event.h
@@ -0,0 +1,10 @@
1/*
2 * Performance event support - s390 specific definitions.
3 *
4 * Copyright 2009 Martin Schwidefsky, IBM Corporation.
5 */
6
7static inline void set_perf_event_pending(void) {}
8static inline void clear_perf_event_pending(void) {}
9
10#define PERF_EVENT_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index c80602d7c880..cb5232df151e 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -268,7 +268,7 @@
268#define __NR_preadv 328 268#define __NR_preadv 328
269#define __NR_pwritev 329 269#define __NR_pwritev 329
270#define __NR_rt_tgsigqueueinfo 330 270#define __NR_rt_tgsigqueueinfo 330
271#define __NR_perf_counter_open 331 271#define __NR_perf_event_open 331
272#define NR_syscalls 332 272#define NR_syscalls 332
273 273
274/* 274/*
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 88a83366819f..624790042d41 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1832,11 +1832,11 @@ compat_sys_rt_tgsigqueueinfo_wrapper:
1832 llgtr %r5,%r5 # struct compat_siginfo * 1832 llgtr %r5,%r5 # struct compat_siginfo *
1833 jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call 1833 jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call
1834 1834
1835 .globl sys_perf_counter_open_wrapper 1835 .globl sys_perf_event_open_wrapper
1836sys_perf_counter_open_wrapper: 1836sys_perf_event_open_wrapper:
1837 llgtr %r2,%r2 # const struct perf_counter_attr * 1837 llgtr %r2,%r2 # const struct perf_event_attr *
1838 lgfr %r3,%r3 # pid_t 1838 lgfr %r3,%r3 # pid_t
1839 lgfr %r4,%r4 # int 1839 lgfr %r4,%r4 # int
1840 lgfr %r5,%r5 # int 1840 lgfr %r5,%r5 # int
1841 llgfr %r6,%r6 # unsigned long 1841 llgfr %r6,%r6 # unsigned long
1842 jg sys_perf_counter_open # branch to system call 1842 jg sys_perf_event_open # branch to system call
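(Illustrative aside, not part of this patch.) The s390 compat wrapper above only sign- or zero-extends the five arguments; its register annotations correspond to the prototype of the renamed syscall, which, assuming the usual SYSCALL_DEFINE5 form and parameter names (not shown in this diff), looks roughly like:

	long sys_perf_event_open(struct perf_event_attr __user *attr_uptr,
				 pid_t pid, int cpu, int group_fd,
				 unsigned long flags);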
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index ad1acd200385..0b5083681e77 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -339,4 +339,4 @@ SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
339SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) 339SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
340SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) 340SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
341SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ 341SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
342SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper) 342SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1abbadd497e1..6d507462967a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -10,7 +10,7 @@
10 * Copyright (C) 1995 Linus Torvalds 10 * Copyright (C) 1995 Linus Torvalds
11 */ 11 */
12 12
13#include <linux/perf_counter.h> 13#include <linux/perf_event.h>
14#include <linux/signal.h> 14#include <linux/signal.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -306,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
306 * interrupts again and then search the VMAs 306 * interrupts again and then search the VMAs
307 */ 307 */
308 local_irq_enable(); 308 local_irq_enable();
309 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 309 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
310 down_read(&mm->mmap_sem); 310 down_read(&mm->mmap_sem);
311 311
312 si_code = SEGV_MAPERR; 312 si_code = SEGV_MAPERR;
@@ -366,11 +366,11 @@ good_area:
366 } 366 }
367 if (fault & VM_FAULT_MAJOR) { 367 if (fault & VM_FAULT_MAJOR) {
368 tsk->maj_flt++; 368 tsk->maj_flt++;
369 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 369 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
370 regs, address); 370 regs, address);
371 } else { 371 } else {
372 tsk->min_flt++; 372 tsk->min_flt++;
373 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 373 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
374 regs, address); 374 regs, address);
375 } 375 }
376 up_read(&mm->mmap_sem); 376 up_read(&mm->mmap_sem);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 4df3570fe511..b940424f8ccc 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,7 +16,7 @@ config SUPERH
16 select HAVE_IOREMAP_PROT if MMU 16 select HAVE_IOREMAP_PROT if MMU
17 select HAVE_ARCH_TRACEHOOK 17 select HAVE_ARCH_TRACEHOOK
18 select HAVE_DMA_API_DEBUG 18 select HAVE_DMA_API_DEBUG
19 select HAVE_PERF_COUNTERS 19 select HAVE_PERF_EVENTS
20 select HAVE_KERNEL_GZIP 20 select HAVE_KERNEL_GZIP
21 select HAVE_KERNEL_BZIP2 21 select HAVE_KERNEL_BZIP2
22 select HAVE_KERNEL_LZMA 22 select HAVE_KERNEL_LZMA
diff --git a/arch/sh/include/asm/perf_counter.h b/arch/sh/include/asm/perf_counter.h
deleted file mode 100644
index d8e6bb9c0ccc..000000000000
--- a/arch/sh/include/asm/perf_counter.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __ASM_SH_PERF_COUNTER_H
2#define __ASM_SH_PERF_COUNTER_H
3
4/* SH only supports software counters through this interface. */
5static inline void set_perf_counter_pending(void) {}
6
7#define PERF_COUNTER_INDEX_OFFSET 0
8
9#endif /* __ASM_SH_PERF_COUNTER_H */
diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h
new file mode 100644
index 000000000000..11a302297ab7
--- /dev/null
+++ b/arch/sh/include/asm/perf_event.h
@@ -0,0 +1,9 @@
1#ifndef __ASM_SH_PERF_EVENT_H
2#define __ASM_SH_PERF_EVENT_H
3
4/* SH only supports software events through this interface. */
5static inline void set_perf_event_pending(void) {}
6
7#define PERF_EVENT_INDEX_OFFSET 0
8
9#endif /* __ASM_SH_PERF_EVENT_H */
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 925dd40d9d55..f3fd1b9eb6b1 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -344,7 +344,7 @@
344#define __NR_preadv 333 344#define __NR_preadv 333
345#define __NR_pwritev 334 345#define __NR_pwritev 334
346#define __NR_rt_tgsigqueueinfo 335 346#define __NR_rt_tgsigqueueinfo 335
347#define __NR_perf_counter_open 336 347#define __NR_perf_event_open 336
348 348
349#define NR_syscalls 337 349#define NR_syscalls 337
350 350
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 2b84bc916bc5..343ce8f073ea 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -384,7 +384,7 @@
384#define __NR_preadv 361 384#define __NR_preadv 361
385#define __NR_pwritev 362 385#define __NR_pwritev 362
386#define __NR_rt_tgsigqueueinfo 363 386#define __NR_rt_tgsigqueueinfo 363
387#define __NR_perf_counter_open 364 387#define __NR_perf_event_open 364
388 388
389#ifdef __KERNEL__ 389#ifdef __KERNEL__
390 390
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 16ba225ede89..19fd11dd9871 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -352,4 +352,4 @@ ENTRY(sys_call_table)
352 .long sys_preadv 352 .long sys_preadv
353 .long sys_pwritev 353 .long sys_pwritev
354 .long sys_rt_tgsigqueueinfo /* 335 */ 354 .long sys_rt_tgsigqueueinfo /* 335 */
355 .long sys_perf_counter_open 355 .long sys_perf_event_open
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index af6fb7410c21..5bfde6c77498 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -390,4 +390,4 @@ sys_call_table:
390 .long sys_preadv 390 .long sys_preadv
391 .long sys_pwritev 391 .long sys_pwritev
392 .long sys_rt_tgsigqueueinfo 392 .long sys_rt_tgsigqueueinfo
393 .long sys_perf_counter_open 393 .long sys_perf_event_open
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 781b413ff82d..47530104e0ad 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,7 +15,7 @@
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/hardirq.h> 16#include <linux/hardirq.h>
17#include <linux/kprobes.h> 17#include <linux/kprobes.h>
18#include <linux/perf_counter.h> 18#include <linux/perf_event.h>
19#include <asm/io_trapped.h> 19#include <asm/io_trapped.h>
20#include <asm/system.h> 20#include <asm/system.h>
21#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
@@ -157,7 +157,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
157 if ((regs->sr & SR_IMASK) != SR_IMASK) 157 if ((regs->sr & SR_IMASK) != SR_IMASK)
158 local_irq_enable(); 158 local_irq_enable();
159 159
160 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 160 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
161 161
162 /* 162 /*
163 * If we're in an interrupt, have no user context or are running 163 * If we're in an interrupt, have no user context or are running
@@ -208,11 +208,11 @@ survive:
208 } 208 }
209 if (fault & VM_FAULT_MAJOR) { 209 if (fault & VM_FAULT_MAJOR) {
210 tsk->maj_flt++; 210 tsk->maj_flt++;
211 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 211 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
212 regs, address); 212 regs, address);
213 } else { 213 } else {
214 tsk->min_flt++; 214 tsk->min_flt++;
215 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 215 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
216 regs, address); 216 regs, address);
217 } 217 }
218 218
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 2dcc48528f7a..de0b0e881823 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -20,7 +20,7 @@
20#include <linux/mman.h> 20#include <linux/mman.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <linux/perf_counter.h> 23#include <linux/perf_event.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/io.h> 26#include <asm/io.h>
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
116 /* Not an IO address, so reenable interrupts */ 116 /* Not an IO address, so reenable interrupts */
117 local_irq_enable(); 117 local_irq_enable();
118 118
119 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 119 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
120 120
121 /* 121 /*
122 * If we're in an interrupt or have no user 122 * If we're in an interrupt or have no user
@@ -201,11 +201,11 @@ survive:
201 201
202 if (fault & VM_FAULT_MAJOR) { 202 if (fault & VM_FAULT_MAJOR) {
203 tsk->maj_flt++; 203 tsk->maj_flt++;
204 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 204 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
205 regs, address); 205 regs, address);
206 } else { 206 } else {
207 tsk->min_flt++; 207 tsk->min_flt++;
208 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 208 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
209 regs, address); 209 regs, address);
210 } 210 }
211 211
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 86b82348b97c..97fca4695e0b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,7 +25,7 @@ config SPARC
25 select ARCH_WANT_OPTIONAL_GPIOLIB 25 select ARCH_WANT_OPTIONAL_GPIOLIB
26 select RTC_CLASS 26 select RTC_CLASS
27 select RTC_DRV_M48T59 27 select RTC_DRV_M48T59
28 select HAVE_PERF_COUNTERS 28 select HAVE_PERF_EVENTS
29 select HAVE_DMA_ATTRS 29 select HAVE_DMA_ATTRS
30 select HAVE_DMA_API_DEBUG 30 select HAVE_DMA_API_DEBUG
31 31
@@ -47,7 +47,7 @@ config SPARC64
47 select RTC_DRV_BQ4802 47 select RTC_DRV_BQ4802
48 select RTC_DRV_SUN4V 48 select RTC_DRV_SUN4V
49 select RTC_DRV_STARFIRE 49 select RTC_DRV_STARFIRE
50 select HAVE_PERF_COUNTERS 50 select HAVE_PERF_EVENTS
51 51
52config ARCH_DEFCONFIG 52config ARCH_DEFCONFIG
53 string 53 string
diff --git a/arch/sparc/include/asm/perf_counter.h b/arch/sparc/include/asm/perf_counter.h
deleted file mode 100644
index 5d7a8ca0e491..000000000000
--- a/arch/sparc/include/asm/perf_counter.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __ASM_SPARC_PERF_COUNTER_H
2#define __ASM_SPARC_PERF_COUNTER_H
3
4extern void set_perf_counter_pending(void);
5
6#define PERF_COUNTER_INDEX_OFFSET 0
7
8#ifdef CONFIG_PERF_COUNTERS
9extern void init_hw_perf_counters(void);
10#else
11static inline void init_hw_perf_counters(void) { }
12#endif
13
14#endif
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
new file mode 100644
index 000000000000..7e2669894ce8
--- /dev/null
+++ b/arch/sparc/include/asm/perf_event.h
@@ -0,0 +1,14 @@
1#ifndef __ASM_SPARC_PERF_EVENT_H
2#define __ASM_SPARC_PERF_EVENT_H
3
4extern void set_perf_event_pending(void);
5
6#define PERF_EVENT_INDEX_OFFSET 0
7
8#ifdef CONFIG_PERF_EVENTS
9extern void init_hw_perf_events(void);
10#else
11static inline void init_hw_perf_events(void) { }
12#endif
13
14#endif
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 706df669f3b8..42f2316c3eaa 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -395,7 +395,7 @@
395#define __NR_preadv 324 395#define __NR_preadv 324
396#define __NR_pwritev 325 396#define __NR_pwritev 325
397#define __NR_rt_tgsigqueueinfo 326 397#define __NR_rt_tgsigqueueinfo 326
398#define __NR_perf_counter_open 327 398#define __NR_perf_event_open 327
399 399
400#define NR_SYSCALLS 328 400#define NR_SYSCALLS 328
401 401
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 247cc620cee5..3a048fad7ee2 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -104,5 +104,5 @@ obj-$(CONFIG_AUDIT) += audit.o
104audit--$(CONFIG_AUDIT) := compat_audit.o 104audit--$(CONFIG_AUDIT) := compat_audit.o
105obj-$(CONFIG_COMPAT) += $(audit--y) 105obj-$(CONFIG_COMPAT) += $(audit--y)
106 106
107pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o 107pc--$(CONFIG_PERF_EVENTS) := perf_event.o
108obj-$(CONFIG_SPARC64) += $(pc--y) 108obj-$(CONFIG_SPARC64) += $(pc--y)
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 378eb53e0776..b129611590a4 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -19,7 +19,7 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/smp.h> 20#include <linux/smp.h>
21 21
22#include <asm/perf_counter.h> 22#include <asm/perf_event.h>
23#include <asm/ptrace.h> 23#include <asm/ptrace.h>
24#include <asm/local.h> 24#include <asm/local.h>
25#include <asm/pcr.h> 25#include <asm/pcr.h>
@@ -265,7 +265,7 @@ int __init nmi_init(void)
265 } 265 }
266 } 266 }
267 if (!err) 267 if (!err)
268 init_hw_perf_counters(); 268 init_hw_perf_events();
269 269
270 return err; 270 return err;
271} 271}
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 68ff00107073..2d94e7a03af5 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/irq.h> 8#include <linux/irq.h>
9 9
10#include <linux/perf_counter.h> 10#include <linux/perf_event.h>
11 11
12#include <asm/pil.h> 12#include <asm/pil.h>
13#include <asm/pcr.h> 13#include <asm/pcr.h>
@@ -15,7 +15,7 @@
15 15
16/* This code is shared between various users of the performance 16/* This code is shared between various users of the performance
17 * counters. Users will be oprofile, pseudo-NMI watchdog, and the 17 * counters. Users will be oprofile, pseudo-NMI watchdog, and the
18 * perf_counter support layer. 18 * perf_event support layer.
19 */ 19 */
20 20
21#define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE) 21#define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
@@ -42,14 +42,14 @@ void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
42 42
43 old_regs = set_irq_regs(regs); 43 old_regs = set_irq_regs(regs);
44 irq_enter(); 44 irq_enter();
45#ifdef CONFIG_PERF_COUNTERS 45#ifdef CONFIG_PERF_EVENTS
46 perf_counter_do_pending(); 46 perf_event_do_pending();
47#endif 47#endif
48 irq_exit(); 48 irq_exit();
49 set_irq_regs(old_regs); 49 set_irq_regs(old_regs);
50} 50}
51 51
52void set_perf_counter_pending(void) 52void set_perf_event_pending(void)
53{ 53{
54 set_softint(1 << PIL_DEFERRED_PCR_WORK); 54 set_softint(1 << PIL_DEFERRED_PCR_WORK);
55} 55}
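Why the indirection: the PMU overflow path on sparc64 runs at NMI level (perf_event_nmi_handler, further down in this patch), where the core code cannot deliver wakeups or signals. set_perf_event_pending() therefore only raises a low-priority softint, and its handler calls perf_event_do_pending() from ordinary interrupt context; the x86 counterpart is the perf_pending_interrupt self-IPI wired up in entry_arch.h later in this diff. Condensed shape of the sparc64 side, taken from the hunk above (the real code also saves and restores the irq regs pointer with set_irq_regs()):

void set_perf_event_pending(void)		/* callable from NMI context */
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}

void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	irq_enter();
	perf_event_do_pending();	/* wakeups and other work unsafe at NMI level */
	irq_exit();
}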
diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_event.c
index b1265ce8a053..2d6a1b10c81d 100644
--- a/arch/sparc/kernel/perf_counter.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1,8 +1,8 @@
1/* Performance counter support for sparc64. 1/* Performance event support for sparc64.
2 * 2 *
3 * Copyright (C) 2009 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
4 * 4 *
5 * This code is based almost entirely upon the x86 perf counter 5 * This code is based almost entirely upon the x86 perf event
6 * code, which is: 6 * code, which is:
7 * 7 *
8 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 8 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
@@ -12,7 +12,7 @@
12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
13 */ 13 */
14 14
15#include <linux/perf_counter.h> 15#include <linux/perf_event.h>
16#include <linux/kprobes.h> 16#include <linux/kprobes.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/kdebug.h> 18#include <linux/kdebug.h>
@@ -46,19 +46,19 @@
46 * normal code. 46 * normal code.
47 */ 47 */
48 48
49#define MAX_HWCOUNTERS 2 49#define MAX_HWEVENTS 2
50#define MAX_PERIOD ((1UL << 32) - 1) 50#define MAX_PERIOD ((1UL << 32) - 1)
51 51
52#define PIC_UPPER_INDEX 0 52#define PIC_UPPER_INDEX 0
53#define PIC_LOWER_INDEX 1 53#define PIC_LOWER_INDEX 1
54 54
55struct cpu_hw_counters { 55struct cpu_hw_events {
56 struct perf_counter *counters[MAX_HWCOUNTERS]; 56 struct perf_event *events[MAX_HWEVENTS];
57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; 57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; 58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
59 int enabled; 59 int enabled;
60}; 60};
61DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, }; 61DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
62 62
63struct perf_event_map { 63struct perf_event_map {
64 u16 encoding; 64 u16 encoding;
@@ -87,9 +87,9 @@ static const struct perf_event_map ultra3i_perfmon_event_map[] = {
87 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, 87 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
88}; 88};
89 89
90static const struct perf_event_map *ultra3i_event_map(int event) 90static const struct perf_event_map *ultra3i_event_map(int event_id)
91{ 91{
92 return &ultra3i_perfmon_event_map[event]; 92 return &ultra3i_perfmon_event_map[event_id];
93} 93}
94 94
95static const struct sparc_pmu ultra3i_pmu = { 95static const struct sparc_pmu ultra3i_pmu = {
@@ -111,9 +111,9 @@ static const struct perf_event_map niagara2_perfmon_event_map[] = {
111 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, 111 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
112}; 112};
113 113
114static const struct perf_event_map *niagara2_event_map(int event) 114static const struct perf_event_map *niagara2_event_map(int event_id)
115{ 115{
116 return &niagara2_perfmon_event_map[event]; 116 return &niagara2_perfmon_event_map[event_id];
117} 117}
118 118
119static const struct sparc_pmu niagara2_pmu = { 119static const struct sparc_pmu niagara2_pmu = {
@@ -130,13 +130,13 @@ static const struct sparc_pmu niagara2_pmu = {
130 130
131static const struct sparc_pmu *sparc_pmu __read_mostly; 131static const struct sparc_pmu *sparc_pmu __read_mostly;
132 132
133static u64 event_encoding(u64 event, int idx) 133static u64 event_encoding(u64 event_id, int idx)
134{ 134{
135 if (idx == PIC_UPPER_INDEX) 135 if (idx == PIC_UPPER_INDEX)
136 event <<= sparc_pmu->upper_shift; 136 event_id <<= sparc_pmu->upper_shift;
137 else 137 else
138 event <<= sparc_pmu->lower_shift; 138 event_id <<= sparc_pmu->lower_shift;
139 return event; 139 return event_id;
140} 140}
141 141
142static u64 mask_for_index(int idx) 142static u64 mask_for_index(int idx)
@@ -151,7 +151,7 @@ static u64 nop_for_index(int idx)
151 sparc_pmu->lower_nop, idx); 151 sparc_pmu->lower_nop, idx);
152} 152}
153 153
154static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc, 154static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
155 int idx) 155 int idx)
156{ 156{
157 u64 val, mask = mask_for_index(idx); 157 u64 val, mask = mask_for_index(idx);
@@ -160,7 +160,7 @@ static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
160 pcr_ops->write((val & ~mask) | hwc->config); 160 pcr_ops->write((val & ~mask) | hwc->config);
161} 161}
162 162
163static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc, 163static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
164 int idx) 164 int idx)
165{ 165{
166 u64 mask = mask_for_index(idx); 166 u64 mask = mask_for_index(idx);
@@ -172,7 +172,7 @@ static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
172 172
173void hw_perf_enable(void) 173void hw_perf_enable(void)
174{ 174{
175 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 175 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
176 u64 val; 176 u64 val;
177 int i; 177 int i;
178 178
@@ -184,9 +184,9 @@ void hw_perf_enable(void)
184 184
185 val = pcr_ops->read(); 185 val = pcr_ops->read();
186 186
187 for (i = 0; i < MAX_HWCOUNTERS; i++) { 187 for (i = 0; i < MAX_HWEVENTS; i++) {
188 struct perf_counter *cp = cpuc->counters[i]; 188 struct perf_event *cp = cpuc->events[i];
189 struct hw_perf_counter *hwc; 189 struct hw_perf_event *hwc;
190 190
191 if (!cp) 191 if (!cp)
192 continue; 192 continue;
@@ -199,7 +199,7 @@ void hw_perf_enable(void)
199 199
200void hw_perf_disable(void) 200void hw_perf_disable(void)
201{ 201{
202 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 202 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
203 u64 val; 203 u64 val;
204 204
205 if (!cpuc->enabled) 205 if (!cpuc->enabled)
@@ -241,8 +241,8 @@ static void write_pmc(int idx, u64 val)
241 write_pic(pic); 241 write_pic(pic);
242} 242}
243 243
244static int sparc_perf_counter_set_period(struct perf_counter *counter, 244static int sparc_perf_event_set_period(struct perf_event *event,
245 struct hw_perf_counter *hwc, int idx) 245 struct hw_perf_event *hwc, int idx)
246{ 246{
247 s64 left = atomic64_read(&hwc->period_left); 247 s64 left = atomic64_read(&hwc->period_left);
248 s64 period = hwc->sample_period; 248 s64 period = hwc->sample_period;
@@ -268,33 +268,33 @@ static int sparc_perf_counter_set_period(struct perf_counter *counter,
268 268
269 write_pmc(idx, (u64)(-left) & 0xffffffff); 269 write_pmc(idx, (u64)(-left) & 0xffffffff);
270 270
271 perf_counter_update_userpage(counter); 271 perf_event_update_userpage(event);
272 272
273 return ret; 273 return ret;
274} 274}
275 275
276static int sparc_pmu_enable(struct perf_counter *counter) 276static int sparc_pmu_enable(struct perf_event *event)
277{ 277{
278 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 278 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
279 struct hw_perf_counter *hwc = &counter->hw; 279 struct hw_perf_event *hwc = &event->hw;
280 int idx = hwc->idx; 280 int idx = hwc->idx;
281 281
282 if (test_and_set_bit(idx, cpuc->used_mask)) 282 if (test_and_set_bit(idx, cpuc->used_mask))
283 return -EAGAIN; 283 return -EAGAIN;
284 284
285 sparc_pmu_disable_counter(hwc, idx); 285 sparc_pmu_disable_event(hwc, idx);
286 286
287 cpuc->counters[idx] = counter; 287 cpuc->events[idx] = event;
288 set_bit(idx, cpuc->active_mask); 288 set_bit(idx, cpuc->active_mask);
289 289
290 sparc_perf_counter_set_period(counter, hwc, idx); 290 sparc_perf_event_set_period(event, hwc, idx);
291 sparc_pmu_enable_counter(hwc, idx); 291 sparc_pmu_enable_event(hwc, idx);
292 perf_counter_update_userpage(counter); 292 perf_event_update_userpage(event);
293 return 0; 293 return 0;
294} 294}
295 295
296static u64 sparc_perf_counter_update(struct perf_counter *counter, 296static u64 sparc_perf_event_update(struct perf_event *event,
297 struct hw_perf_counter *hwc, int idx) 297 struct hw_perf_event *hwc, int idx)
298{ 298{
299 int shift = 64 - 32; 299 int shift = 64 - 32;
300 u64 prev_raw_count, new_raw_count; 300 u64 prev_raw_count, new_raw_count;
@@ -311,79 +311,79 @@ again:
311 delta = (new_raw_count << shift) - (prev_raw_count << shift); 311 delta = (new_raw_count << shift) - (prev_raw_count << shift);
312 delta >>= shift; 312 delta >>= shift;
313 313
314 atomic64_add(delta, &counter->count); 314 atomic64_add(delta, &event->count);
315 atomic64_sub(delta, &hwc->period_left); 315 atomic64_sub(delta, &hwc->period_left);
316 316
317 return new_raw_count; 317 return new_raw_count;
318} 318}
319 319
320static void sparc_pmu_disable(struct perf_counter *counter) 320static void sparc_pmu_disable(struct perf_event *event)
321{ 321{
322 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 322 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
323 struct hw_perf_counter *hwc = &counter->hw; 323 struct hw_perf_event *hwc = &event->hw;
324 int idx = hwc->idx; 324 int idx = hwc->idx;
325 325
326 clear_bit(idx, cpuc->active_mask); 326 clear_bit(idx, cpuc->active_mask);
327 sparc_pmu_disable_counter(hwc, idx); 327 sparc_pmu_disable_event(hwc, idx);
328 328
329 barrier(); 329 barrier();
330 330
331 sparc_perf_counter_update(counter, hwc, idx); 331 sparc_perf_event_update(event, hwc, idx);
332 cpuc->counters[idx] = NULL; 332 cpuc->events[idx] = NULL;
333 clear_bit(idx, cpuc->used_mask); 333 clear_bit(idx, cpuc->used_mask);
334 334
335 perf_counter_update_userpage(counter); 335 perf_event_update_userpage(event);
336} 336}
337 337
338static void sparc_pmu_read(struct perf_counter *counter) 338static void sparc_pmu_read(struct perf_event *event)
339{ 339{
340 struct hw_perf_counter *hwc = &counter->hw; 340 struct hw_perf_event *hwc = &event->hw;
341 sparc_perf_counter_update(counter, hwc, hwc->idx); 341 sparc_perf_event_update(event, hwc, hwc->idx);
342} 342}
343 343
344static void sparc_pmu_unthrottle(struct perf_counter *counter) 344static void sparc_pmu_unthrottle(struct perf_event *event)
345{ 345{
346 struct hw_perf_counter *hwc = &counter->hw; 346 struct hw_perf_event *hwc = &event->hw;
347 sparc_pmu_enable_counter(hwc, hwc->idx); 347 sparc_pmu_enable_event(hwc, hwc->idx);
348} 348}
349 349
350static atomic_t active_counters = ATOMIC_INIT(0); 350static atomic_t active_events = ATOMIC_INIT(0);
351static DEFINE_MUTEX(pmc_grab_mutex); 351static DEFINE_MUTEX(pmc_grab_mutex);
352 352
353void perf_counter_grab_pmc(void) 353void perf_event_grab_pmc(void)
354{ 354{
355 if (atomic_inc_not_zero(&active_counters)) 355 if (atomic_inc_not_zero(&active_events))
356 return; 356 return;
357 357
358 mutex_lock(&pmc_grab_mutex); 358 mutex_lock(&pmc_grab_mutex);
359 if (atomic_read(&active_counters) == 0) { 359 if (atomic_read(&active_events) == 0) {
360 if (atomic_read(&nmi_active) > 0) { 360 if (atomic_read(&nmi_active) > 0) {
361 on_each_cpu(stop_nmi_watchdog, NULL, 1); 361 on_each_cpu(stop_nmi_watchdog, NULL, 1);
362 BUG_ON(atomic_read(&nmi_active) != 0); 362 BUG_ON(atomic_read(&nmi_active) != 0);
363 } 363 }
364 atomic_inc(&active_counters); 364 atomic_inc(&active_events);
365 } 365 }
366 mutex_unlock(&pmc_grab_mutex); 366 mutex_unlock(&pmc_grab_mutex);
367} 367}
368 368
369void perf_counter_release_pmc(void) 369void perf_event_release_pmc(void)
370{ 370{
371 if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) { 371 if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
372 if (atomic_read(&nmi_active) == 0) 372 if (atomic_read(&nmi_active) == 0)
373 on_each_cpu(start_nmi_watchdog, NULL, 1); 373 on_each_cpu(start_nmi_watchdog, NULL, 1);
374 mutex_unlock(&pmc_grab_mutex); 374 mutex_unlock(&pmc_grab_mutex);
375 } 375 }
376} 376}
377 377
378static void hw_perf_counter_destroy(struct perf_counter *counter) 378static void hw_perf_event_destroy(struct perf_event *event)
379{ 379{
380 perf_counter_release_pmc(); 380 perf_event_release_pmc();
381} 381}
382 382
383static int __hw_perf_counter_init(struct perf_counter *counter) 383static int __hw_perf_event_init(struct perf_event *event)
384{ 384{
385 struct perf_counter_attr *attr = &counter->attr; 385 struct perf_event_attr *attr = &event->attr;
386 struct hw_perf_counter *hwc = &counter->hw; 386 struct hw_perf_event *hwc = &event->hw;
387 const struct perf_event_map *pmap; 387 const struct perf_event_map *pmap;
388 u64 enc; 388 u64 enc;
389 389
@@ -396,8 +396,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
396 if (attr->config >= sparc_pmu->max_events) 396 if (attr->config >= sparc_pmu->max_events)
397 return -EINVAL; 397 return -EINVAL;
398 398
399 perf_counter_grab_pmc(); 399 perf_event_grab_pmc();
400 counter->destroy = hw_perf_counter_destroy; 400 event->destroy = hw_perf_event_destroy;
401 401
402 /* We save the enable bits in the config_base. So to 402 /* We save the enable bits in the config_base. So to
403 * turn off sampling just write 'config', and to enable 403 * turn off sampling just write 'config', and to enable
@@ -439,16 +439,16 @@ static const struct pmu pmu = {
439 .unthrottle = sparc_pmu_unthrottle, 439 .unthrottle = sparc_pmu_unthrottle,
440}; 440};
441 441
442const struct pmu *hw_perf_counter_init(struct perf_counter *counter) 442const struct pmu *hw_perf_event_init(struct perf_event *event)
443{ 443{
444 int err = __hw_perf_counter_init(counter); 444 int err = __hw_perf_event_init(event);
445 445
446 if (err) 446 if (err)
447 return ERR_PTR(err); 447 return ERR_PTR(err);
448 return &pmu; 448 return &pmu;
449} 449}
450 450
451void perf_counter_print_debug(void) 451void perf_event_print_debug(void)
452{ 452{
453 unsigned long flags; 453 unsigned long flags;
454 u64 pcr, pic; 454 u64 pcr, pic;
@@ -471,16 +471,16 @@ void perf_counter_print_debug(void)
471 local_irq_restore(flags); 471 local_irq_restore(flags);
472} 472}
473 473
474static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, 474static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
475 unsigned long cmd, void *__args) 475 unsigned long cmd, void *__args)
476{ 476{
477 struct die_args *args = __args; 477 struct die_args *args = __args;
478 struct perf_sample_data data; 478 struct perf_sample_data data;
479 struct cpu_hw_counters *cpuc; 479 struct cpu_hw_events *cpuc;
480 struct pt_regs *regs; 480 struct pt_regs *regs;
481 int idx; 481 int idx;
482 482
483 if (!atomic_read(&active_counters)) 483 if (!atomic_read(&active_events))
484 return NOTIFY_DONE; 484 return NOTIFY_DONE;
485 485
486 switch (cmd) { 486 switch (cmd) {
@@ -495,32 +495,32 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
495 495
496 data.addr = 0; 496 data.addr = 0;
497 497
498 cpuc = &__get_cpu_var(cpu_hw_counters); 498 cpuc = &__get_cpu_var(cpu_hw_events);
499 for (idx = 0; idx < MAX_HWCOUNTERS; idx++) { 499 for (idx = 0; idx < MAX_HWEVENTS; idx++) {
500 struct perf_counter *counter = cpuc->counters[idx]; 500 struct perf_event *event = cpuc->events[idx];
501 struct hw_perf_counter *hwc; 501 struct hw_perf_event *hwc;
502 u64 val; 502 u64 val;
503 503
504 if (!test_bit(idx, cpuc->active_mask)) 504 if (!test_bit(idx, cpuc->active_mask))
505 continue; 505 continue;
506 hwc = &counter->hw; 506 hwc = &event->hw;
507 val = sparc_perf_counter_update(counter, hwc, idx); 507 val = sparc_perf_event_update(event, hwc, idx);
508 if (val & (1ULL << 31)) 508 if (val & (1ULL << 31))
509 continue; 509 continue;
510 510
511 data.period = counter->hw.last_period; 511 data.period = event->hw.last_period;
512 if (!sparc_perf_counter_set_period(counter, hwc, idx)) 512 if (!sparc_perf_event_set_period(event, hwc, idx))
513 continue; 513 continue;
514 514
515 if (perf_counter_overflow(counter, 1, &data, regs)) 515 if (perf_event_overflow(event, 1, &data, regs))
516 sparc_pmu_disable_counter(hwc, idx); 516 sparc_pmu_disable_event(hwc, idx);
517 } 517 }
518 518
519 return NOTIFY_STOP; 519 return NOTIFY_STOP;
520} 520}
521 521
522static __read_mostly struct notifier_block perf_counter_nmi_notifier = { 522static __read_mostly struct notifier_block perf_event_nmi_notifier = {
523 .notifier_call = perf_counter_nmi_handler, 523 .notifier_call = perf_event_nmi_handler,
524}; 524};
525 525
526static bool __init supported_pmu(void) 526static bool __init supported_pmu(void)
@@ -536,9 +536,9 @@ static bool __init supported_pmu(void)
536 return false; 536 return false;
537} 537}
538 538
539void __init init_hw_perf_counters(void) 539void __init init_hw_perf_events(void)
540{ 540{
541 pr_info("Performance counters: "); 541 pr_info("Performance events: ");
542 542
543 if (!supported_pmu()) { 543 if (!supported_pmu()) {
544 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); 544 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
@@ -547,10 +547,10 @@ void __init init_hw_perf_counters(void)
547 547
548 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); 548 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
549 549
550 /* All sparc64 PMUs currently have 2 counters. But this simple 550 /* All sparc64 PMUs currently have 2 events. But this simple
551 * driver only supports one active counter at a time. 551 * driver only supports one active event at a time.
552 */ 552 */
553 perf_max_counters = 1; 553 perf_max_events = 1;
554 554
555 register_die_notifier(&perf_counter_nmi_notifier); 555 register_die_notifier(&perf_event_nmi_notifier);
556} 556}
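Two mechanical details of this driver read more easily outside the diff. The sparc64 PICs are 32 bits wide, so sparc_perf_event_set_period() programs (u64)(-left) & 0xffffffff into the counter, which then overflows after 'left' more events, and sparc_perf_event_update() recovers the elapsed count with a shift trick that survives wraparound. A standalone userspace illustration of that second step; pmc_delta() is an illustrative name, the arithmetic is the same as in the update functions above:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as sparc_perf_event_update()/x86_perf_event_update():
 * shift both raw values up so only the implemented counter bits remain,
 * subtract, then arithmetic-shift back down to sign-extend the delta. */
static int64_t pmc_delta(uint64_t prev, uint64_t now, int width)
{
	int shift = 64 - width;		/* 64 - 32 for the sparc64 PICs */
	int64_t delta;

	delta = (now << shift) - (prev << shift);
	delta >>= shift;
	return delta;
}

int main(void)
{
	/* counter wrapped from 0xfffffff0 to 0x00000010: prints 32 (0x20) */
	printf("%lld\n", (long long)pmc_delta(0xfffffff0ULL, 0x10ULL, 32));
	return 0;
}

The right shift of a potentially negative value relies on arithmetic-shift semantics, the same assumption the kernel code makes.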
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 04181577cb65..0f1658d37490 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -82,5 +82,5 @@ sys_call_table:
82/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate 82/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
83/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 83/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open 85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
86 86
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 91b06b7f7acf..009825f6e73c 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -83,7 +83,7 @@ sys_call_table32:
83/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate 83/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv 85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open 86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open
87 87
88#endif /* CONFIG_COMPAT */ 88#endif /* CONFIG_COMPAT */
89 89
@@ -158,4 +158,4 @@ sys_call_table:
158/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate 158/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
159 .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 159 .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
160/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 160/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
161 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open 161 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 51c59015b280..e4ff5d1280ca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -24,7 +24,7 @@ config X86
24 select HAVE_UNSTABLE_SCHED_CLOCK 24 select HAVE_UNSTABLE_SCHED_CLOCK
25 select HAVE_IDE 25 select HAVE_IDE
26 select HAVE_OPROFILE 26 select HAVE_OPROFILE
27 select HAVE_PERF_COUNTERS if (!M386 && !M486) 27 select HAVE_PERF_EVENTS if (!M386 && !M486)
28 select HAVE_IOREMAP_PROT 28 select HAVE_IOREMAP_PROT
29 select HAVE_KPROBES 29 select HAVE_KPROBES
30 select ARCH_WANT_OPTIONAL_GPIOLIB 30 select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index ba331bfd1112..74619c4f9fda 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -831,5 +831,5 @@ ia32_sys_call_table:
831 .quad compat_sys_preadv 831 .quad compat_sys_preadv
832 .quad compat_sys_pwritev 832 .quad compat_sys_pwritev
833 .quad compat_sys_rt_tgsigqueueinfo /* 335 */ 833 .quad compat_sys_rt_tgsigqueueinfo /* 335 */
834 .quad sys_perf_counter_open 834 .quad sys_perf_event_open
835ia32_syscall_end: 835ia32_syscall_end:
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 5e3f2044f0d3..f5693c81a1db 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -49,7 +49,7 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
49BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) 49BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
50BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) 50BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
51 51
52#ifdef CONFIG_PERF_COUNTERS 52#ifdef CONFIG_PERF_EVENTS
53BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) 53BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
54#endif 54#endif
55 55
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_event.h
index e7b7c938ae27..ad7ce3fd5065 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -1,8 +1,8 @@
1#ifndef _ASM_X86_PERF_COUNTER_H 1#ifndef _ASM_X86_PERF_EVENT_H
2#define _ASM_X86_PERF_COUNTER_H 2#define _ASM_X86_PERF_EVENT_H
3 3
4/* 4/*
5 * Performance counter hw details: 5 * Performance event hw details:
6 */ 6 */
7 7
8#define X86_PMC_MAX_GENERIC 8 8#define X86_PMC_MAX_GENERIC 8
@@ -43,7 +43,7 @@
43union cpuid10_eax { 43union cpuid10_eax {
44 struct { 44 struct {
45 unsigned int version_id:8; 45 unsigned int version_id:8;
46 unsigned int num_counters:8; 46 unsigned int num_events:8;
47 unsigned int bit_width:8; 47 unsigned int bit_width:8;
48 unsigned int mask_length:8; 48 unsigned int mask_length:8;
49 } split; 49 } split;
@@ -52,7 +52,7 @@ union cpuid10_eax {
52 52
53union cpuid10_edx { 53union cpuid10_edx {
54 struct { 54 struct {
55 unsigned int num_counters_fixed:4; 55 unsigned int num_events_fixed:4;
56 unsigned int reserved:28; 56 unsigned int reserved:28;
57 } split; 57 } split;
58 unsigned int full; 58 unsigned int full;
@@ -60,7 +60,7 @@ union cpuid10_edx {
60 60
61 61
62/* 62/*
63 * Fixed-purpose performance counters: 63 * Fixed-purpose performance events:
64 */ 64 */
65 65
66/* 66/*
@@ -87,22 +87,22 @@ union cpuid10_edx {
87/* 87/*
88 * We model BTS tracing as another fixed-mode PMC. 88 * We model BTS tracing as another fixed-mode PMC.
89 * 89 *
90 * We choose a value in the middle of the fixed counter range, since lower 90 * We choose a value in the middle of the fixed event range, since lower
91 * values are used by actual fixed counters and higher values are used 91 * values are used by actual fixed events and higher values are used
92 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. 92 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
93 */ 93 */
94#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) 94#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
95 95
96 96
97#ifdef CONFIG_PERF_COUNTERS 97#ifdef CONFIG_PERF_EVENTS
98extern void init_hw_perf_counters(void); 98extern void init_hw_perf_events(void);
99extern void perf_counters_lapic_init(void); 99extern void perf_events_lapic_init(void);
100 100
101#define PERF_COUNTER_INDEX_OFFSET 0 101#define PERF_EVENT_INDEX_OFFSET 0
102 102
103#else 103#else
104static inline void init_hw_perf_counters(void) { } 104static inline void init_hw_perf_events(void) { }
105static inline void perf_counters_lapic_init(void) { } 105static inline void perf_events_lapic_init(void) { }
106#endif 106#endif
107 107
108#endif /* _ASM_X86_PERF_COUNTER_H */ 108#endif /* _ASM_X86_PERF_EVENT_H */
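The renamed cpuid10_eax/cpuid10_edx bitfields describe CPUID leaf 0xA (architectural performance monitoring): the version, the number of general-purpose events (formerly "counters"), their bit width, the event-mask length, and in EDX the number of fixed-function events. A userspace sketch of decoding the same layout, assuming GCC's <cpuid.h> on an x86 machine; the union below simply mirrors the header above:

#include <cpuid.h>
#include <stdio.h>

union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;	/* general-purpose PMCs */
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	union cpuid10_eax ea;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0xA not available");
		return 1;
	}
	ea.full = eax;
	printf("arch perfmon v%u: %u events, %u bits wide, %u fixed\n",
	       ea.split.version_id, ea.split.num_events, ea.split.bit_width,
	       edx & 0xf);	/* cpuid10_edx.split.num_events_fixed */
	return 0;
}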
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 8deaada61bc8..6fb3c209a7e3 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -341,7 +341,7 @@
341#define __NR_preadv 333 341#define __NR_preadv 333
342#define __NR_pwritev 334 342#define __NR_pwritev 334
343#define __NR_rt_tgsigqueueinfo 335 343#define __NR_rt_tgsigqueueinfo 335
344#define __NR_perf_counter_open 336 344#define __NR_perf_event_open 336
345 345
346#ifdef __KERNEL__ 346#ifdef __KERNEL__
347 347
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index b9f3c60de5f7..8d3ad0adbc68 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -659,8 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
659__SYSCALL(__NR_pwritev, sys_pwritev) 659__SYSCALL(__NR_pwritev, sys_pwritev)
660#define __NR_rt_tgsigqueueinfo 297 660#define __NR_rt_tgsigqueueinfo 297
661__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) 661__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
662#define __NR_perf_counter_open 298 662#define __NR_perf_event_open 298
663__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) 663__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
664 664
665#ifndef __NO_STUBS 665#ifndef __NO_STUBS
666#define __ARCH_WANT_OLD_READDIR 666#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a58ef98be155..894aa97f0717 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -14,7 +14,7 @@
14 * Mikael Pettersson : PM converted to driver model. 14 * Mikael Pettersson : PM converted to driver model.
15 */ 15 */
16 16
17#include <linux/perf_counter.h> 17#include <linux/perf_event.h>
18#include <linux/kernel_stat.h> 18#include <linux/kernel_stat.h>
19#include <linux/mc146818rtc.h> 19#include <linux/mc146818rtc.h>
20#include <linux/acpi_pmtmr.h> 20#include <linux/acpi_pmtmr.h>
@@ -35,7 +35,7 @@
35#include <linux/smp.h> 35#include <linux/smp.h>
36#include <linux/mm.h> 36#include <linux/mm.h>
37 37
38#include <asm/perf_counter.h> 38#include <asm/perf_event.h>
39#include <asm/x86_init.h> 39#include <asm/x86_init.h>
40#include <asm/pgalloc.h> 40#include <asm/pgalloc.h>
41#include <asm/atomic.h> 41#include <asm/atomic.h>
@@ -1189,7 +1189,7 @@ void __cpuinit setup_local_APIC(void)
1189 apic_write(APIC_ESR, 0); 1189 apic_write(APIC_ESR, 0);
1190 } 1190 }
1191#endif 1191#endif
1192 perf_counters_lapic_init(); 1192 perf_events_lapic_init();
1193 1193
1194 preempt_disable(); 1194 preempt_disable();
1195 1195
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 8dd30638fe44..68537e957a9b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
27obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o 27obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
28obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o 28obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
29 29
30obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o 30obj-$(CONFIG_PERF_EVENTS) += perf_event.o
31 31
32obj-$(CONFIG_X86_MCE) += mcheck/ 32obj-$(CONFIG_X86_MCE) += mcheck/
33obj-$(CONFIG_MTRR) += mtrr/ 33obj-$(CONFIG_MTRR) += mtrr/
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2fea97eccf77..cc25c2b4a567 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,7 +13,7 @@
13#include <linux/io.h> 13#include <linux/io.h>
14 14
15#include <asm/stackprotector.h> 15#include <asm/stackprotector.h>
16#include <asm/perf_counter.h> 16#include <asm/perf_event.h>
17#include <asm/mmu_context.h> 17#include <asm/mmu_context.h>
18#include <asm/hypervisor.h> 18#include <asm/hypervisor.h>
19#include <asm/processor.h> 19#include <asm/processor.h>
@@ -869,7 +869,7 @@ void __init identify_boot_cpu(void)
869#else 869#else
870 vgetcpu_set_mode(); 870 vgetcpu_set_mode();
871#endif 871#endif
872 init_hw_perf_counters(); 872 init_hw_perf_events();
873} 873}
874 874
875void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) 875void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
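Both the sparc driver above (perf_event_grab_pmc/perf_event_release_pmc) and the x86 file that follows guard the PMC hardware with the same first-user/last-user idiom around the renamed active_events count. A distilled sketch of that idiom, kernel context assumed; grab_hardware() and release_hardware() stand in for the arch-specific reserve/release steps:

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(reserve_mutex);

static int get_hw(void)
{
	int err = 0;

	/* fast path: some other event already holds the hardware */
	if (atomic_inc_not_zero(&active_events))
		return 0;

	mutex_lock(&reserve_mutex);
	if (atomic_read(&active_events) == 0)
		err = grab_hardware();		/* arch-specific reserve step */
	if (!err)
		atomic_inc(&active_events);
	mutex_unlock(&reserve_mutex);
	return err;
}

static void put_hw(void)
{
	/* drops the count and, only for the last user, takes the mutex */
	if (atomic_dec_and_mutex_lock(&active_events, &reserve_mutex)) {
		release_hardware();
		mutex_unlock(&reserve_mutex);
	}
}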
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_event.c
index a6c8b27553cd..a3c7adb06b78 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Performance counter x86 architecture code 2 * Performance events x86 architecture code
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
@@ -11,7 +11,7 @@
11 * For licencing details see kernel-base/COPYING 11 * For licencing details see kernel-base/COPYING
12 */ 12 */
13 13
14#include <linux/perf_counter.h> 14#include <linux/perf_event.h>
15#include <linux/capability.h> 15#include <linux/capability.h>
16#include <linux/notifier.h> 16#include <linux/notifier.h>
17#include <linux/hardirq.h> 17#include <linux/hardirq.h>
@@ -27,10 +27,10 @@
27#include <asm/stacktrace.h> 27#include <asm/stacktrace.h>
28#include <asm/nmi.h> 28#include <asm/nmi.h>
29 29
30static u64 perf_counter_mask __read_mostly; 30static u64 perf_event_mask __read_mostly;
31 31
32/* The maximal number of PEBS counters: */ 32/* The maximal number of PEBS events: */
33#define MAX_PEBS_COUNTERS 4 33#define MAX_PEBS_EVENTS 4
34 34
35/* The size of a BTS record in bytes: */ 35/* The size of a BTS record in bytes: */
36#define BTS_RECORD_SIZE 24 36#define BTS_RECORD_SIZE 24
@@ -65,11 +65,11 @@ struct debug_store {
65 u64 pebs_index; 65 u64 pebs_index;
66 u64 pebs_absolute_maximum; 66 u64 pebs_absolute_maximum;
67 u64 pebs_interrupt_threshold; 67 u64 pebs_interrupt_threshold;
68 u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; 68 u64 pebs_event_reset[MAX_PEBS_EVENTS];
69}; 69};
70 70
71struct cpu_hw_counters { 71struct cpu_hw_events {
72 struct perf_counter *counters[X86_PMC_IDX_MAX]; 72 struct perf_event *events[X86_PMC_IDX_MAX];
73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
75 unsigned long interrupts; 75 unsigned long interrupts;
@@ -86,17 +86,17 @@ struct x86_pmu {
86 int (*handle_irq)(struct pt_regs *); 86 int (*handle_irq)(struct pt_regs *);
87 void (*disable_all)(void); 87 void (*disable_all)(void);
88 void (*enable_all)(void); 88 void (*enable_all)(void);
89 void (*enable)(struct hw_perf_counter *, int); 89 void (*enable)(struct hw_perf_event *, int);
90 void (*disable)(struct hw_perf_counter *, int); 90 void (*disable)(struct hw_perf_event *, int);
91 unsigned eventsel; 91 unsigned eventsel;
92 unsigned perfctr; 92 unsigned perfctr;
93 u64 (*event_map)(int); 93 u64 (*event_map)(int);
94 u64 (*raw_event)(u64); 94 u64 (*raw_event)(u64);
95 int max_events; 95 int max_events;
96 int num_counters; 96 int num_events;
97 int num_counters_fixed; 97 int num_events_fixed;
98 int counter_bits; 98 int event_bits;
99 u64 counter_mask; 99 u64 event_mask;
100 int apic; 100 int apic;
101 u64 max_period; 101 u64 max_period;
102 u64 intel_ctrl; 102 u64 intel_ctrl;
@@ -106,7 +106,7 @@ struct x86_pmu {
106 106
107static struct x86_pmu x86_pmu __read_mostly; 107static struct x86_pmu x86_pmu __read_mostly;
108 108
109static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { 109static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
110 .enabled = 1, 110 .enabled = 1,
111}; 111};
112 112
@@ -124,35 +124,35 @@ static const u64 p6_perfmon_event_map[] =
124 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, 124 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
125}; 125};
126 126
127static u64 p6_pmu_event_map(int event) 127static u64 p6_pmu_event_map(int hw_event)
128{ 128{
129 return p6_perfmon_event_map[event]; 129 return p6_perfmon_event_map[hw_event];
130} 130}
131 131
132/* 132/*
133 * Counter setting that is specified not to count anything. 133 * Event setting that is specified not to count anything.
134 * We use this to effectively disable a counter. 134 * We use this to effectively disable a counter.
135 * 135 *
136 * L2_RQSTS with 0 MESI unit mask. 136 * L2_RQSTS with 0 MESI unit mask.
137 */ 137 */
138#define P6_NOP_COUNTER 0x0000002EULL 138#define P6_NOP_EVENT 0x0000002EULL
139 139
140static u64 p6_pmu_raw_event(u64 event) 140static u64 p6_pmu_raw_event(u64 hw_event)
141{ 141{
142#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL 142#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
143#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL 143#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
144#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL 144#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
145#define P6_EVNTSEL_INV_MASK 0x00800000ULL 145#define P6_EVNTSEL_INV_MASK 0x00800000ULL
146#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL 146#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
147 147
148#define P6_EVNTSEL_MASK \ 148#define P6_EVNTSEL_MASK \
149 (P6_EVNTSEL_EVENT_MASK | \ 149 (P6_EVNTSEL_EVENT_MASK | \
150 P6_EVNTSEL_UNIT_MASK | \ 150 P6_EVNTSEL_UNIT_MASK | \
151 P6_EVNTSEL_EDGE_MASK | \ 151 P6_EVNTSEL_EDGE_MASK | \
152 P6_EVNTSEL_INV_MASK | \ 152 P6_EVNTSEL_INV_MASK | \
153 P6_EVNTSEL_COUNTER_MASK) 153 P6_EVNTSEL_REG_MASK)
154 154
155 return event & P6_EVNTSEL_MASK; 155 return hw_event & P6_EVNTSEL_MASK;
156} 156}
157 157
158 158
@@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] =
170 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, 170 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
171}; 171};
172 172
173static u64 intel_pmu_event_map(int event) 173static u64 intel_pmu_event_map(int hw_event)
174{ 174{
175 return intel_perfmon_event_map[event]; 175 return intel_perfmon_event_map[hw_event];
176} 176}
177 177
178/* 178/*
179 * Generalized hw caching related event table, filled 179 * Generalized hw caching related hw_event table, filled
180 * in on a per model basis. A value of 0 means 180 * in on a per model basis. A value of 0 means
181 * 'not supported', -1 means 'event makes no sense on 181 * 'not supported', -1 means 'hw_event makes no sense on
182 * this CPU', any other value means the raw event 182 * this CPU', any other value means the raw hw_event
183 * ID. 183 * ID.
184 */ 184 */
185 185
@@ -463,22 +463,22 @@ static const u64 atom_hw_cache_event_ids
463 }, 463 },
464}; 464};
465 465
466static u64 intel_pmu_raw_event(u64 event) 466static u64 intel_pmu_raw_event(u64 hw_event)
467{ 467{
468#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL 468#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
469#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL 469#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
470#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL 470#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
471#define CORE_EVNTSEL_INV_MASK 0x00800000ULL 471#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
472#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL 472#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
473 473
474#define CORE_EVNTSEL_MASK \ 474#define CORE_EVNTSEL_MASK \
475 (CORE_EVNTSEL_EVENT_MASK | \ 475 (CORE_EVNTSEL_EVENT_MASK | \
476 CORE_EVNTSEL_UNIT_MASK | \ 476 CORE_EVNTSEL_UNIT_MASK | \
477 CORE_EVNTSEL_EDGE_MASK | \ 477 CORE_EVNTSEL_EDGE_MASK | \
478 CORE_EVNTSEL_INV_MASK | \ 478 CORE_EVNTSEL_INV_MASK | \
479 CORE_EVNTSEL_COUNTER_MASK) 479 CORE_EVNTSEL_REG_MASK)
480 480
481 return event & CORE_EVNTSEL_MASK; 481 return hw_event & CORE_EVNTSEL_MASK;
482} 482}
483 483
484static const u64 amd_hw_cache_event_ids 484static const u64 amd_hw_cache_event_ids
@@ -585,39 +585,39 @@ static const u64 amd_perfmon_event_map[] =
585 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, 585 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
586}; 586};
587 587
588static u64 amd_pmu_event_map(int event) 588static u64 amd_pmu_event_map(int hw_event)
589{ 589{
590 return amd_perfmon_event_map[event]; 590 return amd_perfmon_event_map[hw_event];
591} 591}
592 592
593static u64 amd_pmu_raw_event(u64 event) 593static u64 amd_pmu_raw_event(u64 hw_event)
594{ 594{
595#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL 595#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
596#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL 596#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
597#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL 597#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
598#define K7_EVNTSEL_INV_MASK 0x000800000ULL 598#define K7_EVNTSEL_INV_MASK 0x000800000ULL
599#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL 599#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
600 600
601#define K7_EVNTSEL_MASK \ 601#define K7_EVNTSEL_MASK \
602 (K7_EVNTSEL_EVENT_MASK | \ 602 (K7_EVNTSEL_EVENT_MASK | \
603 K7_EVNTSEL_UNIT_MASK | \ 603 K7_EVNTSEL_UNIT_MASK | \
604 K7_EVNTSEL_EDGE_MASK | \ 604 K7_EVNTSEL_EDGE_MASK | \
605 K7_EVNTSEL_INV_MASK | \ 605 K7_EVNTSEL_INV_MASK | \
606 K7_EVNTSEL_COUNTER_MASK) 606 K7_EVNTSEL_REG_MASK)
607 607
608 return event & K7_EVNTSEL_MASK; 608 return hw_event & K7_EVNTSEL_MASK;
609} 609}
610 610
611/* 611/*
612 * Propagate counter elapsed time into the generic counter. 612 * Propagate event elapsed time into the generic event.
613 * Can only be executed on the CPU where the counter is active. 613 * Can only be executed on the CPU where the event is active.
614 * Returns the delta events processed. 614 * Returns the delta events processed.
615 */ 615 */
616static u64 616static u64
617x86_perf_counter_update(struct perf_counter *counter, 617x86_perf_event_update(struct perf_event *event,
618 struct hw_perf_counter *hwc, int idx) 618 struct hw_perf_event *hwc, int idx)
619{ 619{
620 int shift = 64 - x86_pmu.counter_bits; 620 int shift = 64 - x86_pmu.event_bits;
621 u64 prev_raw_count, new_raw_count; 621 u64 prev_raw_count, new_raw_count;
622 s64 delta; 622 s64 delta;
623 623
@@ -625,15 +625,15 @@ x86_perf_counter_update(struct perf_counter *counter,
625 return 0; 625 return 0;
626 626
627 /* 627 /*
628 * Careful: an NMI might modify the previous counter value. 628 * Careful: an NMI might modify the previous event value.
629 * 629 *
630 * Our tactic to handle this is to first atomically read and 630 * Our tactic to handle this is to first atomically read and
631 * exchange a new raw count - then add that new-prev delta 631 * exchange a new raw count - then add that new-prev delta
632 * count to the generic counter atomically: 632 * count to the generic event atomically:
633 */ 633 */
634again: 634again:
635 prev_raw_count = atomic64_read(&hwc->prev_count); 635 prev_raw_count = atomic64_read(&hwc->prev_count);
636 rdmsrl(hwc->counter_base + idx, new_raw_count); 636 rdmsrl(hwc->event_base + idx, new_raw_count);
637 637
638 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, 638 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
639 new_raw_count) != prev_raw_count) 639 new_raw_count) != prev_raw_count)
@@ -642,7 +642,7 @@ again:
642 /* 642 /*
643 * Now we have the new raw value and have updated the prev 643 * Now we have the new raw value and have updated the prev
644 * timestamp already. We can now calculate the elapsed delta 644 * timestamp already. We can now calculate the elapsed delta
645 * (counter-)time and add that to the generic counter. 645 * (event-)time and add that to the generic event.
646 * 646 *
647 * Careful, not all hw sign-extends above the physical width 647 * Careful, not all hw sign-extends above the physical width
648 * of the count. 648 * of the count.
@@ -650,13 +650,13 @@ again:
650 delta = (new_raw_count << shift) - (prev_raw_count << shift); 650 delta = (new_raw_count << shift) - (prev_raw_count << shift);
651 delta >>= shift; 651 delta >>= shift;
652 652
653 atomic64_add(delta, &counter->count); 653 atomic64_add(delta, &event->count);
654 atomic64_sub(delta, &hwc->period_left); 654 atomic64_sub(delta, &hwc->period_left);
655 655
656 return new_raw_count; 656 return new_raw_count;
657} 657}
658 658
659static atomic_t active_counters; 659static atomic_t active_events;
660static DEFINE_MUTEX(pmc_reserve_mutex); 660static DEFINE_MUTEX(pmc_reserve_mutex);
661 661
662static bool reserve_pmc_hardware(void) 662static bool reserve_pmc_hardware(void)
@@ -667,12 +667,12 @@ static bool reserve_pmc_hardware(void)
667 if (nmi_watchdog == NMI_LOCAL_APIC) 667 if (nmi_watchdog == NMI_LOCAL_APIC)
668 disable_lapic_nmi_watchdog(); 668 disable_lapic_nmi_watchdog();
669 669
670 for (i = 0; i < x86_pmu.num_counters; i++) { 670 for (i = 0; i < x86_pmu.num_events; i++) {
671 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) 671 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
672 goto perfctr_fail; 672 goto perfctr_fail;
673 } 673 }
674 674
675 for (i = 0; i < x86_pmu.num_counters; i++) { 675 for (i = 0; i < x86_pmu.num_events; i++) {
676 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) 676 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
677 goto eventsel_fail; 677 goto eventsel_fail;
678 } 678 }
@@ -685,7 +685,7 @@ eventsel_fail:
685 for (i--; i >= 0; i--) 685 for (i--; i >= 0; i--)
686 release_evntsel_nmi(x86_pmu.eventsel + i); 686 release_evntsel_nmi(x86_pmu.eventsel + i);
687 687
688 i = x86_pmu.num_counters; 688 i = x86_pmu.num_events;
689 689
690perfctr_fail: 690perfctr_fail:
691 for (i--; i >= 0; i--) 691 for (i--; i >= 0; i--)
@@ -703,7 +703,7 @@ static void release_pmc_hardware(void)
703#ifdef CONFIG_X86_LOCAL_APIC 703#ifdef CONFIG_X86_LOCAL_APIC
704 int i; 704 int i;
705 705
706 for (i = 0; i < x86_pmu.num_counters; i++) { 706 for (i = 0; i < x86_pmu.num_events; i++) {
707 release_perfctr_nmi(x86_pmu.perfctr + i); 707 release_perfctr_nmi(x86_pmu.perfctr + i);
708 release_evntsel_nmi(x86_pmu.eventsel + i); 708 release_evntsel_nmi(x86_pmu.eventsel + i);
709 } 709 }
@@ -720,7 +720,7 @@ static inline bool bts_available(void)
720 720
721static inline void init_debug_store_on_cpu(int cpu) 721static inline void init_debug_store_on_cpu(int cpu)
722{ 722{
723 struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; 723 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
724 724
725 if (!ds) 725 if (!ds)
726 return; 726 return;
@@ -732,7 +732,7 @@ static inline void init_debug_store_on_cpu(int cpu)
732 732
733static inline void fini_debug_store_on_cpu(int cpu) 733static inline void fini_debug_store_on_cpu(int cpu)
734{ 734{
735 if (!per_cpu(cpu_hw_counters, cpu).ds) 735 if (!per_cpu(cpu_hw_events, cpu).ds)
736 return; 736 return;
737 737
738 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); 738 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
@@ -751,12 +751,12 @@ static void release_bts_hardware(void)
751 fini_debug_store_on_cpu(cpu); 751 fini_debug_store_on_cpu(cpu);
752 752
753 for_each_possible_cpu(cpu) { 753 for_each_possible_cpu(cpu) {
754 struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; 754 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
755 755
756 if (!ds) 756 if (!ds)
757 continue; 757 continue;
758 758
759 per_cpu(cpu_hw_counters, cpu).ds = NULL; 759 per_cpu(cpu_hw_events, cpu).ds = NULL;
760 760
761 kfree((void *)(unsigned long)ds->bts_buffer_base); 761 kfree((void *)(unsigned long)ds->bts_buffer_base);
762 kfree(ds); 762 kfree(ds);
@@ -796,7 +796,7 @@ static int reserve_bts_hardware(void)
796 ds->bts_interrupt_threshold = 796 ds->bts_interrupt_threshold =
797 ds->bts_absolute_maximum - BTS_OVFL_TH; 797 ds->bts_absolute_maximum - BTS_OVFL_TH;
798 798
799 per_cpu(cpu_hw_counters, cpu).ds = ds; 799 per_cpu(cpu_hw_events, cpu).ds = ds;
800 err = 0; 800 err = 0;
801 } 801 }
802 802
@@ -812,9 +812,9 @@ static int reserve_bts_hardware(void)
812 return err; 812 return err;
813} 813}
814 814
815static void hw_perf_counter_destroy(struct perf_counter *counter) 815static void hw_perf_event_destroy(struct perf_event *event)
816{ 816{
817 if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { 817 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
818 release_pmc_hardware(); 818 release_pmc_hardware();
819 release_bts_hardware(); 819 release_bts_hardware();
820 mutex_unlock(&pmc_reserve_mutex); 820 mutex_unlock(&pmc_reserve_mutex);
@@ -827,7 +827,7 @@ static inline int x86_pmu_initialized(void)
827} 827}
828 828
829static inline int 829static inline int
830set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) 830set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
831{ 831{
832 unsigned int cache_type, cache_op, cache_result; 832 unsigned int cache_type, cache_op, cache_result;
833 u64 config, val; 833 u64 config, val;
@@ -880,7 +880,7 @@ static void intel_pmu_enable_bts(u64 config)
880 880
881static void intel_pmu_disable_bts(void) 881static void intel_pmu_disable_bts(void)
882{ 882{
883 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 883 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
884 unsigned long debugctlmsr; 884 unsigned long debugctlmsr;
885 885
886 if (!cpuc->ds) 886 if (!cpuc->ds)
@@ -898,10 +898,10 @@ static void intel_pmu_disable_bts(void)
898/* 898/*
899 * Setup the hardware configuration for a given attr_type 899 * Setup the hardware configuration for a given attr_type
900 */ 900 */
901static int __hw_perf_counter_init(struct perf_counter *counter) 901static int __hw_perf_event_init(struct perf_event *event)
902{ 902{
903 struct perf_counter_attr *attr = &counter->attr; 903 struct perf_event_attr *attr = &event->attr;
904 struct hw_perf_counter *hwc = &counter->hw; 904 struct hw_perf_event *hwc = &event->hw;
905 u64 config; 905 u64 config;
906 int err; 906 int err;
907 907
@@ -909,22 +909,22 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
909 return -ENODEV; 909 return -ENODEV;
910 910
911 err = 0; 911 err = 0;
912 if (!atomic_inc_not_zero(&active_counters)) { 912 if (!atomic_inc_not_zero(&active_events)) {
913 mutex_lock(&pmc_reserve_mutex); 913 mutex_lock(&pmc_reserve_mutex);
914 if (atomic_read(&active_counters) == 0) { 914 if (atomic_read(&active_events) == 0) {
915 if (!reserve_pmc_hardware()) 915 if (!reserve_pmc_hardware())
916 err = -EBUSY; 916 err = -EBUSY;
917 else 917 else
918 err = reserve_bts_hardware(); 918 err = reserve_bts_hardware();
919 } 919 }
920 if (!err) 920 if (!err)
921 atomic_inc(&active_counters); 921 atomic_inc(&active_events);
922 mutex_unlock(&pmc_reserve_mutex); 922 mutex_unlock(&pmc_reserve_mutex);
923 } 923 }
924 if (err) 924 if (err)
925 return err; 925 return err;
926 926
927 counter->destroy = hw_perf_counter_destroy; 927 event->destroy = hw_perf_event_destroy;
928 928
929 /* 929 /*
930 * Generate PMC IRQs: 930 * Generate PMC IRQs:
@@ -948,15 +948,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
948 /* 948 /*
949 * If we have a PMU initialized but no APIC 949 * If we have a PMU initialized but no APIC
950 * interrupts, we cannot sample hardware 950 * interrupts, we cannot sample hardware
951 * counters (user-space has to fall back and 951 * events (user-space has to fall back and
952 * sample via a hrtimer based software counter): 952 * sample via a hrtimer based software event):
953 */ 953 */
954 if (!x86_pmu.apic) 954 if (!x86_pmu.apic)
955 return -EOPNOTSUPP; 955 return -EOPNOTSUPP;
956 } 956 }
957 957
958 /* 958 /*
 959 * Raw event type provides the config in the event structure 959 * Raw hw_event type provides the config in the hw_event structure
960 */ 960 */
961 if (attr->type == PERF_TYPE_RAW) { 961 if (attr->type == PERF_TYPE_RAW) {
962 hwc->config |= x86_pmu.raw_event(attr->config); 962 hwc->config |= x86_pmu.raw_event(attr->config);
@@ -1001,7 +1001,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
1001 1001
1002static void p6_pmu_disable_all(void) 1002static void p6_pmu_disable_all(void)
1003{ 1003{
1004 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1004 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1005 u64 val; 1005 u64 val;
1006 1006
1007 if (!cpuc->enabled) 1007 if (!cpuc->enabled)
@@ -1018,7 +1018,7 @@ static void p6_pmu_disable_all(void)
1018 1018
1019static void intel_pmu_disable_all(void) 1019static void intel_pmu_disable_all(void)
1020{ 1020{
1021 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1021 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1022 1022
1023 if (!cpuc->enabled) 1023 if (!cpuc->enabled)
1024 return; 1024 return;
@@ -1034,7 +1034,7 @@ static void intel_pmu_disable_all(void)
1034 1034
1035static void amd_pmu_disable_all(void) 1035static void amd_pmu_disable_all(void)
1036{ 1036{
1037 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1037 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1038 int idx; 1038 int idx;
1039 1039
1040 if (!cpuc->enabled) 1040 if (!cpuc->enabled)
@@ -1043,12 +1043,12 @@ static void amd_pmu_disable_all(void)
1043 cpuc->enabled = 0; 1043 cpuc->enabled = 0;
1044 /* 1044 /*
1045 * ensure we write the disable before we start disabling the 1045 * ensure we write the disable before we start disabling the
1046 * counters proper, so that amd_pmu_enable_counter() does the 1046 * events proper, so that amd_pmu_enable_event() does the
1047 * right thing. 1047 * right thing.
1048 */ 1048 */
1049 barrier(); 1049 barrier();
1050 1050
1051 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1051 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1052 u64 val; 1052 u64 val;
1053 1053
1054 if (!test_bit(idx, cpuc->active_mask)) 1054 if (!test_bit(idx, cpuc->active_mask))
@@ -1070,7 +1070,7 @@ void hw_perf_disable(void)
1070 1070
1071static void p6_pmu_enable_all(void) 1071static void p6_pmu_enable_all(void)
1072{ 1072{
1073 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1073 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1074 unsigned long val; 1074 unsigned long val;
1075 1075
1076 if (cpuc->enabled) 1076 if (cpuc->enabled)
@@ -1087,7 +1087,7 @@ static void p6_pmu_enable_all(void)
1087 1087
1088static void intel_pmu_enable_all(void) 1088static void intel_pmu_enable_all(void)
1089{ 1089{
1090 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1090 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1091 1091
1092 if (cpuc->enabled) 1092 if (cpuc->enabled)
1093 return; 1093 return;
@@ -1098,19 +1098,19 @@ static void intel_pmu_enable_all(void)
1098 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); 1098 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1099 1099
1100 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 1100 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1101 struct perf_counter *counter = 1101 struct perf_event *event =
1102 cpuc->counters[X86_PMC_IDX_FIXED_BTS]; 1102 cpuc->events[X86_PMC_IDX_FIXED_BTS];
1103 1103
1104 if (WARN_ON_ONCE(!counter)) 1104 if (WARN_ON_ONCE(!event))
1105 return; 1105 return;
1106 1106
1107 intel_pmu_enable_bts(counter->hw.config); 1107 intel_pmu_enable_bts(event->hw.config);
1108 } 1108 }
1109} 1109}
1110 1110
1111static void amd_pmu_enable_all(void) 1111static void amd_pmu_enable_all(void)
1112{ 1112{
1113 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1113 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1114 int idx; 1114 int idx;
1115 1115
1116 if (cpuc->enabled) 1116 if (cpuc->enabled)
@@ -1119,14 +1119,14 @@ static void amd_pmu_enable_all(void)
1119 cpuc->enabled = 1; 1119 cpuc->enabled = 1;
1120 barrier(); 1120 barrier();
1121 1121
1122 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1122 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1123 struct perf_counter *counter = cpuc->counters[idx]; 1123 struct perf_event *event = cpuc->events[idx];
1124 u64 val; 1124 u64 val;
1125 1125
1126 if (!test_bit(idx, cpuc->active_mask)) 1126 if (!test_bit(idx, cpuc->active_mask))
1127 continue; 1127 continue;
1128 1128
1129 val = counter->hw.config; 1129 val = event->hw.config;
1130 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 1130 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1131 wrmsrl(MSR_K7_EVNTSEL0 + idx, val); 1131 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1132 } 1132 }
@@ -1153,19 +1153,19 @@ static inline void intel_pmu_ack_status(u64 ack)
1153 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 1153 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1154} 1154}
1155 1155
1156static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1156static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1157{ 1157{
1158 (void)checking_wrmsrl(hwc->config_base + idx, 1158 (void)checking_wrmsrl(hwc->config_base + idx,
1159 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); 1159 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1160} 1160}
1161 1161
1162static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1162static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1163{ 1163{
1164 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); 1164 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1165} 1165}
1166 1166
1167static inline void 1167static inline void
1168intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) 1168intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1169{ 1169{
1170 int idx = __idx - X86_PMC_IDX_FIXED; 1170 int idx = __idx - X86_PMC_IDX_FIXED;
1171 u64 ctrl_val, mask; 1171 u64 ctrl_val, mask;
@@ -1178,10 +1178,10 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
1178} 1178}
1179 1179
1180static inline void 1180static inline void
1181p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1181p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1182{ 1182{
1183 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1183 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1184 u64 val = P6_NOP_COUNTER; 1184 u64 val = P6_NOP_EVENT;
1185 1185
1186 if (cpuc->enabled) 1186 if (cpuc->enabled)
1187 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 1187 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1190,7 +1190,7 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
1190} 1190}
1191 1191
1192static inline void 1192static inline void
1193intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1193intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1194{ 1194{
1195 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 1195 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1196 intel_pmu_disable_bts(); 1196 intel_pmu_disable_bts();
@@ -1202,24 +1202,24 @@ intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
1202 return; 1202 return;
1203 } 1203 }
1204 1204
1205 x86_pmu_disable_counter(hwc, idx); 1205 x86_pmu_disable_event(hwc, idx);
1206} 1206}
1207 1207
1208static inline void 1208static inline void
1209amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1209amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1210{ 1210{
1211 x86_pmu_disable_counter(hwc, idx); 1211 x86_pmu_disable_event(hwc, idx);
1212} 1212}
1213 1213
1214static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); 1214static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1215 1215
1216/* 1216/*
1217 * Set the next IRQ period, based on the hwc->period_left value. 1217 * Set the next IRQ period, based on the hwc->period_left value.
1218 * To be called with the counter disabled in hw: 1218 * To be called with the event disabled in hw:
1219 */ 1219 */
1220static int 1220static int
1221x86_perf_counter_set_period(struct perf_counter *counter, 1221x86_perf_event_set_period(struct perf_event *event,
1222 struct hw_perf_counter *hwc, int idx) 1222 struct hw_perf_event *hwc, int idx)
1223{ 1223{
1224 s64 left = atomic64_read(&hwc->period_left); 1224 s64 left = atomic64_read(&hwc->period_left);
1225 s64 period = hwc->sample_period; 1225 s64 period = hwc->sample_period;
@@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
1245 ret = 1; 1245 ret = 1;
1246 } 1246 }
1247 /* 1247 /*
1248 * Quirk: certain CPUs don't like it if just 1 event is left: 1248 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1249 */ 1249 */
1250 if (unlikely(left < 2)) 1250 if (unlikely(left < 2))
1251 left = 2; 1251 left = 2;
@@ -1256,21 +1256,21 @@ x86_perf_counter_set_period(struct perf_counter *counter,
1256 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; 1256 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1257 1257
1258 /* 1258 /*
1259 * The hw counter starts counting from this counter offset, 1259 * The hw event starts counting from this event offset,
1260 * mark it to be able to extract future deltas: 1260 * mark it to be able to extract future deltas:
1261 */ 1261 */
1262 atomic64_set(&hwc->prev_count, (u64)-left); 1262 atomic64_set(&hwc->prev_count, (u64)-left);
1263 1263
1264 err = checking_wrmsrl(hwc->counter_base + idx, 1264 err = checking_wrmsrl(hwc->event_base + idx,
1265 (u64)(-left) & x86_pmu.counter_mask); 1265 (u64)(-left) & x86_pmu.event_mask);
1266 1266
1267 perf_counter_update_userpage(counter); 1267 perf_event_update_userpage(event);
1268 1268
1269 return ret; 1269 return ret;
1270} 1270}
1271 1271
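A minimal user-space sketch of the arithmetic in x86_perf_event_set_period() above: the counter is programmed to (u64)(-left) masked to its effective width, so it wraps past zero after exactly 'left' increments, and the interrupt handlers later in this file detect the wrap by testing the top implemented bit. The 32-bit width below is the P6-like case; EVENT_BITS and EVENT_MASK are illustrative names, not the kernel's:

#include <stdio.h>
#include <stdint.h>

#define EVENT_BITS 32				/* effective counter width */
#define EVENT_MASK ((1ULL << EVENT_BITS) - 1)	/* value mask for that width */

int main(void)
{
	int64_t left = 100000;				/* remaining sample period */
	uint64_t pmc = (uint64_t)(-left) & EVENT_MASK;	/* value written to the counter */

	/* simulate 'left' increments: the counter wraps exactly once */
	for (int64_t i = 0; i < left; i++)
		pmc = (pmc + 1) & EVENT_MASK;

	/* after the wrap the top implemented bit is clear again */
	int overflowed = !(pmc & (1ULL << (EVENT_BITS - 1)));

	printf("final value %#llx, overflowed=%d\n",
	       (unsigned long long)pmc, overflowed);
	return 0;
}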
1272static inline void 1272static inline void
1273intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) 1273intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1274{ 1274{
1275 int idx = __idx - X86_PMC_IDX_FIXED; 1275 int idx = __idx - X86_PMC_IDX_FIXED;
1276 u64 ctrl_val, bits, mask; 1276 u64 ctrl_val, bits, mask;
@@ -1295,9 +1295,9 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
1295 err = checking_wrmsrl(hwc->config_base, ctrl_val); 1295 err = checking_wrmsrl(hwc->config_base, ctrl_val);
1296} 1296}
1297 1297
1298static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1298static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1299{ 1299{
1300 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1300 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1301 u64 val; 1301 u64 val;
1302 1302
1303 val = hwc->config; 1303 val = hwc->config;
@@ -1308,10 +1308,10 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
1308} 1308}
1309 1309
1310 1310
1311static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1311static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1312{ 1312{
1313 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 1313 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1314 if (!__get_cpu_var(cpu_hw_counters).enabled) 1314 if (!__get_cpu_var(cpu_hw_events).enabled)
1315 return; 1315 return;
1316 1316
1317 intel_pmu_enable_bts(hwc->config); 1317 intel_pmu_enable_bts(hwc->config);
@@ -1323,134 +1323,134 @@ static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
1323 return; 1323 return;
1324 } 1324 }
1325 1325
1326 x86_pmu_enable_counter(hwc, idx); 1326 x86_pmu_enable_event(hwc, idx);
1327} 1327}
1328 1328
1329static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1329static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1330{ 1330{
1331 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1331 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1332 1332
1333 if (cpuc->enabled) 1333 if (cpuc->enabled)
1334 x86_pmu_enable_counter(hwc, idx); 1334 x86_pmu_enable_event(hwc, idx);
1335} 1335}
1336 1336
1337static int 1337static int
1338fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) 1338fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
1339{ 1339{
1340 unsigned int event; 1340 unsigned int hw_event;
1341 1341
1342 event = hwc->config & ARCH_PERFMON_EVENT_MASK; 1342 hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
1343 1343
1344 if (unlikely((event == 1344 if (unlikely((hw_event ==
1345 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && 1345 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
1346 (hwc->sample_period == 1))) 1346 (hwc->sample_period == 1)))
1347 return X86_PMC_IDX_FIXED_BTS; 1347 return X86_PMC_IDX_FIXED_BTS;
1348 1348
1349 if (!x86_pmu.num_counters_fixed) 1349 if (!x86_pmu.num_events_fixed)
1350 return -1; 1350 return -1;
1351 1351
1352 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) 1352 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
1353 return X86_PMC_IDX_FIXED_INSTRUCTIONS; 1353 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
1354 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) 1354 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
1355 return X86_PMC_IDX_FIXED_CPU_CYCLES; 1355 return X86_PMC_IDX_FIXED_CPU_CYCLES;
1356 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) 1356 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
1357 return X86_PMC_IDX_FIXED_BUS_CYCLES; 1357 return X86_PMC_IDX_FIXED_BUS_CYCLES;
1358 1358
1359 return -1; 1359 return -1;
1360} 1360}
1361 1361
1362/* 1362/*
1363 * Find a PMC slot for the freshly enabled / scheduled in counter: 1363 * Find a PMC slot for the freshly enabled / scheduled in event:
1364 */ 1364 */
1365static int x86_pmu_enable(struct perf_counter *counter) 1365static int x86_pmu_enable(struct perf_event *event)
1366{ 1366{
1367 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1367 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1368 struct hw_perf_counter *hwc = &counter->hw; 1368 struct hw_perf_event *hwc = &event->hw;
1369 int idx; 1369 int idx;
1370 1370
1371 idx = fixed_mode_idx(counter, hwc); 1371 idx = fixed_mode_idx(event, hwc);
1372 if (idx == X86_PMC_IDX_FIXED_BTS) { 1372 if (idx == X86_PMC_IDX_FIXED_BTS) {
1373 /* BTS is already occupied. */ 1373 /* BTS is already occupied. */
1374 if (test_and_set_bit(idx, cpuc->used_mask)) 1374 if (test_and_set_bit(idx, cpuc->used_mask))
1375 return -EAGAIN; 1375 return -EAGAIN;
1376 1376
1377 hwc->config_base = 0; 1377 hwc->config_base = 0;
1378 hwc->counter_base = 0; 1378 hwc->event_base = 0;
1379 hwc->idx = idx; 1379 hwc->idx = idx;
1380 } else if (idx >= 0) { 1380 } else if (idx >= 0) {
1381 /* 1381 /*
1382 * Try to get the fixed counter, if that is already taken 1382 * Try to get the fixed event, if that is already taken
1383 * then try to get a generic counter: 1383 * then try to get a generic event:
1384 */ 1384 */
1385 if (test_and_set_bit(idx, cpuc->used_mask)) 1385 if (test_and_set_bit(idx, cpuc->used_mask))
1386 goto try_generic; 1386 goto try_generic;
1387 1387
1388 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; 1388 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1389 /* 1389 /*
1390 * We set it so that counter_base + idx in wrmsr/rdmsr maps to 1390 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1391 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: 1391 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1392 */ 1392 */
1393 hwc->counter_base = 1393 hwc->event_base =
1394 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; 1394 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1395 hwc->idx = idx; 1395 hwc->idx = idx;
1396 } else { 1396 } else {
1397 idx = hwc->idx; 1397 idx = hwc->idx;
1398 /* Try to get the previous generic counter again */ 1398 /* Try to get the previous generic event again */
1399 if (test_and_set_bit(idx, cpuc->used_mask)) { 1399 if (test_and_set_bit(idx, cpuc->used_mask)) {
1400try_generic: 1400try_generic:
1401 idx = find_first_zero_bit(cpuc->used_mask, 1401 idx = find_first_zero_bit(cpuc->used_mask,
1402 x86_pmu.num_counters); 1402 x86_pmu.num_events);
1403 if (idx == x86_pmu.num_counters) 1403 if (idx == x86_pmu.num_events)
1404 return -EAGAIN; 1404 return -EAGAIN;
1405 1405
1406 set_bit(idx, cpuc->used_mask); 1406 set_bit(idx, cpuc->used_mask);
1407 hwc->idx = idx; 1407 hwc->idx = idx;
1408 } 1408 }
1409 hwc->config_base = x86_pmu.eventsel; 1409 hwc->config_base = x86_pmu.eventsel;
1410 hwc->counter_base = x86_pmu.perfctr; 1410 hwc->event_base = x86_pmu.perfctr;
1411 } 1411 }
1412 1412
1413 perf_counters_lapic_init(); 1413 perf_events_lapic_init();
1414 1414
1415 x86_pmu.disable(hwc, idx); 1415 x86_pmu.disable(hwc, idx);
1416 1416
1417 cpuc->counters[idx] = counter; 1417 cpuc->events[idx] = event;
1418 set_bit(idx, cpuc->active_mask); 1418 set_bit(idx, cpuc->active_mask);
1419 1419
1420 x86_perf_counter_set_period(counter, hwc, idx); 1420 x86_perf_event_set_period(event, hwc, idx);
1421 x86_pmu.enable(hwc, idx); 1421 x86_pmu.enable(hwc, idx);
1422 1422
1423 perf_counter_update_userpage(counter); 1423 perf_event_update_userpage(event);
1424 1424
1425 return 0; 1425 return 0;
1426} 1426}
1427 1427
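The event_base handling in x86_pmu_enable() above relies on a base-offset trick: for fixed-purpose events, event_base is set to MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED so that event_base + idx lands on the right fixed counter MSR, while generic events keep event_base = x86_pmu.perfctr. A small sketch of that index-to-MSR mapping, using the usual architectural MSR numbers; the helper itself is illustrative:

#include <stdio.h>

#define MSR_ARCH_PERFMON_PERFCTR0   0x0c1	/* first generic counter MSR */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309	/* first fixed-purpose counter MSR */
#define X86_PMC_IDX_FIXED           32		/* fixed counters start at this index */

/* mirrors the event_base + idx addressing used above */
static unsigned int counter_msr(int idx)
{
	unsigned long event_base;

	if (idx >= X86_PMC_IDX_FIXED)
		event_base = MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	else
		event_base = MSR_ARCH_PERFMON_PERFCTR0;

	return event_base + idx;
}

int main(void)
{
	printf("generic PMC1    -> MSR %#x\n", counter_msr(1));				/* 0xc2  */
	printf("fixed counter 2 -> MSR %#x\n", counter_msr(X86_PMC_IDX_FIXED + 2));	/* 0x30b */
	return 0;
}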
1428static void x86_pmu_unthrottle(struct perf_counter *counter) 1428static void x86_pmu_unthrottle(struct perf_event *event)
1429{ 1429{
1430 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1430 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1431 struct hw_perf_counter *hwc = &counter->hw; 1431 struct hw_perf_event *hwc = &event->hw;
1432 1432
1433 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || 1433 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1434 cpuc->counters[hwc->idx] != counter)) 1434 cpuc->events[hwc->idx] != event))
1435 return; 1435 return;
1436 1436
1437 x86_pmu.enable(hwc, hwc->idx); 1437 x86_pmu.enable(hwc, hwc->idx);
1438} 1438}
1439 1439
1440void perf_counter_print_debug(void) 1440void perf_event_print_debug(void)
1441{ 1441{
1442 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; 1442 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1443 struct cpu_hw_counters *cpuc; 1443 struct cpu_hw_events *cpuc;
1444 unsigned long flags; 1444 unsigned long flags;
1445 int cpu, idx; 1445 int cpu, idx;
1446 1446
1447 if (!x86_pmu.num_counters) 1447 if (!x86_pmu.num_events)
1448 return; 1448 return;
1449 1449
1450 local_irq_save(flags); 1450 local_irq_save(flags);
1451 1451
1452 cpu = smp_processor_id(); 1452 cpu = smp_processor_id();
1453 cpuc = &per_cpu(cpu_hw_counters, cpu); 1453 cpuc = &per_cpu(cpu_hw_events, cpu);
1454 1454
1455 if (x86_pmu.version >= 2) { 1455 if (x86_pmu.version >= 2) {
1456 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); 1456 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
@@ -1466,7 +1466,7 @@ void perf_counter_print_debug(void)
1466 } 1466 }
1467 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); 1467 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
1468 1468
1469 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1469 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); 1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count); 1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1472 1472
@@ -1479,7 +1479,7 @@ void perf_counter_print_debug(void)
1479 pr_info("CPU#%d: gen-PMC%d left: %016llx\n", 1479 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
1480 cpu, idx, prev_left); 1480 cpu, idx, prev_left);
1481 } 1481 }
1482 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { 1482 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1483 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); 1483 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1484 1484
1485 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", 1485 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1488,7 +1488,7 @@ void perf_counter_print_debug(void)
1488 local_irq_restore(flags); 1488 local_irq_restore(flags);
1489} 1489}
1490 1490
1491static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) 1491static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1492{ 1492{
1493 struct debug_store *ds = cpuc->ds; 1493 struct debug_store *ds = cpuc->ds;
1494 struct bts_record { 1494 struct bts_record {
@@ -1496,14 +1496,14 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
1496 u64 to; 1496 u64 to;
1497 u64 flags; 1497 u64 flags;
1498 }; 1498 };
1499 struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; 1499 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1500 struct bts_record *at, *top; 1500 struct bts_record *at, *top;
1501 struct perf_output_handle handle; 1501 struct perf_output_handle handle;
1502 struct perf_event_header header; 1502 struct perf_event_header header;
1503 struct perf_sample_data data; 1503 struct perf_sample_data data;
1504 struct pt_regs regs; 1504 struct pt_regs regs;
1505 1505
1506 if (!counter) 1506 if (!event)
1507 return; 1507 return;
1508 1508
1509 if (!ds) 1509 if (!ds)
@@ -1518,7 +1518,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
1518 ds->bts_index = ds->bts_buffer_base; 1518 ds->bts_index = ds->bts_buffer_base;
1519 1519
1520 1520
1521 data.period = counter->hw.last_period; 1521 data.period = event->hw.last_period;
1522 data.addr = 0; 1522 data.addr = 0;
1523 regs.ip = 0; 1523 regs.ip = 0;
1524 1524
@@ -1527,9 +1527,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
1527 * We will overwrite the from and to address before we output 1527 * We will overwrite the from and to address before we output
1528 * the sample. 1528 * the sample.
1529 */ 1529 */
1530 perf_prepare_sample(&header, &data, counter, &regs); 1530 perf_prepare_sample(&header, &data, event, &regs);
1531 1531
1532 if (perf_output_begin(&handle, counter, 1532 if (perf_output_begin(&handle, event,
1533 header.size * (top - at), 1, 1)) 1533 header.size * (top - at), 1, 1))
1534 return; 1534 return;
1535 1535
@@ -1537,20 +1537,20 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
1537 data.ip = at->from; 1537 data.ip = at->from;
1538 data.addr = at->to; 1538 data.addr = at->to;
1539 1539
1540 perf_output_sample(&handle, &header, &data, counter); 1540 perf_output_sample(&handle, &header, &data, event);
1541 } 1541 }
1542 1542
1543 perf_output_end(&handle); 1543 perf_output_end(&handle);
1544 1544
1545 /* There's new data available. */ 1545 /* There's new data available. */
1546 counter->hw.interrupts++; 1546 event->hw.interrupts++;
1547 counter->pending_kill = POLL_IN; 1547 event->pending_kill = POLL_IN;
1548} 1548}
1549 1549
1550static void x86_pmu_disable(struct perf_counter *counter) 1550static void x86_pmu_disable(struct perf_event *event)
1551{ 1551{
1552 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1552 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1553 struct hw_perf_counter *hwc = &counter->hw; 1553 struct hw_perf_event *hwc = &event->hw;
1554 int idx = hwc->idx; 1554 int idx = hwc->idx;
1555 1555
1556 /* 1556 /*
@@ -1562,63 +1562,63 @@ static void x86_pmu_disable(struct perf_counter *counter)
1562 1562
1563 /* 1563 /*
1564 * Make sure the cleared pointer becomes visible before we 1564 * Make sure the cleared pointer becomes visible before we
1565 * (potentially) free the counter: 1565 * (potentially) free the event:
1566 */ 1566 */
1567 barrier(); 1567 barrier();
1568 1568
1569 /* 1569 /*
1570 * Drain the remaining delta count out of a counter 1570 * Drain the remaining delta count out of an event
1571 * that we are disabling: 1571 * that we are disabling:
1572 */ 1572 */
1573 x86_perf_counter_update(counter, hwc, idx); 1573 x86_perf_event_update(event, hwc, idx);
1574 1574
1575 /* Drain the remaining BTS records. */ 1575 /* Drain the remaining BTS records. */
1576 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) 1576 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1577 intel_pmu_drain_bts_buffer(cpuc); 1577 intel_pmu_drain_bts_buffer(cpuc);
1578 1578
1579 cpuc->counters[idx] = NULL; 1579 cpuc->events[idx] = NULL;
1580 clear_bit(idx, cpuc->used_mask); 1580 clear_bit(idx, cpuc->used_mask);
1581 1581
1582 perf_counter_update_userpage(counter); 1582 perf_event_update_userpage(event);
1583} 1583}
1584 1584
1585/* 1585/*
1586 * Save and restart an expired counter. Called by NMI contexts, 1586 * Save and restart an expired event. Called by NMI contexts,
1587 * so it has to be careful about preempting normal counter ops: 1587 * so it has to be careful about preempting normal event ops:
1588 */ 1588 */
1589static int intel_pmu_save_and_restart(struct perf_counter *counter) 1589static int intel_pmu_save_and_restart(struct perf_event *event)
1590{ 1590{
1591 struct hw_perf_counter *hwc = &counter->hw; 1591 struct hw_perf_event *hwc = &event->hw;
1592 int idx = hwc->idx; 1592 int idx = hwc->idx;
1593 int ret; 1593 int ret;
1594 1594
1595 x86_perf_counter_update(counter, hwc, idx); 1595 x86_perf_event_update(event, hwc, idx);
1596 ret = x86_perf_counter_set_period(counter, hwc, idx); 1596 ret = x86_perf_event_set_period(event, hwc, idx);
1597 1597
1598 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 1598 if (event->state == PERF_EVENT_STATE_ACTIVE)
1599 intel_pmu_enable_counter(hwc, idx); 1599 intel_pmu_enable_event(hwc, idx);
1600 1600
1601 return ret; 1601 return ret;
1602} 1602}
1603 1603
1604static void intel_pmu_reset(void) 1604static void intel_pmu_reset(void)
1605{ 1605{
1606 struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; 1606 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1607 unsigned long flags; 1607 unsigned long flags;
1608 int idx; 1608 int idx;
1609 1609
1610 if (!x86_pmu.num_counters) 1610 if (!x86_pmu.num_events)
1611 return; 1611 return;
1612 1612
1613 local_irq_save(flags); 1613 local_irq_save(flags);
1614 1614
1615 printk("clearing PMU state on CPU#%d\n", smp_processor_id()); 1615 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1616 1616
1617 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1617 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1618 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); 1618 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1619 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); 1619 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1620 } 1620 }
1621 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { 1621 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1622 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 1622 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1623 } 1623 }
1624 if (ds) 1624 if (ds)
@@ -1630,38 +1630,38 @@ static void intel_pmu_reset(void)
1630static int p6_pmu_handle_irq(struct pt_regs *regs) 1630static int p6_pmu_handle_irq(struct pt_regs *regs)
1631{ 1631{
1632 struct perf_sample_data data; 1632 struct perf_sample_data data;
1633 struct cpu_hw_counters *cpuc; 1633 struct cpu_hw_events *cpuc;
1634 struct perf_counter *counter; 1634 struct perf_event *event;
1635 struct hw_perf_counter *hwc; 1635 struct hw_perf_event *hwc;
1636 int idx, handled = 0; 1636 int idx, handled = 0;
1637 u64 val; 1637 u64 val;
1638 1638
1639 data.addr = 0; 1639 data.addr = 0;
1640 1640
1641 cpuc = &__get_cpu_var(cpu_hw_counters); 1641 cpuc = &__get_cpu_var(cpu_hw_events);
1642 1642
1643 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1643 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1644 if (!test_bit(idx, cpuc->active_mask)) 1644 if (!test_bit(idx, cpuc->active_mask))
1645 continue; 1645 continue;
1646 1646
1647 counter = cpuc->counters[idx]; 1647 event = cpuc->events[idx];
1648 hwc = &counter->hw; 1648 hwc = &event->hw;
1649 1649
1650 val = x86_perf_counter_update(counter, hwc, idx); 1650 val = x86_perf_event_update(event, hwc, idx);
1651 if (val & (1ULL << (x86_pmu.counter_bits - 1))) 1651 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1652 continue; 1652 continue;
1653 1653
1654 /* 1654 /*
1655 * counter overflow 1655 * event overflow
1656 */ 1656 */
1657 handled = 1; 1657 handled = 1;
1658 data.period = counter->hw.last_period; 1658 data.period = event->hw.last_period;
1659 1659
1660 if (!x86_perf_counter_set_period(counter, hwc, idx)) 1660 if (!x86_perf_event_set_period(event, hwc, idx))
1661 continue; 1661 continue;
1662 1662
1663 if (perf_counter_overflow(counter, 1, &data, regs)) 1663 if (perf_event_overflow(event, 1, &data, regs))
1664 p6_pmu_disable_counter(hwc, idx); 1664 p6_pmu_disable_event(hwc, idx);
1665 } 1665 }
1666 1666
1667 if (handled) 1667 if (handled)
@@ -1677,13 +1677,13 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
1677static int intel_pmu_handle_irq(struct pt_regs *regs) 1677static int intel_pmu_handle_irq(struct pt_regs *regs)
1678{ 1678{
1679 struct perf_sample_data data; 1679 struct perf_sample_data data;
1680 struct cpu_hw_counters *cpuc; 1680 struct cpu_hw_events *cpuc;
1681 int bit, loops; 1681 int bit, loops;
1682 u64 ack, status; 1682 u64 ack, status;
1683 1683
1684 data.addr = 0; 1684 data.addr = 0;
1685 1685
1686 cpuc = &__get_cpu_var(cpu_hw_counters); 1686 cpuc = &__get_cpu_var(cpu_hw_events);
1687 1687
1688 perf_disable(); 1688 perf_disable();
1689 intel_pmu_drain_bts_buffer(cpuc); 1689 intel_pmu_drain_bts_buffer(cpuc);
@@ -1696,8 +1696,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1696 loops = 0; 1696 loops = 0;
1697again: 1697again:
1698 if (++loops > 100) { 1698 if (++loops > 100) {
1699 WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); 1699 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1700 perf_counter_print_debug(); 1700 perf_event_print_debug();
1701 intel_pmu_reset(); 1701 intel_pmu_reset();
1702 perf_enable(); 1702 perf_enable();
1703 return 1; 1703 return 1;
@@ -1706,19 +1706,19 @@ again:
1706 inc_irq_stat(apic_perf_irqs); 1706 inc_irq_stat(apic_perf_irqs);
1707 ack = status; 1707 ack = status;
1708 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 1708 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1709 struct perf_counter *counter = cpuc->counters[bit]; 1709 struct perf_event *event = cpuc->events[bit];
1710 1710
1711 clear_bit(bit, (unsigned long *) &status); 1711 clear_bit(bit, (unsigned long *) &status);
1712 if (!test_bit(bit, cpuc->active_mask)) 1712 if (!test_bit(bit, cpuc->active_mask))
1713 continue; 1713 continue;
1714 1714
1715 if (!intel_pmu_save_and_restart(counter)) 1715 if (!intel_pmu_save_and_restart(event))
1716 continue; 1716 continue;
1717 1717
1718 data.period = counter->hw.last_period; 1718 data.period = event->hw.last_period;
1719 1719
1720 if (perf_counter_overflow(counter, 1, &data, regs)) 1720 if (perf_event_overflow(event, 1, &data, regs))
1721 intel_pmu_disable_counter(&counter->hw, bit); 1721 intel_pmu_disable_event(&event->hw, bit);
1722 } 1722 }
1723 1723
1724 intel_pmu_ack_status(ack); 1724 intel_pmu_ack_status(ack);
@@ -1738,38 +1738,38 @@ again:
1738static int amd_pmu_handle_irq(struct pt_regs *regs) 1738static int amd_pmu_handle_irq(struct pt_regs *regs)
1739{ 1739{
1740 struct perf_sample_data data; 1740 struct perf_sample_data data;
1741 struct cpu_hw_counters *cpuc; 1741 struct cpu_hw_events *cpuc;
1742 struct perf_counter *counter; 1742 struct perf_event *event;
1743 struct hw_perf_counter *hwc; 1743 struct hw_perf_event *hwc;
1744 int idx, handled = 0; 1744 int idx, handled = 0;
1745 u64 val; 1745 u64 val;
1746 1746
1747 data.addr = 0; 1747 data.addr = 0;
1748 1748
1749 cpuc = &__get_cpu_var(cpu_hw_counters); 1749 cpuc = &__get_cpu_var(cpu_hw_events);
1750 1750
1751 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1751 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1752 if (!test_bit(idx, cpuc->active_mask)) 1752 if (!test_bit(idx, cpuc->active_mask))
1753 continue; 1753 continue;
1754 1754
1755 counter = cpuc->counters[idx]; 1755 event = cpuc->events[idx];
1756 hwc = &counter->hw; 1756 hwc = &event->hw;
1757 1757
1758 val = x86_perf_counter_update(counter, hwc, idx); 1758 val = x86_perf_event_update(event, hwc, idx);
1759 if (val & (1ULL << (x86_pmu.counter_bits - 1))) 1759 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1760 continue; 1760 continue;
1761 1761
1762 /* 1762 /*
1763 * counter overflow 1763 * event overflow
1764 */ 1764 */
1765 handled = 1; 1765 handled = 1;
1766 data.period = counter->hw.last_period; 1766 data.period = event->hw.last_period;
1767 1767
1768 if (!x86_perf_counter_set_period(counter, hwc, idx)) 1768 if (!x86_perf_event_set_period(event, hwc, idx))
1769 continue; 1769 continue;
1770 1770
1771 if (perf_counter_overflow(counter, 1, &data, regs)) 1771 if (perf_event_overflow(event, 1, &data, regs))
1772 amd_pmu_disable_counter(hwc, idx); 1772 amd_pmu_disable_event(hwc, idx);
1773 } 1773 }
1774 1774
1775 if (handled) 1775 if (handled)
@@ -1783,18 +1783,18 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
1783 irq_enter(); 1783 irq_enter();
1784 ack_APIC_irq(); 1784 ack_APIC_irq();
1785 inc_irq_stat(apic_pending_irqs); 1785 inc_irq_stat(apic_pending_irqs);
1786 perf_counter_do_pending(); 1786 perf_event_do_pending();
1787 irq_exit(); 1787 irq_exit();
1788} 1788}
1789 1789
1790void set_perf_counter_pending(void) 1790void set_perf_event_pending(void)
1791{ 1791{
1792#ifdef CONFIG_X86_LOCAL_APIC 1792#ifdef CONFIG_X86_LOCAL_APIC
1793 apic->send_IPI_self(LOCAL_PENDING_VECTOR); 1793 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
1794#endif 1794#endif
1795} 1795}
1796 1796
1797void perf_counters_lapic_init(void) 1797void perf_events_lapic_init(void)
1798{ 1798{
1799#ifdef CONFIG_X86_LOCAL_APIC 1799#ifdef CONFIG_X86_LOCAL_APIC
1800 if (!x86_pmu.apic || !x86_pmu_initialized()) 1800 if (!x86_pmu.apic || !x86_pmu_initialized())
@@ -1808,13 +1808,13 @@ void perf_counters_lapic_init(void)
1808} 1808}
1809 1809
1810static int __kprobes 1810static int __kprobes
1811perf_counter_nmi_handler(struct notifier_block *self, 1811perf_event_nmi_handler(struct notifier_block *self,
1812 unsigned long cmd, void *__args) 1812 unsigned long cmd, void *__args)
1813{ 1813{
1814 struct die_args *args = __args; 1814 struct die_args *args = __args;
1815 struct pt_regs *regs; 1815 struct pt_regs *regs;
1816 1816
1817 if (!atomic_read(&active_counters)) 1817 if (!atomic_read(&active_events))
1818 return NOTIFY_DONE; 1818 return NOTIFY_DONE;
1819 1819
1820 switch (cmd) { 1820 switch (cmd) {
@@ -1833,7 +1833,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
1833#endif 1833#endif
1834 /* 1834 /*
1835 * Can't rely on the handled return value to say it was our NMI, two 1835 * Can't rely on the handled return value to say it was our NMI, two
1836 * counters could trigger 'simultaneously' raising two back-to-back NMIs. 1836 * events could trigger 'simultaneously' raising two back-to-back NMIs.
1837 * 1837 *
1838 * If the first NMI handles both, the latter will be empty and daze 1838 * If the first NMI handles both, the latter will be empty and daze
1839 * the CPU. 1839 * the CPU.
@@ -1843,8 +1843,8 @@ perf_counter_nmi_handler(struct notifier_block *self,
1843 return NOTIFY_STOP; 1843 return NOTIFY_STOP;
1844} 1844}
1845 1845
1846static __read_mostly struct notifier_block perf_counter_nmi_notifier = { 1846static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1847 .notifier_call = perf_counter_nmi_handler, 1847 .notifier_call = perf_event_nmi_handler,
1848 .next = NULL, 1848 .next = NULL,
1849 .priority = 1 1849 .priority = 1
1850}; 1850};
@@ -1854,8 +1854,8 @@ static struct x86_pmu p6_pmu = {
1854 .handle_irq = p6_pmu_handle_irq, 1854 .handle_irq = p6_pmu_handle_irq,
1855 .disable_all = p6_pmu_disable_all, 1855 .disable_all = p6_pmu_disable_all,
1856 .enable_all = p6_pmu_enable_all, 1856 .enable_all = p6_pmu_enable_all,
1857 .enable = p6_pmu_enable_counter, 1857 .enable = p6_pmu_enable_event,
1858 .disable = p6_pmu_disable_counter, 1858 .disable = p6_pmu_disable_event,
1859 .eventsel = MSR_P6_EVNTSEL0, 1859 .eventsel = MSR_P6_EVNTSEL0,
1860 .perfctr = MSR_P6_PERFCTR0, 1860 .perfctr = MSR_P6_PERFCTR0,
1861 .event_map = p6_pmu_event_map, 1861 .event_map = p6_pmu_event_map,
@@ -1864,16 +1864,16 @@ static struct x86_pmu p6_pmu = {
1864 .apic = 1, 1864 .apic = 1,
1865 .max_period = (1ULL << 31) - 1, 1865 .max_period = (1ULL << 31) - 1,
1866 .version = 0, 1866 .version = 0,
1867 .num_counters = 2, 1867 .num_events = 2,
1868 /* 1868 /*
1869 * Counters have 40 bits implemented. However they are designed such 1869 * Events have 40 bits implemented. However they are designed such
1870 * that bits [32-39] are sign extensions of bit 31. As such the 1870 * that bits [32-39] are sign extensions of bit 31. As such the
1871 * effective width of a counter for P6-like PMU is 32 bits only. 1871 * effective width of an event for P6-like PMU is 32 bits only.
1872 * 1872 *
1873 * See IA-32 Intel Architecture Software developer manual Vol 3B 1873 * See IA-32 Intel Architecture Software developer manual Vol 3B
1874 */ 1874 */
1875 .counter_bits = 32, 1875 .event_bits = 32,
1876 .counter_mask = (1ULL << 32) - 1, 1876 .event_mask = (1ULL << 32) - 1,
1877}; 1877};
1878 1878
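The p6_pmu descriptor above, and the intel_pmu and amd_pmu descriptors that follow, are instances of a struct-of-function-pointers dispatch pattern: init_hw_perf_events() further down copies the descriptor that matches the CPU vendor into the single x86_pmu instance, and the generic code only ever calls through its pointers. A stripped-down illustration of the same pattern; the field names and vendor check here are simplified assumptions, not the real layout:

#include <stdio.h>
#include <string.h>

struct pmu_ops {
	const char *name;
	void (*enable_all)(void);
	void (*disable_all)(void);
	int num_events;
};

static void intel_enable_all(void)  { puts("intel: enable all counters"); }
static void intel_disable_all(void) { puts("intel: disable all counters"); }
static void amd_enable_all(void)    { puts("amd: enable all counters"); }
static void amd_disable_all(void)   { puts("amd: disable all counters"); }

static const struct pmu_ops intel_ops = {
	.name        = "Intel",
	.enable_all  = intel_enable_all,
	.disable_all = intel_disable_all,
	.num_events  = 4,
};

static const struct pmu_ops amd_ops = {
	.name        = "AMD",
	.enable_all  = amd_enable_all,
	.disable_all = amd_disable_all,
	.num_events  = 4,
};

static struct pmu_ops pmu;	/* the one instance generic code calls through */

int main(void)
{
	const char *vendor = "GenuineIntel";	/* stand-in for the boot CPU vendor */

	pmu = strcmp(vendor, "GenuineIntel") == 0 ? intel_ops : amd_ops;

	printf("%s PMU driver, %d generic counters\n", pmu.name, pmu.num_events);
	pmu.disable_all();
	pmu.enable_all();
	return 0;
}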
1879static struct x86_pmu intel_pmu = { 1879static struct x86_pmu intel_pmu = {
@@ -1881,8 +1881,8 @@ static struct x86_pmu intel_pmu = {
1881 .handle_irq = intel_pmu_handle_irq, 1881 .handle_irq = intel_pmu_handle_irq,
1882 .disable_all = intel_pmu_disable_all, 1882 .disable_all = intel_pmu_disable_all,
1883 .enable_all = intel_pmu_enable_all, 1883 .enable_all = intel_pmu_enable_all,
1884 .enable = intel_pmu_enable_counter, 1884 .enable = intel_pmu_enable_event,
1885 .disable = intel_pmu_disable_counter, 1885 .disable = intel_pmu_disable_event,
1886 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 1886 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1887 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 1887 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1888 .event_map = intel_pmu_event_map, 1888 .event_map = intel_pmu_event_map,
@@ -1892,7 +1892,7 @@ static struct x86_pmu intel_pmu = {
1892 /* 1892 /*
1893 * Intel PMCs cannot be accessed sanely above 32 bit width, 1893 * Intel PMCs cannot be accessed sanely above 32 bit width,
1894 * so we install an artificial 1<<31 period regardless of 1894 * so we install an artificial 1<<31 period regardless of
1895 * the generic counter period: 1895 * the generic event period:
1896 */ 1896 */
1897 .max_period = (1ULL << 31) - 1, 1897 .max_period = (1ULL << 31) - 1,
1898 .enable_bts = intel_pmu_enable_bts, 1898 .enable_bts = intel_pmu_enable_bts,
@@ -1904,16 +1904,16 @@ static struct x86_pmu amd_pmu = {
1904 .handle_irq = amd_pmu_handle_irq, 1904 .handle_irq = amd_pmu_handle_irq,
1905 .disable_all = amd_pmu_disable_all, 1905 .disable_all = amd_pmu_disable_all,
1906 .enable_all = amd_pmu_enable_all, 1906 .enable_all = amd_pmu_enable_all,
1907 .enable = amd_pmu_enable_counter, 1907 .enable = amd_pmu_enable_event,
1908 .disable = amd_pmu_disable_counter, 1908 .disable = amd_pmu_disable_event,
1909 .eventsel = MSR_K7_EVNTSEL0, 1909 .eventsel = MSR_K7_EVNTSEL0,
1910 .perfctr = MSR_K7_PERFCTR0, 1910 .perfctr = MSR_K7_PERFCTR0,
1911 .event_map = amd_pmu_event_map, 1911 .event_map = amd_pmu_event_map,
1912 .raw_event = amd_pmu_raw_event, 1912 .raw_event = amd_pmu_raw_event,
1913 .max_events = ARRAY_SIZE(amd_perfmon_event_map), 1913 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
1914 .num_counters = 4, 1914 .num_events = 4,
1915 .counter_bits = 48, 1915 .event_bits = 48,
1916 .counter_mask = (1ULL << 48) - 1, 1916 .event_mask = (1ULL << 48) - 1,
1917 .apic = 1, 1917 .apic = 1,
1918 /* use highest bit to detect overflow */ 1918 /* use highest bit to detect overflow */
1919 .max_period = (1ULL << 47) - 1, 1919 .max_period = (1ULL << 47) - 1,
@@ -1970,7 +1970,7 @@ static int intel_pmu_init(void)
1970 1970
1971 /* 1971 /*
1972 * Check whether the Architectural PerfMon supports 1972 * Check whether the Architectural PerfMon supports
1973 * Branch Misses Retired Event or not. 1973 * Branch Misses Retired hw_event or not.
1974 */ 1974 */
1975 cpuid(10, &eax.full, &ebx, &unused, &edx.full); 1975 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
1976 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) 1976 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
@@ -1982,15 +1982,15 @@ static int intel_pmu_init(void)
1982 1982
1983 x86_pmu = intel_pmu; 1983 x86_pmu = intel_pmu;
1984 x86_pmu.version = version; 1984 x86_pmu.version = version;
1985 x86_pmu.num_counters = eax.split.num_counters; 1985 x86_pmu.num_events = eax.split.num_events;
1986 x86_pmu.counter_bits = eax.split.bit_width; 1986 x86_pmu.event_bits = eax.split.bit_width;
1987 x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; 1987 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
1988 1988
1989 /* 1989 /*
1990 * Quirk: v2 perfmon does not report fixed-purpose counters, so 1990 * Quirk: v2 perfmon does not report fixed-purpose events, so
1991 * assume at least 3 counters: 1991 * assume at least 3 events:
1992 */ 1992 */
1993 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); 1993 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
1994 1994
1995 /* 1995 /*
1996 * Install the hw-cache-events table: 1996 * Install the hw-cache-events table:
@@ -2037,11 +2037,11 @@ static int amd_pmu_init(void)
2037 return 0; 2037 return 0;
2038} 2038}
2039 2039
2040void __init init_hw_perf_counters(void) 2040void __init init_hw_perf_events(void)
2041{ 2041{
2042 int err; 2042 int err;
2043 2043
2044 pr_info("Performance Counters: "); 2044 pr_info("Performance Events: ");
2045 2045
2046 switch (boot_cpu_data.x86_vendor) { 2046 switch (boot_cpu_data.x86_vendor) {
2047 case X86_VENDOR_INTEL: 2047 case X86_VENDOR_INTEL:
@@ -2054,45 +2054,45 @@ void __init init_hw_perf_counters(void)
2054 return; 2054 return;
2055 } 2055 }
2056 if (err != 0) { 2056 if (err != 0) {
2057 pr_cont("no PMU driver, software counters only.\n"); 2057 pr_cont("no PMU driver, software events only.\n");
2058 return; 2058 return;
2059 } 2059 }
2060 2060
2061 pr_cont("%s PMU driver.\n", x86_pmu.name); 2061 pr_cont("%s PMU driver.\n", x86_pmu.name);
2062 2062
2063 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { 2063 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2064 WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", 2064 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2065 x86_pmu.num_counters, X86_PMC_MAX_GENERIC); 2065 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2066 x86_pmu.num_counters = X86_PMC_MAX_GENERIC; 2066 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2067 } 2067 }
2068 perf_counter_mask = (1 << x86_pmu.num_counters) - 1; 2068 perf_event_mask = (1 << x86_pmu.num_events) - 1;
2069 perf_max_counters = x86_pmu.num_counters; 2069 perf_max_events = x86_pmu.num_events;
2070 2070
2071 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { 2071 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2072 WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", 2072 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2073 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); 2073 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2074 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; 2074 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2075 } 2075 }
2076 2076
2077 perf_counter_mask |= 2077 perf_event_mask |=
2078 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; 2078 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2079 x86_pmu.intel_ctrl = perf_counter_mask; 2079 x86_pmu.intel_ctrl = perf_event_mask;
2080 2080
2081 perf_counters_lapic_init(); 2081 perf_events_lapic_init();
2082 register_die_notifier(&perf_counter_nmi_notifier); 2082 register_die_notifier(&perf_event_nmi_notifier);
2083 2083
2084 pr_info("... version: %d\n", x86_pmu.version); 2084 pr_info("... version: %d\n", x86_pmu.version);
2085 pr_info("... bit width: %d\n", x86_pmu.counter_bits); 2085 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2086 pr_info("... generic counters: %d\n", x86_pmu.num_counters); 2086 pr_info("... generic registers: %d\n", x86_pmu.num_events);
2087 pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); 2087 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2088 pr_info("... max period: %016Lx\n", x86_pmu.max_period); 2088 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2089 pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed); 2089 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2090 pr_info("... counter mask: %016Lx\n", perf_counter_mask); 2090 pr_info("... event mask: %016Lx\n", perf_event_mask);
2091} 2091}
2092 2092
2093static inline void x86_pmu_read(struct perf_counter *counter) 2093static inline void x86_pmu_read(struct perf_event *event)
2094{ 2094{
2095 x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); 2095 x86_perf_event_update(event, &event->hw, event->hw.idx);
2096} 2096}
2097 2097
2098static const struct pmu pmu = { 2098static const struct pmu pmu = {
@@ -2102,14 +2102,14 @@ static const struct pmu pmu = {
2102 .unthrottle = x86_pmu_unthrottle, 2102 .unthrottle = x86_pmu_unthrottle,
2103}; 2103};
2104 2104
2105const struct pmu *hw_perf_counter_init(struct perf_counter *counter) 2105const struct pmu *hw_perf_event_init(struct perf_event *event)
2106{ 2106{
2107 int err; 2107 int err;
2108 2108
2109 err = __hw_perf_counter_init(counter); 2109 err = __hw_perf_event_init(event);
2110 if (err) { 2110 if (err) {
2111 if (counter->destroy) 2111 if (event->destroy)
2112 counter->destroy(counter); 2112 event->destroy(event);
2113 return ERR_PTR(err); 2113 return ERR_PTR(err);
2114 } 2114 }
2115 2115
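When __hw_perf_event_init() fails, hw_perf_event_init() above returns ERR_PTR(err): a small negative error code encoded directly in the pointer value, which callers unpack with IS_ERR()/PTR_ERR(). A user-space re-implementation of that convention, assuming pointers and long share a width as on x86-64; init_thing() is a made-up example user:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095	/* error codes live in the top 4095 pointer values */

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *init_thing(int fail)
{
	static int thing = 42;

	if (fail)
		return ERR_PTR(-EBUSY);	/* like returning ERR_PTR(err) above */
	return &thing;
}

int main(void)
{
	void *p = init_thing(1);

	if (IS_ERR(p))
		printf("init failed: %ld\n", PTR_ERR(p));	/* prints -16 (EBUSY) */
	else
		printf("got %d\n", *(int *)p);
	return 0;
}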
@@ -2292,7 +2292,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2292 return entry; 2292 return entry;
2293} 2293}
2294 2294
2295void hw_perf_counter_setup_online(int cpu) 2295void hw_perf_event_setup_online(int cpu)
2296{ 2296{
2297 init_debug_store_on_cpu(cpu); 2297 init_debug_store_on_cpu(cpu);
2298} 2298}
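init_hw_perf_events() above builds perf_event_mask from one bit per generic counter in the low bits plus one bit per fixed-purpose counter starting at X86_PMC_IDX_FIXED, and then reuses that combined mask as x86_pmu.intel_ctrl. A small sketch of that mask construction, with counter counts picked as example inputs:

#include <stdio.h>
#include <stdint.h>

#define X86_PMC_IDX_FIXED 32	/* fixed-purpose counters start at bit 32 */

int main(void)
{
	int num_events       = 4;	/* generic counters reported by CPUID */
	int num_events_fixed = 3;	/* fixed-purpose counters */
	uint64_t perf_event_mask;

	/* one bit per generic counter ... */
	perf_event_mask = (1ULL << num_events) - 1;
	/* ... plus one bit per fixed counter, shifted up to the fixed range */
	perf_event_mask |= ((1ULL << num_events_fixed) - 1) << X86_PMC_IDX_FIXED;

	printf("event mask: %016llx\n", (unsigned long long)perf_event_mask);
	/* prints 000000070000000f for this configuration */
	return 0;
}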
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 392bea43b890..fab786f60ed6 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -20,7 +20,7 @@
20#include <linux/kprobes.h> 20#include <linux/kprobes.h>
21 21
22#include <asm/apic.h> 22#include <asm/apic.h>
23#include <asm/perf_counter.h> 23#include <asm/perf_event.h>
24 24
25struct nmi_watchdog_ctlblk { 25struct nmi_watchdog_ctlblk {
26 unsigned int cccr_msr; 26 unsigned int cccr_msr;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index d59fe323807e..681c3fda7391 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1021,7 +1021,7 @@ apicinterrupt ERROR_APIC_VECTOR \
1021apicinterrupt SPURIOUS_APIC_VECTOR \ 1021apicinterrupt SPURIOUS_APIC_VECTOR \
1022 spurious_interrupt smp_spurious_interrupt 1022 spurious_interrupt smp_spurious_interrupt
1023 1023
1024#ifdef CONFIG_PERF_COUNTERS 1024#ifdef CONFIG_PERF_EVENTS
1025apicinterrupt LOCAL_PENDING_VECTOR \ 1025apicinterrupt LOCAL_PENDING_VECTOR \
1026 perf_pending_interrupt smp_perf_pending_interrupt 1026 perf_pending_interrupt smp_perf_pending_interrupt
1027#endif 1027#endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 300883112e3d..40f30773fb29 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -208,7 +208,7 @@ static void __init apic_intr_init(void)
208 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 208 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
209 209
210 /* Performance monitoring interrupts: */ 210 /* Performance monitoring interrupts: */
211# ifdef CONFIG_PERF_COUNTERS 211# ifdef CONFIG_PERF_EVENTS
212 alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); 212 alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
213# endif 213# endif
214 214
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index d51321ddafda..0157cd26d7cc 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -335,4 +335,4 @@ ENTRY(sys_call_table)
335 .long sys_preadv 335 .long sys_preadv
336 .long sys_pwritev 336 .long sys_pwritev
337 .long sys_rt_tgsigqueueinfo /* 335 */ 337 .long sys_rt_tgsigqueueinfo /* 335 */
338 .long sys_perf_counter_open 338 .long sys_perf_event_open
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 775a020990a5..82728f2c6d55 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -10,7 +10,7 @@
10#include <linux/bootmem.h> /* max_low_pfn */ 10#include <linux/bootmem.h> /* max_low_pfn */
11#include <linux/kprobes.h> /* __kprobes, ... */ 11#include <linux/kprobes.h> /* __kprobes, ... */
12#include <linux/mmiotrace.h> /* kmmio_handler, ... */ 12#include <linux/mmiotrace.h> /* kmmio_handler, ... */
13#include <linux/perf_counter.h> /* perf_swcounter_event */ 13#include <linux/perf_event.h> /* perf_sw_event */
14 14
15#include <asm/traps.h> /* dotraplinkage, ... */ 15#include <asm/traps.h> /* dotraplinkage, ... */
16#include <asm/pgalloc.h> /* pgd_*(), ... */ 16#include <asm/pgalloc.h> /* pgd_*(), ... */
@@ -1017,7 +1017,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
1017 if (unlikely(error_code & PF_RSVD)) 1017 if (unlikely(error_code & PF_RSVD))
1018 pgtable_bad(regs, error_code, address); 1018 pgtable_bad(regs, error_code, address);
1019 1019
1020 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 1020 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
1021 1021
1022 /* 1022 /*
1023 * If we're in an interrupt, have no user context or are running 1023 * If we're in an interrupt, have no user context or are running
@@ -1114,11 +1114,11 @@ good_area:
1114 1114
1115 if (fault & VM_FAULT_MAJOR) { 1115 if (fault & VM_FAULT_MAJOR) {
1116 tsk->maj_flt++; 1116 tsk->maj_flt++;
1117 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 1117 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
1118 regs, address); 1118 regs, address);
1119 } else { 1119 } else {
1120 tsk->min_flt++; 1120 tsk->min_flt++;
1121 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 1121 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
1122 regs, address); 1122 regs, address);
1123 } 1123 }
1124 1124
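The perf_sw_event() calls in the fault handler above are what feed PERF_COUNT_SW_PAGE_FAULTS and its MIN/MAJ variants to user space. A small self-monitoring sketch using the renamed syscall, assuming a post-rename kernel whose headers provide <linux/perf_event.h> and __NR_perf_event_open; error handling is kept minimal:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int fd, i;

	memset(&attr, 0, sizeof(attr));
	attr.type     = PERF_TYPE_SOFTWARE;
	attr.size     = sizeof(attr);
	attr.config   = PERF_COUNT_SW_PAGE_FAULTS;
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* touch fresh pages so the fault path above fires software events */
	char *p = malloc(16 * 4096);
	for (i = 0; i < 16; i++)
		p[i * 4096] = 1;

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("page faults: %lld\n", count);

	free(p);
	close(fd);
	return 0;
}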
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 4899215999de..8eb05878554c 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -234,11 +234,11 @@ static void arch_perfmon_setup_counters(void)
234 if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && 234 if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
235 current_cpu_data.x86_model == 15) { 235 current_cpu_data.x86_model == 15) {
236 eax.split.version_id = 2; 236 eax.split.version_id = 2;
237 eax.split.num_counters = 2; 237 eax.split.num_events = 2;
238 eax.split.bit_width = 40; 238 eax.split.bit_width = 40;
239 } 239 }
240 240
241 num_counters = eax.split.num_counters; 241 num_counters = eax.split.num_events;
242 242
243 op_arch_perfmon_spec.num_counters = num_counters; 243 op_arch_perfmon_spec.num_counters = num_counters;
244 op_arch_perfmon_spec.num_controls = num_counters; 244 op_arch_perfmon_spec.num_controls = num_counters;
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index b83776180c7f..7b8e75d16081 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -13,7 +13,7 @@
13#define OP_X86_MODEL_H 13#define OP_X86_MODEL_H
14 14
15#include <asm/types.h> 15#include <asm/types.h>
16#include <asm/perf_counter.h> 16#include <asm/perf_event.h>
17 17
18struct op_msr { 18struct op_msr {
19 unsigned long addr; 19 unsigned long addr;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 50eecfe1d724..44203ff599da 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -26,7 +26,7 @@
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/nmi.h> 27#include <linux/nmi.h>
28#include <linux/quotaops.h> 28#include <linux/quotaops.h>
29#include <linux/perf_counter.h> 29#include <linux/perf_event.h>
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/suspend.h> 32#include <linux/suspend.h>
@@ -252,7 +252,7 @@ static void sysrq_handle_showregs(int key, struct tty_struct *tty)
252 struct pt_regs *regs = get_irq_regs(); 252 struct pt_regs *regs = get_irq_regs();
253 if (regs) 253 if (regs)
254 show_regs(regs); 254 show_regs(regs);
255 perf_counter_print_debug(); 255 perf_event_print_debug();
256} 256}
257static struct sysrq_key_op sysrq_showregs_op = { 257static struct sysrq_key_op sysrq_showregs_op = {
258 .handler = sysrq_handle_showregs, 258 .handler = sysrq_handle_showregs,
diff --git a/fs/exec.c b/fs/exec.c
index 172ceb6edde4..434dba778ccc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -33,7 +33,7 @@
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/pagemap.h> 35#include <linux/pagemap.h>
36#include <linux/perf_counter.h> 36#include <linux/perf_event.h>
37#include <linux/highmem.h> 37#include <linux/highmem.h>
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <linux/key.h> 39#include <linux/key.h>
@@ -923,7 +923,7 @@ void set_task_comm(struct task_struct *tsk, char *buf)
923 task_lock(tsk); 923 task_lock(tsk);
924 strlcpy(tsk->comm, buf, sizeof(tsk->comm)); 924 strlcpy(tsk->comm, buf, sizeof(tsk->comm));
925 task_unlock(tsk); 925 task_unlock(tsk);
926 perf_counter_comm(tsk); 926 perf_event_comm(tsk);
927} 927}
928 928
929int flush_old_exec(struct linux_binprm * bprm) 929int flush_old_exec(struct linux_binprm * bprm)
@@ -997,7 +997,7 @@ int flush_old_exec(struct linux_binprm * bprm)
997 * security domain: 997 * security domain:
998 */ 998 */
999 if (!get_dumpable(current->mm)) 999 if (!get_dumpable(current->mm))
1000 perf_counter_exit_task(current); 1000 perf_event_exit_task(current);
1001 1001
1002 /* An exec changes our domain. We are no longer part of the thread 1002 /* An exec changes our domain. We are no longer part of the thread
1003 group */ 1003 group */
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 1125e5a1ee5d..d76b66acea95 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -620,8 +620,8 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
620 620
621#define __NR_rt_tgsigqueueinfo 240 621#define __NR_rt_tgsigqueueinfo 240
622__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) 622__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
623#define __NR_perf_counter_open 241 623#define __NR_perf_event_open 241
624__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) 624__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
625 625
626#undef __NR_syscalls 626#undef __NR_syscalls
627#define __NR_syscalls 242 627#define __NR_syscalls 242
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9e7f2e8fc66e..21a6f5d9af22 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -106,13 +106,13 @@ extern struct group_info init_groups;
106 106
107extern struct cred init_cred; 107extern struct cred init_cred;
108 108
109#ifdef CONFIG_PERF_COUNTERS 109#ifdef CONFIG_PERF_EVENTS
110# define INIT_PERF_COUNTERS(tsk) \ 110# define INIT_PERF_EVENTS(tsk) \
111 .perf_counter_mutex = \ 111 .perf_event_mutex = \
112 __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ 112 __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
113 .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), 113 .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
114#else 114#else
115# define INIT_PERF_COUNTERS(tsk) 115# define INIT_PERF_EVENTS(tsk)
116#endif 116#endif
117 117
118/* 118/*
@@ -178,7 +178,7 @@ extern struct cred init_cred;
178 }, \ 178 }, \
179 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ 179 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
180 INIT_IDS \ 180 INIT_IDS \
181 INIT_PERF_COUNTERS(tsk) \ 181 INIT_PERF_EVENTS(tsk) \
182 INIT_TRACE_IRQFLAGS \ 182 INIT_TRACE_IRQFLAGS \
183 INIT_LOCKDEP \ 183 INIT_LOCKDEP \
184 INIT_FTRACE_GRAPH \ 184 INIT_FTRACE_GRAPH \
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 740caad09a44..368bd70f1d2d 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -1,5 +1,9 @@
1/* 1/*
2 * Performance counters: 2 * NOTE: this file will be removed in a future kernel release, it is
3 * provided as a courtesy copy of user-space code that relies on the
4 * old (pre-rename) symbols and constants.
5 *
6 * Performance events:
3 * 7 *
4 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> 8 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar 9 * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
@@ -131,19 +135,19 @@ enum perf_counter_sample_format {
131 * as specified by attr.read_format: 135 * as specified by attr.read_format:
132 * 136 *
133 * struct read_format { 137 * struct read_format {
134 * { u64 value; 138 * { u64 value;
135 * { u64 time_enabled; } && PERF_FORMAT_ENABLED 139 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
136 * { u64 time_running; } && PERF_FORMAT_RUNNING 140 * { u64 time_running; } && PERF_FORMAT_RUNNING
137 * { u64 id; } && PERF_FORMAT_ID 141 * { u64 id; } && PERF_FORMAT_ID
138 * } && !PERF_FORMAT_GROUP 142 * } && !PERF_FORMAT_GROUP
139 * 143 *
140 * { u64 nr; 144 * { u64 nr;
141 * { u64 time_enabled; } && PERF_FORMAT_ENABLED 145 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
142 * { u64 time_running; } && PERF_FORMAT_RUNNING 146 * { u64 time_running; } && PERF_FORMAT_RUNNING
143 * { u64 value; 147 * { u64 value;
144 * { u64 id; } && PERF_FORMAT_ID 148 * { u64 id; } && PERF_FORMAT_ID
145 * } cntr[nr]; 149 * } cntr[nr];
146 * } && PERF_FORMAT_GROUP 150 * } && PERF_FORMAT_GROUP
147 * }; 151 * };
148 */ 152 */
149enum perf_counter_read_format { 153enum perf_counter_read_format {
@@ -314,9 +318,9 @@ enum perf_event_type {
314 318
315 /* 319 /*
316 * struct { 320 * struct {
317 * struct perf_event_header header; 321 * struct perf_event_header header;
318 * u64 id; 322 * u64 id;
319 * u64 lost; 323 * u64 lost;
320 * }; 324 * };
321 */ 325 */
322 PERF_EVENT_LOST = 2, 326 PERF_EVENT_LOST = 2,
@@ -364,10 +368,10 @@ enum perf_event_type {
364 368
365 /* 369 /*
366 * struct { 370 * struct {
367 * struct perf_event_header header; 371 * struct perf_event_header header;
368 * u32 pid, tid; 372 * u32 pid, tid;
369 * 373 *
370 * struct read_format values; 374 * struct read_format values;
371 * }; 375 * };
372 */ 376 */
373 PERF_EVENT_READ = 8, 377 PERF_EVENT_READ = 8,
@@ -383,23 +387,23 @@ enum perf_event_type {
383 * { u64 id; } && PERF_SAMPLE_ID 387 * { u64 id; } && PERF_SAMPLE_ID
384 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID 388 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
385 * { u32 cpu, res; } && PERF_SAMPLE_CPU 389 * { u32 cpu, res; } && PERF_SAMPLE_CPU
386 * { u64 period; } && PERF_SAMPLE_PERIOD 390 * { u64 period; } && PERF_SAMPLE_PERIOD
387 * 391 *
388 * { struct read_format values; } && PERF_SAMPLE_READ 392 * { struct read_format values; } && PERF_SAMPLE_READ
389 * 393 *
390 * { u64 nr, 394 * { u64 nr,
391 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN 395 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
392 * 396 *
393 * # 397 * #
394 * # The RAW record below is opaque data wrt the ABI 398 * # The RAW record below is opaque data wrt the ABI
395 * # 399 * #
396 * # That is, the ABI doesn't make any promises wrt to 400 * # That is, the ABI doesn't make any promises wrt to
397 * # the stability of its content, it may vary depending 401 * # the stability of its content, it may vary depending
398 * # on event, hardware, kernel version and phase of 402 * # on event, hardware, kernel version and phase of
399 * # the moon. 403 * # the moon.
400 * # 404 * #
401 * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 405 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
402 * # 406 * #
403 * 407 *
404 * { u32 size; 408 * { u32 size;
405 * char data[size];}&& PERF_SAMPLE_RAW 409 * char data[size];}&& PERF_SAMPLE_RAW
@@ -422,437 +426,16 @@ enum perf_callchain_context {
422 PERF_CONTEXT_MAX = (__u64)-4095, 426 PERF_CONTEXT_MAX = (__u64)-4095,
423}; 427};
424 428
425#define PERF_FLAG_FD_NO_GROUP (1U << 0) 429#define PERF_FLAG_FD_NO_GROUP (1U << 0)
426#define PERF_FLAG_FD_OUTPUT (1U << 1) 430#define PERF_FLAG_FD_OUTPUT (1U << 1)
427 431
428#ifdef __KERNEL__
429/* 432/*
430 * Kernel-internal data types and definitions: 433 * In case some app still references the old symbols:
431 */
432
433#ifdef CONFIG_PERF_COUNTERS
434# include <asm/perf_counter.h>
435#endif
436
437#include <linux/list.h>
438#include <linux/mutex.h>
439#include <linux/rculist.h>
440#include <linux/rcupdate.h>
441#include <linux/spinlock.h>
442#include <linux/hrtimer.h>
443#include <linux/fs.h>
444#include <linux/pid_namespace.h>
445#include <asm/atomic.h>
446
447#define PERF_MAX_STACK_DEPTH 255
448
449struct perf_callchain_entry {
450 __u64 nr;
451 __u64 ip[PERF_MAX_STACK_DEPTH];
452};
453
454struct perf_raw_record {
455 u32 size;
456 void *data;
457};
458
459struct task_struct;
460
461/**
462 * struct hw_perf_counter - performance counter hardware details:
463 */ 434 */
464struct hw_perf_counter {
465#ifdef CONFIG_PERF_COUNTERS
466 union {
467 struct { /* hardware */
468 u64 config;
469 unsigned long config_base;
470 unsigned long counter_base;
471 int idx;
472 };
473 union { /* software */
474 atomic64_t count;
475 struct hrtimer hrtimer;
476 };
477 };
478 atomic64_t prev_count;
479 u64 sample_period;
480 u64 last_period;
481 atomic64_t period_left;
482 u64 interrupts;
483
484 u64 freq_count;
485 u64 freq_interrupts;
486 u64 freq_stamp;
487#endif
488};
489
490struct perf_counter;
491
492/**
493 * struct pmu - generic performance monitoring unit
494 */
495struct pmu {
496 int (*enable) (struct perf_counter *counter);
497 void (*disable) (struct perf_counter *counter);
498 void (*read) (struct perf_counter *counter);
499 void (*unthrottle) (struct perf_counter *counter);
500};
501
502/**
503 * enum perf_counter_active_state - the states of a counter
504 */
505enum perf_counter_active_state {
506 PERF_COUNTER_STATE_ERROR = -2,
507 PERF_COUNTER_STATE_OFF = -1,
508 PERF_COUNTER_STATE_INACTIVE = 0,
509 PERF_COUNTER_STATE_ACTIVE = 1,
510};
511
512struct file;
513 435
514struct perf_mmap_data { 436#define __NR_perf_counter_open __NR_perf_event_open
515 struct rcu_head rcu_head;
516 int nr_pages; /* nr of data pages */
517 int writable; /* are we writable */
518 int nr_locked; /* nr pages mlocked */
519 437
520 atomic_t poll; /* POLL_ for wakeups */ 438#define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE
521 atomic_t events; /* event limit */ 439#define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE
522 440
523 atomic_long_t head; /* write position */
524 atomic_long_t done_head; /* completed head */
525
526 atomic_t lock; /* concurrent writes */
527 atomic_t wakeup; /* needs a wakeup */
528 atomic_t lost; /* nr records lost */
529
530 long watermark; /* wakeup watermark */
531
532 struct perf_counter_mmap_page *user_page;
533 void *data_pages[0];
534};
535
536struct perf_pending_entry {
537 struct perf_pending_entry *next;
538 void (*func)(struct perf_pending_entry *);
539};
540
541/**
542 * struct perf_counter - performance counter kernel representation:
543 */
544struct perf_counter {
545#ifdef CONFIG_PERF_COUNTERS
546 struct list_head list_entry;
547 struct list_head event_entry;
548 struct list_head sibling_list;
549 int nr_siblings;
550 struct perf_counter *group_leader;
551 struct perf_counter *output;
552 const struct pmu *pmu;
553
554 enum perf_counter_active_state state;
555 atomic64_t count;
556
557 /*
558 * These are the total time in nanoseconds that the counter
559 * has been enabled (i.e. eligible to run, and the task has
560 * been scheduled in, if this is a per-task counter)
561 * and running (scheduled onto the CPU), respectively.
562 *
563 * They are computed from tstamp_enabled, tstamp_running and
564 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
565 */
566 u64 total_time_enabled;
567 u64 total_time_running;
568
569 /*
570 * These are timestamps used for computing total_time_enabled
571 * and total_time_running when the counter is in INACTIVE or
572 * ACTIVE state, measured in nanoseconds from an arbitrary point
573 * in time.
574 * tstamp_enabled: the notional time when the counter was enabled
575 * tstamp_running: the notional time when the counter was scheduled on
576 * tstamp_stopped: in INACTIVE state, the notional time when the
577 * counter was scheduled off.
578 */
579 u64 tstamp_enabled;
580 u64 tstamp_running;
581 u64 tstamp_stopped;
582
583 struct perf_counter_attr attr;
584 struct hw_perf_counter hw;
585
586 struct perf_counter_context *ctx;
587 struct file *filp;
588
589 /*
590 * These accumulate total time (in nanoseconds) that children
591 * counters have been enabled and running, respectively.
592 */
593 atomic64_t child_total_time_enabled;
594 atomic64_t child_total_time_running;
595
596 /*
597 * Protect attach/detach and child_list:
598 */
599 struct mutex child_mutex;
600 struct list_head child_list;
601 struct perf_counter *parent;
602
603 int oncpu;
604 int cpu;
605
606 struct list_head owner_entry;
607 struct task_struct *owner;
608
609 /* mmap bits */
610 struct mutex mmap_mutex;
611 atomic_t mmap_count;
612 struct perf_mmap_data *data;
613
614 /* poll related */
615 wait_queue_head_t waitq;
616 struct fasync_struct *fasync;
617
618 /* delayed work for NMIs and such */
619 int pending_wakeup;
620 int pending_kill;
621 int pending_disable;
622 struct perf_pending_entry pending;
623
624 atomic_t event_limit;
625
626 void (*destroy)(struct perf_counter *);
627 struct rcu_head rcu_head;
628
629 struct pid_namespace *ns;
630 u64 id;
631#endif
632};
633
634/**
635 * struct perf_counter_context - counter context structure
636 *
637 * Used as a container for task counters and CPU counters as well:
638 */
639struct perf_counter_context {
640 /*
641 * Protect the states of the counters in the list,
642 * nr_active, and the list:
643 */
644 spinlock_t lock;
645 /*
646 * Protect the list of counters. Locking either mutex or lock
647 * is sufficient to ensure the list doesn't change; to change
648 * the list you need to lock both the mutex and the spinlock.
649 */
650 struct mutex mutex;
651
652 struct list_head counter_list;
653 struct list_head event_list;
654 int nr_counters;
655 int nr_active;
656 int is_active;
657 int nr_stat;
658 atomic_t refcount;
659 struct task_struct *task;
660
661 /*
662 * Context clock, runs when context enabled.
663 */
664 u64 time;
665 u64 timestamp;
666
667 /*
668 * These fields let us detect when two contexts have both
669 * been cloned (inherited) from a common ancestor.
670 */
671 struct perf_counter_context *parent_ctx;
672 u64 parent_gen;
673 u64 generation;
674 int pin_count;
675 struct rcu_head rcu_head;
676};
677
678/**
679 * struct perf_counter_cpu_context - per cpu counter context structure
680 */
681struct perf_cpu_context {
682 struct perf_counter_context ctx;
683 struct perf_counter_context *task_ctx;
684 int active_oncpu;
685 int max_pertask;
686 int exclusive;
687
688 /*
689 * Recursion avoidance:
690 *
691 * task, softirq, irq, nmi context
692 */
693 int recursion[4];
694};
695
696struct perf_output_handle {
697 struct perf_counter *counter;
698 struct perf_mmap_data *data;
699 unsigned long head;
700 unsigned long offset;
701 int nmi;
702 int sample;
703 int locked;
704 unsigned long flags;
705};
706
707#ifdef CONFIG_PERF_COUNTERS
708
709/*
710 * Set by architecture code:
711 */
712extern int perf_max_counters;
713
714extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
715
716extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
717extern void perf_counter_task_sched_out(struct task_struct *task,
718 struct task_struct *next, int cpu);
719extern void perf_counter_task_tick(struct task_struct *task, int cpu);
720extern int perf_counter_init_task(struct task_struct *child);
721extern void perf_counter_exit_task(struct task_struct *child);
722extern void perf_counter_free_task(struct task_struct *task);
723extern void set_perf_counter_pending(void);
724extern void perf_counter_do_pending(void);
725extern void perf_counter_print_debug(void);
726extern void __perf_disable(void);
727extern bool __perf_enable(void);
728extern void perf_disable(void);
729extern void perf_enable(void);
730extern int perf_counter_task_disable(void);
731extern int perf_counter_task_enable(void);
732extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
733 struct perf_cpu_context *cpuctx,
734 struct perf_counter_context *ctx, int cpu);
735extern void perf_counter_update_userpage(struct perf_counter *counter);
736
737struct perf_sample_data {
738 u64 type;
739
740 u64 ip;
741 struct {
742 u32 pid;
743 u32 tid;
744 } tid_entry;
745 u64 time;
746 u64 addr;
747 u64 id;
748 u64 stream_id;
749 struct {
750 u32 cpu;
751 u32 reserved;
752 } cpu_entry;
753 u64 period;
754 struct perf_callchain_entry *callchain;
755 struct perf_raw_record *raw;
756};
757
758extern void perf_output_sample(struct perf_output_handle *handle,
759 struct perf_event_header *header,
760 struct perf_sample_data *data,
761 struct perf_counter *counter);
762extern void perf_prepare_sample(struct perf_event_header *header,
763 struct perf_sample_data *data,
764 struct perf_counter *counter,
765 struct pt_regs *regs);
766
767extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
768 struct perf_sample_data *data,
769 struct pt_regs *regs);
770
771/*
772 * Return 1 for a software counter, 0 for a hardware counter
773 */
774static inline int is_software_counter(struct perf_counter *counter)
775{
776 return (counter->attr.type != PERF_TYPE_RAW) &&
777 (counter->attr.type != PERF_TYPE_HARDWARE) &&
778 (counter->attr.type != PERF_TYPE_HW_CACHE);
779}
780
781extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
782
783extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
784
785static inline void
786perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
787{
788 if (atomic_read(&perf_swcounter_enabled[event]))
789 __perf_swcounter_event(event, nr, nmi, regs, addr);
790}
791
792extern void __perf_counter_mmap(struct vm_area_struct *vma);
793
794static inline void perf_counter_mmap(struct vm_area_struct *vma)
795{
796 if (vma->vm_flags & VM_EXEC)
797 __perf_counter_mmap(vma);
798}
799
800extern void perf_counter_comm(struct task_struct *tsk);
801extern void perf_counter_fork(struct task_struct *tsk);
802
803extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
804
805extern int sysctl_perf_counter_paranoid;
806extern int sysctl_perf_counter_mlock;
807extern int sysctl_perf_counter_sample_rate;
808
809extern void perf_counter_init(void);
810extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
811 void *record, int entry_size);
812
813#ifndef perf_misc_flags
814#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
815 PERF_EVENT_MISC_KERNEL)
816#define perf_instruction_pointer(regs) instruction_pointer(regs)
817#endif
818
819extern int perf_output_begin(struct perf_output_handle *handle,
820 struct perf_counter *counter, unsigned int size,
821 int nmi, int sample);
822extern void perf_output_end(struct perf_output_handle *handle);
823extern void perf_output_copy(struct perf_output_handle *handle,
824 const void *buf, unsigned int len);
825#else
826static inline void
827perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
828static inline void
829perf_counter_task_sched_out(struct task_struct *task,
830 struct task_struct *next, int cpu) { }
831static inline void
832perf_counter_task_tick(struct task_struct *task, int cpu) { }
833static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
834static inline void perf_counter_exit_task(struct task_struct *child) { }
835static inline void perf_counter_free_task(struct task_struct *task) { }
836static inline void perf_counter_do_pending(void) { }
837static inline void perf_counter_print_debug(void) { }
838static inline void perf_disable(void) { }
839static inline void perf_enable(void) { }
840static inline int perf_counter_task_disable(void) { return -EINVAL; }
841static inline int perf_counter_task_enable(void) { return -EINVAL; }
842
843static inline void
844perf_swcounter_event(u32 event, u64 nr, int nmi,
845 struct pt_regs *regs, u64 addr) { }
846
847static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
848static inline void perf_counter_comm(struct task_struct *tsk) { }
849static inline void perf_counter_fork(struct task_struct *tsk) { }
850static inline void perf_counter_init(void) { }
851
852#endif
853
854#define perf_output_put(handle, x) \
855 perf_output_copy((handle), &(x), sizeof(x))
856
857#endif /* __KERNEL__ */
858#endif /* _LINUX_PERF_COUNTER_H */ 441#endif /* _LINUX_PERF_COUNTER_H */
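What remains of the old header is essentially the pre-rename ABI plus the aliases added above, so user-space code written against the old names keeps building. A minimal sketch of such a caller, assuming the attr structure is filled in by the old code as before (the helper name is illustrative, not from the tree):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_counter.h>   /* courtesy copy: old names, plus aliases */

    static long open_counter(void *attr_ptr, pid_t pid, int cpu)
    {
            /* the #define above makes this the same call as __NR_perf_event_open */
            return syscall(__NR_perf_counter_open, attr_ptr, pid, cpu, -1, 0);
    }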
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
new file mode 100644
index 000000000000..acefaf71e6dd
--- /dev/null
+++ b/include/linux/perf_event.h
@@ -0,0 +1,858 @@
1/*
2 * Performance events:
3 *
4 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
7 *
8 * Data type definitions, declarations, prototypes.
9 *
10 * Started by: Thomas Gleixner and Ingo Molnar
11 *
12 * For licencing details see kernel-base/COPYING
13 */
14#ifndef _LINUX_PERF_EVENT_H
15#define _LINUX_PERF_EVENT_H
16
17#include <linux/types.h>
18#include <linux/ioctl.h>
19#include <asm/byteorder.h>
20
21/*
22 * User-space ABI bits:
23 */
24
25/*
26 * attr.type
27 */
28enum perf_type_id {
29 PERF_TYPE_HARDWARE = 0,
30 PERF_TYPE_SOFTWARE = 1,
31 PERF_TYPE_TRACEPOINT = 2,
32 PERF_TYPE_HW_CACHE = 3,
33 PERF_TYPE_RAW = 4,
34
35 PERF_TYPE_MAX, /* non-ABI */
36};
37
38/*
39 * Generalized performance event event_id types, used by the
40 * attr.event_id parameter of the sys_perf_event_open()
41 * syscall:
42 */
43enum perf_hw_id {
44 /*
45 * Common hardware events, generalized by the kernel:
46 */
47 PERF_COUNT_HW_CPU_CYCLES = 0,
48 PERF_COUNT_HW_INSTRUCTIONS = 1,
49 PERF_COUNT_HW_CACHE_REFERENCES = 2,
50 PERF_COUNT_HW_CACHE_MISSES = 3,
51 PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
52 PERF_COUNT_HW_BRANCH_MISSES = 5,
53 PERF_COUNT_HW_BUS_CYCLES = 6,
54
55 PERF_COUNT_HW_MAX, /* non-ABI */
56};
57
58/*
59 * Generalized hardware cache events:
60 *
61 * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
62 * { read, write, prefetch } x
63 * { accesses, misses }
64 */
65enum perf_hw_cache_id {
66 PERF_COUNT_HW_CACHE_L1D = 0,
67 PERF_COUNT_HW_CACHE_L1I = 1,
68 PERF_COUNT_HW_CACHE_LL = 2,
69 PERF_COUNT_HW_CACHE_DTLB = 3,
70 PERF_COUNT_HW_CACHE_ITLB = 4,
71 PERF_COUNT_HW_CACHE_BPU = 5,
72
73 PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
74};
75
76enum perf_hw_cache_op_id {
77 PERF_COUNT_HW_CACHE_OP_READ = 0,
78 PERF_COUNT_HW_CACHE_OP_WRITE = 1,
79 PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
80
81 PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
82};
83
84enum perf_hw_cache_op_result_id {
85 PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
86 PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
87
88 PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
89};
90
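The three cache enums are combined into a single attr.config value; to the best of my knowledge the packing used by the generic code is id | (op << 8) | (result << 16). A hedged sketch selecting L1 data-cache read misses (assumes <linux/perf_event.h> and <string.h> are included; the helper name is illustrative):

    static void setup_l1d_read_miss(struct perf_event_attr *attr)
    {
            memset(attr, 0, sizeof(*attr));
            attr->type   = PERF_TYPE_HW_CACHE;
            attr->size   = sizeof(*attr);
            attr->config = PERF_COUNT_HW_CACHE_L1D |
                           (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
                           (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
    }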
91/*
92 * Special "software" events provided by the kernel, even if the hardware
93 * does not support performance events. These events measure various
94 * physical and sw events of the kernel (and allow the profiling of them as
95 * well):
96 */
97enum perf_sw_ids {
98 PERF_COUNT_SW_CPU_CLOCK = 0,
99 PERF_COUNT_SW_TASK_CLOCK = 1,
100 PERF_COUNT_SW_PAGE_FAULTS = 2,
101 PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
102 PERF_COUNT_SW_CPU_MIGRATIONS = 4,
103 PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
104 PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
105
106 PERF_COUNT_SW_MAX, /* non-ABI */
107};
108
109/*
110 * Bits that can be set in attr.sample_type to request information
111 * in the overflow packets.
112 */
113enum perf_event_sample_format {
114 PERF_SAMPLE_IP = 1U << 0,
115 PERF_SAMPLE_TID = 1U << 1,
116 PERF_SAMPLE_TIME = 1U << 2,
117 PERF_SAMPLE_ADDR = 1U << 3,
118 PERF_SAMPLE_READ = 1U << 4,
119 PERF_SAMPLE_CALLCHAIN = 1U << 5,
120 PERF_SAMPLE_ID = 1U << 6,
121 PERF_SAMPLE_CPU = 1U << 7,
122 PERF_SAMPLE_PERIOD = 1U << 8,
123 PERF_SAMPLE_STREAM_ID = 1U << 9,
124 PERF_SAMPLE_RAW = 1U << 10,
125
126 PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
127};
128
129/*
130 * The format of the data returned by read() on a perf event fd,
131 * as specified by attr.read_format:
132 *
133 * struct read_format {
134 * { u64 value;
135 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
136 * { u64 time_running; } && PERF_FORMAT_RUNNING
137 * { u64 id; } && PERF_FORMAT_ID
138 * } && !PERF_FORMAT_GROUP
139 *
140 * { u64 nr;
141 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
142 * { u64 time_running; } && PERF_FORMAT_RUNNING
143 * { u64 value;
144 * { u64 id; } && PERF_FORMAT_ID
145 * } cntr[nr];
146 * } && PERF_FORMAT_GROUP
147 * };
148 */
149enum perf_event_read_format {
150 PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
151 PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
152 PERF_FORMAT_ID = 1U << 2,
153 PERF_FORMAT_GROUP = 1U << 3,
154
155 PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
156};
157
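For a single (non-group) event with the ENABLED and RUNNING bits set, the read() layout above collapses to three u64s; a small user-space sketch that scales the raw count by enabled/running time (fragment, assumes <unistd.h> and <linux/perf_event.h>):

    static int read_scaled(int fd, double *scaled)
    {
            struct { __u64 value, time_enabled, time_running; } r;

            if (read(fd, &r, sizeof(r)) != sizeof(r) || !r.time_running)
                    return -1;
            *scaled = r.value * (double)r.time_enabled / r.time_running;
            return 0;
    }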
158#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
159
160/*
161 * Hardware event_id to monitor via a performance monitoring event:
162 */
163struct perf_event_attr {
164
165 /*
166 * Major type: hardware/software/tracepoint/etc.
167 */
168 __u32 type;
169
170 /*
171 * Size of the attr structure, for fwd/bwd compat.
172 */
173 __u32 size;
174
175 /*
176 * Type specific configuration information.
177 */
178 __u64 config;
179
180 union {
181 __u64 sample_period;
182 __u64 sample_freq;
183 };
184
185 __u64 sample_type;
186 __u64 read_format;
187
188 __u64 disabled : 1, /* off by default */
189 inherit : 1, /* children inherit it */
190 pinned : 1, /* must always be on PMU */
191 exclusive : 1, /* only group on PMU */
192 exclude_user : 1, /* don't count user */
193 exclude_kernel : 1, /* ditto kernel */
194 exclude_hv : 1, /* ditto hypervisor */
195 exclude_idle : 1, /* don't count when idle */
196 mmap : 1, /* include mmap data */
197 comm : 1, /* include comm data */
198 freq : 1, /* use freq, not period */
199 inherit_stat : 1, /* per task counts */
200 enable_on_exec : 1, /* next exec enables */
201 task : 1, /* trace fork/exit */
202 watermark : 1, /* wakeup_watermark */
203
204 __reserved_1 : 49;
205
206 union {
207 __u32 wakeup_events; /* wakeup every n events */
208 __u32 wakeup_watermark; /* bytes before wakeup */
209 };
210 __u32 __reserved_2;
211
212 __u64 __reserved_3;
213};
214
215/*
216 * Ioctls that can be done on a perf event fd:
217 */
218#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
219#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
220#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
221#define PERF_EVENT_IOC_RESET _IO ('$', 3)
222#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
223#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
224
225enum perf_event_ioc_flags {
226 PERF_IOC_FLAG_GROUP = 1U << 0,
227};
228
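With PERF_IOC_FLAG_GROUP the enable/disable/reset ioctls act on a whole group through the leader's fd, and PERF_EVENT_IOC_PERIOD takes a pointer to the new u64 period. A brief user-space sketch (leader_fd is assumed to be a group leader opened elsewhere; needs <sys/ioctl.h>):

    static void restart_group(int leader_fd, __u64 new_period)
    {
            ioctl(leader_fd, PERF_EVENT_IOC_PERIOD, &new_period);
            ioctl(leader_fd, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
            ioctl(leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
    }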
229/*
230 * Structure of the page that can be mapped via mmap
231 */
232struct perf_event_mmap_page {
233 __u32 version; /* version number of this structure */
234 __u32 compat_version; /* lowest version this is compat with */
235
236 /*
237 * Bits needed to read the hw events in user-space.
238 *
239 * u32 seq;
240 * s64 count;
241 *
242 * do {
243 * seq = pc->lock;
244 *
245 * barrier()
246 * if (pc->index) {
247 * count = pmc_read(pc->index - 1);
248 * count += pc->offset;
249 * } else
250 * goto regular_read;
251 *
252 * barrier();
253 * } while (pc->lock != seq);
254 *
255 * NOTE: for obvious reason this only works on self-monitoring
256 * processes.
257 */
258 __u32 lock; /* seqlock for synchronization */
259 __u32 index; /* hardware event identifier */
260 __s64 offset; /* add to hardware event value */
261 __u64 time_enabled; /* time event active */
262 __u64 time_running; /* time event on cpu */
263
264 /*
265 * Hole for extension of the self monitor capabilities
266 */
267
268 __u64 __reserved[123]; /* align to 1k */
269
270 /*
271 * Control data for the mmap() data buffer.
272 *
273 * User-space reading the @data_head value should issue an rmb(), on
274 * SMP capable platforms, after reading this value -- see
275 * perf_event_wakeup().
276 *
277 * When the mapping is PROT_WRITE the @data_tail value should be
278 * written by userspace to reflect the last read data. In this case
279 * the kernel will not over-write unread data.
280 */
281 __u64 data_head; /* head in the data section */
282 __u64 data_tail; /* user-space written tail */
283};
284
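Putting the data_head/data_tail rules above into code, a minimal consumer for a PROT_WRITE mapping might look like the sketch below. base is the mmap()ed area (metadata page followed by 2^n data pages), data_size the size of the data area, and rmb() stands for whatever read barrier the platform provides:

    static void drain(void *base, size_t page_size, size_t data_size)
    {
            struct perf_event_mmap_page *pg = base;
            char *data = (char *)base + page_size;
            __u64 tail = pg->data_tail;
            __u64 head = pg->data_head;

            rmb();                  /* order the record reads after data_head */

            while (tail != head) {
                    struct perf_event_header *hdr;

                    hdr = (struct perf_event_header *)
                            (data + (tail & (data_size - 1)));
                    /* decode hdr->type / hdr->size here, minding wrap-around */
                    tail += hdr->size;
            }
            pg->data_tail = tail;   /* report the new consumer position */
    }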
285#define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0)
286#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
287#define PERF_RECORD_MISC_KERNEL (1 << 0)
288#define PERF_RECORD_MISC_USER (2 << 0)
289#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
290
291struct perf_event_header {
292 __u32 type;
293 __u16 misc;
294 __u16 size;
295};
296
297enum perf_event_type {
298
299 /*
300 * The MMAP events record the PROT_EXEC mappings so that we can
301 * correlate userspace IPs to code. They have the following structure:
302 *
303 * struct {
304 * struct perf_event_header header;
305 *
306 * u32 pid, tid;
307 * u64 addr;
308 * u64 len;
309 * u64 pgoff;
310 * char filename[];
311 * };
312 */
313 PERF_RECORD_MMAP = 1,
314
315 /*
316 * struct {
317 * struct perf_event_header header;
318 * u64 id;
319 * u64 lost;
320 * };
321 */
322 PERF_RECORD_LOST = 2,
323
324 /*
325 * struct {
326 * struct perf_event_header header;
327 *
328 * u32 pid, tid;
329 * char comm[];
330 * };
331 */
332 PERF_RECORD_COMM = 3,
333
334 /*
335 * struct {
336 * struct perf_event_header header;
337 * u32 pid, ppid;
338 * u32 tid, ptid;
339 * u64 time;
340 * };
341 */
342 PERF_RECORD_EXIT = 4,
343
344 /*
345 * struct {
346 * struct perf_event_header header;
347 * u64 time;
348 * u64 id;
349 * u64 stream_id;
350 * };
351 */
352 PERF_RECORD_THROTTLE = 5,
353 PERF_RECORD_UNTHROTTLE = 6,
354
355 /*
356 * struct {
357 * struct perf_event_header header;
358 * u32 pid, ppid;
359 * u32 tid, ptid;
360 * { u64 time; } && PERF_SAMPLE_TIME
361 * };
362 */
363 PERF_RECORD_FORK = 7,
364
365 /*
366 * struct {
367 * struct perf_event_header header;
368 * u32 pid, tid;
369 *
370 * struct read_format values;
371 * };
372 */
373 PERF_RECORD_READ = 8,
374
375 /*
376 * struct {
377 * struct perf_event_header header;
378 *
379 * { u64 ip; } && PERF_SAMPLE_IP
380 * { u32 pid, tid; } && PERF_SAMPLE_TID
381 * { u64 time; } && PERF_SAMPLE_TIME
382 * { u64 addr; } && PERF_SAMPLE_ADDR
383 * { u64 id; } && PERF_SAMPLE_ID
384 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
385 * { u32 cpu, res; } && PERF_SAMPLE_CPU
386 * { u64 period; } && PERF_SAMPLE_PERIOD
387 *
388 * { struct read_format values; } && PERF_SAMPLE_READ
389 *
390 * { u64 nr,
391 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
392 *
393 * #
394 * # The RAW record below is opaque data wrt the ABI
395 * #
396 * # That is, the ABI doesn't make any promises wrt to
397 * # the stability of its content, it may vary depending
398 * # on event, hardware, kernel version and phase of
399 * # the moon.
400 * #
401 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
402 * #
403 *
404 * { u32 size;
405 * char data[size];}&& PERF_SAMPLE_RAW
406 * };
407 */
408 PERF_RECORD_SAMPLE = 9,
409
410 PERF_RECORD_MAX, /* non-ABI */
411};
412
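As a concrete instance of the PERF_RECORD_SAMPLE layout above: with attr.sample_type set to only PERF_SAMPLE_IP | PERF_SAMPLE_TID, each sample record reduces to the fields below, in this order (a decoding sketch, not a kernel structure):

    struct sample_ip_tid {
            struct perf_event_header header;  /* header.type == PERF_RECORD_SAMPLE */
            __u64 ip;                         /* PERF_SAMPLE_IP  */
            __u32 pid, tid;                   /* PERF_SAMPLE_TID */
    };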
413enum perf_callchain_context {
414 PERF_CONTEXT_HV = (__u64)-32,
415 PERF_CONTEXT_KERNEL = (__u64)-128,
416 PERF_CONTEXT_USER = (__u64)-512,
417
418 PERF_CONTEXT_GUEST = (__u64)-2048,
419 PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
420 PERF_CONTEXT_GUEST_USER = (__u64)-2560,
421
422 PERF_CONTEXT_MAX = (__u64)-4095,
423};
424
425#define PERF_FLAG_FD_NO_GROUP (1U << 0)
426#define PERF_FLAG_FD_OUTPUT (1U << 1)
427
428#ifdef __KERNEL__
429/*
430 * Kernel-internal data types and definitions:
431 */
432
433#ifdef CONFIG_PERF_EVENTS
434# include <asm/perf_event.h>
435#endif
436
437#include <linux/list.h>
438#include <linux/mutex.h>
439#include <linux/rculist.h>
440#include <linux/rcupdate.h>
441#include <linux/spinlock.h>
442#include <linux/hrtimer.h>
443#include <linux/fs.h>
444#include <linux/pid_namespace.h>
445#include <asm/atomic.h>
446
447#define PERF_MAX_STACK_DEPTH 255
448
449struct perf_callchain_entry {
450 __u64 nr;
451 __u64 ip[PERF_MAX_STACK_DEPTH];
452};
453
454struct perf_raw_record {
455 u32 size;
456 void *data;
457};
458
459struct task_struct;
460
461/**
462 * struct hw_perf_event - performance event hardware details:
463 */
464struct hw_perf_event {
465#ifdef CONFIG_PERF_EVENTS
466 union {
467 struct { /* hardware */
468 u64 config;
469 unsigned long config_base;
470 unsigned long event_base;
471 int idx;
472 };
473 union { /* software */
474 atomic64_t count;
475 struct hrtimer hrtimer;
476 };
477 };
478 atomic64_t prev_count;
479 u64 sample_period;
480 u64 last_period;
481 atomic64_t period_left;
482 u64 interrupts;
483
484 u64 freq_count;
485 u64 freq_interrupts;
486 u64 freq_stamp;
487#endif
488};
489
490struct perf_event;
491
492/**
493 * struct pmu - generic performance monitoring unit
494 */
495struct pmu {
496 int (*enable) (struct perf_event *event);
497 void (*disable) (struct perf_event *event);
498 void (*read) (struct perf_event *event);
499 void (*unthrottle) (struct perf_event *event);
500};
501
502/**
503 * enum perf_event_active_state - the states of an event
504 */
505enum perf_event_active_state {
506 PERF_EVENT_STATE_ERROR = -2,
507 PERF_EVENT_STATE_OFF = -1,
508 PERF_EVENT_STATE_INACTIVE = 0,
509 PERF_EVENT_STATE_ACTIVE = 1,
510};
511
512struct file;
513
514struct perf_mmap_data {
515 struct rcu_head rcu_head;
516 int nr_pages; /* nr of data pages */
517 int writable; /* are we writable */
518 int nr_locked; /* nr pages mlocked */
519
520 atomic_t poll; /* POLL_ for wakeups */
521 atomic_t events; /* event_id limit */
522
523 atomic_long_t head; /* write position */
524 atomic_long_t done_head; /* completed head */
525
526 atomic_t lock; /* concurrent writes */
527 atomic_t wakeup; /* needs a wakeup */
528 atomic_t lost; /* nr records lost */
529
530 long watermark; /* wakeup watermark */
531
532 struct perf_event_mmap_page *user_page;
533 void *data_pages[0];
534};
535
536struct perf_pending_entry {
537 struct perf_pending_entry *next;
538 void (*func)(struct perf_pending_entry *);
539};
540
541/**
542 * struct perf_event - performance event kernel representation:
543 */
544struct perf_event {
545#ifdef CONFIG_PERF_EVENTS
546 struct list_head group_entry;
547 struct list_head event_entry;
548 struct list_head sibling_list;
549 int nr_siblings;
550 struct perf_event *group_leader;
551 struct perf_event *output;
552 const struct pmu *pmu;
553
554 enum perf_event_active_state state;
555 atomic64_t count;
556
557 /*
558 * These are the total time in nanoseconds that the event
559 * has been enabled (i.e. eligible to run, and the task has
560 * been scheduled in, if this is a per-task event)
561 * and running (scheduled onto the CPU), respectively.
562 *
563 * They are computed from tstamp_enabled, tstamp_running and
564 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
565 */
566 u64 total_time_enabled;
567 u64 total_time_running;
568
569 /*
570 * These are timestamps used for computing total_time_enabled
571 * and total_time_running when the event is in INACTIVE or
572 * ACTIVE state, measured in nanoseconds from an arbitrary point
573 * in time.
574 * tstamp_enabled: the notional time when the event was enabled
575 * tstamp_running: the notional time when the event was scheduled on
576 * tstamp_stopped: in INACTIVE state, the notional time when the
577 * event was scheduled off.
578 */
579 u64 tstamp_enabled;
580 u64 tstamp_running;
581 u64 tstamp_stopped;
582
583 struct perf_event_attr attr;
584 struct hw_perf_event hw;
585
586 struct perf_event_context *ctx;
587 struct file *filp;
588
589 /*
590 * These accumulate total time (in nanoseconds) that children
591 * events have been enabled and running, respectively.
592 */
593 atomic64_t child_total_time_enabled;
594 atomic64_t child_total_time_running;
595
596 /*
597 * Protect attach/detach and child_list:
598 */
599 struct mutex child_mutex;
600 struct list_head child_list;
601 struct perf_event *parent;
602
603 int oncpu;
604 int cpu;
605
606 struct list_head owner_entry;
607 struct task_struct *owner;
608
609 /* mmap bits */
610 struct mutex mmap_mutex;
611 atomic_t mmap_count;
612 struct perf_mmap_data *data;
613
614 /* poll related */
615 wait_queue_head_t waitq;
616 struct fasync_struct *fasync;
617
618 /* delayed work for NMIs and such */
619 int pending_wakeup;
620 int pending_kill;
621 int pending_disable;
622 struct perf_pending_entry pending;
623
624 atomic_t event_limit;
625
626 void (*destroy)(struct perf_event *);
627 struct rcu_head rcu_head;
628
629 struct pid_namespace *ns;
630 u64 id;
631#endif
632};
633
634/**
635 * struct perf_event_context - event context structure
636 *
637 * Used as a container for task events and CPU events as well:
638 */
639struct perf_event_context {
640 /*
641 * Protect the states of the events in the list,
642 * nr_active, and the list:
643 */
644 spinlock_t lock;
645 /*
646 * Protect the list of events. Locking either mutex or lock
647 * is sufficient to ensure the list doesn't change; to change
648 * the list you need to lock both the mutex and the spinlock.
649 */
650 struct mutex mutex;
651
652 struct list_head group_list;
653 struct list_head event_list;
654 int nr_events;
655 int nr_active;
656 int is_active;
657 int nr_stat;
658 atomic_t refcount;
659 struct task_struct *task;
660
661 /*
662 * Context clock, runs when context enabled.
663 */
664 u64 time;
665 u64 timestamp;
666
667 /*
668 * These fields let us detect when two contexts have both
669 * been cloned (inherited) from a common ancestor.
670 */
671 struct perf_event_context *parent_ctx;
672 u64 parent_gen;
673 u64 generation;
674 int pin_count;
675 struct rcu_head rcu_head;
676};
677
678/**
679 * struct perf_event_cpu_context - per cpu event context structure
680 */
681struct perf_cpu_context {
682 struct perf_event_context ctx;
683 struct perf_event_context *task_ctx;
684 int active_oncpu;
685 int max_pertask;
686 int exclusive;
687
688 /*
689 * Recursion avoidance:
690 *
691 * task, softirq, irq, nmi context
692 */
693 int recursion[4];
694};
695
696struct perf_output_handle {
697 struct perf_event *event;
698 struct perf_mmap_data *data;
699 unsigned long head;
700 unsigned long offset;
701 int nmi;
702 int sample;
703 int locked;
704 unsigned long flags;
705};
706
707#ifdef CONFIG_PERF_EVENTS
708
709/*
710 * Set by architecture code:
711 */
712extern int perf_max_events;
713
714extern const struct pmu *hw_perf_event_init(struct perf_event *event);
715
716extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
717extern void perf_event_task_sched_out(struct task_struct *task,
718 struct task_struct *next, int cpu);
719extern void perf_event_task_tick(struct task_struct *task, int cpu);
720extern int perf_event_init_task(struct task_struct *child);
721extern void perf_event_exit_task(struct task_struct *child);
722extern void perf_event_free_task(struct task_struct *task);
723extern void set_perf_event_pending(void);
724extern void perf_event_do_pending(void);
725extern void perf_event_print_debug(void);
726extern void __perf_disable(void);
727extern bool __perf_enable(void);
728extern void perf_disable(void);
729extern void perf_enable(void);
730extern int perf_event_task_disable(void);
731extern int perf_event_task_enable(void);
732extern int hw_perf_group_sched_in(struct perf_event *group_leader,
733 struct perf_cpu_context *cpuctx,
734 struct perf_event_context *ctx, int cpu);
735extern void perf_event_update_userpage(struct perf_event *event);
736
737struct perf_sample_data {
738 u64 type;
739
740 u64 ip;
741 struct {
742 u32 pid;
743 u32 tid;
744 } tid_entry;
745 u64 time;
746 u64 addr;
747 u64 id;
748 u64 stream_id;
749 struct {
750 u32 cpu;
751 u32 reserved;
752 } cpu_entry;
753 u64 period;
754 struct perf_callchain_entry *callchain;
755 struct perf_raw_record *raw;
756};
757
758extern void perf_output_sample(struct perf_output_handle *handle,
759 struct perf_event_header *header,
760 struct perf_sample_data *data,
761 struct perf_event *event);
762extern void perf_prepare_sample(struct perf_event_header *header,
763 struct perf_sample_data *data,
764 struct perf_event *event,
765 struct pt_regs *regs);
766
767extern int perf_event_overflow(struct perf_event *event, int nmi,
768 struct perf_sample_data *data,
769 struct pt_regs *regs);
770
771/*
772 * Return 1 for a software event, 0 for a hardware event
773 */
774static inline int is_software_event(struct perf_event *event)
775{
776 return (event->attr.type != PERF_TYPE_RAW) &&
777 (event->attr.type != PERF_TYPE_HARDWARE) &&
778 (event->attr.type != PERF_TYPE_HW_CACHE);
779}
780
781extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
782
783extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
784
785static inline void
786perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
787{
788 if (atomic_read(&perf_swevent_enabled[event_id]))
789 __perf_sw_event(event_id, nr, nmi, regs, addr);
790}
791
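For reference, this is roughly how the fault paths of this era feed the software events (a sketch modelled on the arch/*/mm/fault.c callers; regs, address, mm, vma and flags come from the surrounding fault handler):

    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

    fault = handle_mm_fault(mm, vma, address, flags);

    if (fault & VM_FAULT_MAJOR)
            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, address);
    else
            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address);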
792extern void __perf_event_mmap(struct vm_area_struct *vma);
793
794static inline void perf_event_mmap(struct vm_area_struct *vma)
795{
796 if (vma->vm_flags & VM_EXEC)
797 __perf_event_mmap(vma);
798}
799
800extern void perf_event_comm(struct task_struct *tsk);
801extern void perf_event_fork(struct task_struct *tsk);
802
803extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
804
805extern int sysctl_perf_event_paranoid;
806extern int sysctl_perf_event_mlock;
807extern int sysctl_perf_event_sample_rate;
808
809extern void perf_event_init(void);
810extern void perf_tp_event(int event_id, u64 addr, u64 count,
811 void *record, int entry_size);
812
813#ifndef perf_misc_flags
814#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \
815 PERF_RECORD_MISC_KERNEL)
816#define perf_instruction_pointer(regs) instruction_pointer(regs)
817#endif
818
819extern int perf_output_begin(struct perf_output_handle *handle,
820 struct perf_event *event, unsigned int size,
821 int nmi, int sample);
822extern void perf_output_end(struct perf_output_handle *handle);
823extern void perf_output_copy(struct perf_output_handle *handle,
824 const void *buf, unsigned int len);
825#else
826static inline void
827perf_event_task_sched_in(struct task_struct *task, int cpu) { }
828static inline void
829perf_event_task_sched_out(struct task_struct *task,
830 struct task_struct *next, int cpu) { }
831static inline void
832perf_event_task_tick(struct task_struct *task, int cpu) { }
833static inline int perf_event_init_task(struct task_struct *child) { return 0; }
834static inline void perf_event_exit_task(struct task_struct *child) { }
835static inline void perf_event_free_task(struct task_struct *task) { }
836static inline void perf_event_do_pending(void) { }
837static inline void perf_event_print_debug(void) { }
838static inline void perf_disable(void) { }
839static inline void perf_enable(void) { }
840static inline int perf_event_task_disable(void) { return -EINVAL; }
841static inline int perf_event_task_enable(void) { return -EINVAL; }
842
843static inline void
844perf_sw_event(u32 event_id, u64 nr, int nmi,
845 struct pt_regs *regs, u64 addr) { }
846
847static inline void perf_event_mmap(struct vm_area_struct *vma) { }
848static inline void perf_event_comm(struct task_struct *tsk) { }
849static inline void perf_event_fork(struct task_struct *tsk) { }
850static inline void perf_event_init(void) { }
851
852#endif
853
854#define perf_output_put(handle, x) \
855 perf_output_copy((handle), &(x), sizeof(x))
856
857#endif /* __KERNEL__ */
858#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index b00df4c79c63..07bff666e65b 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -85,7 +85,7 @@
85#define PR_SET_TIMERSLACK 29 85#define PR_SET_TIMERSLACK 29
86#define PR_GET_TIMERSLACK 30 86#define PR_GET_TIMERSLACK 30
87 87
88#define PR_TASK_PERF_COUNTERS_DISABLE 31 88#define PR_TASK_PERF_EVENTS_DISABLE 31
89#define PR_TASK_PERF_COUNTERS_ENABLE 32 89#define PR_TASK_PERF_EVENTS_ENABLE 32
90 90
91#endif /* _LINUX_PRCTL_H */ 91#endif /* _LINUX_PRCTL_H */
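The renamed prctl options pause and resume counting for every event attached to the calling task (when CONFIG_PERF_EVENTS is off they fall back to the -EINVAL stubs shown earlier). A hedged user-space sketch; do_setup() is a hypothetical workload function:

    #include <sys/prctl.h>
    #include <linux/prctl.h>

    static void run_unprofiled_setup(void)
    {
            prctl(PR_TASK_PERF_EVENTS_DISABLE); /* pause counting for this task */
            do_setup();                         /* hypothetical, not profiled   */
            prctl(PR_TASK_PERF_EVENTS_ENABLE);  /* resume counting              */
    }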
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 115af05ecabd..8fe351c3914a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -100,7 +100,7 @@ struct robust_list_head;
100struct bio; 100struct bio;
101struct fs_struct; 101struct fs_struct;
102struct bts_context; 102struct bts_context;
103struct perf_counter_context; 103struct perf_event_context;
104 104
105/* 105/*
106 * List of flags we want to share for kernel threads, 106 * List of flags we want to share for kernel threads,
@@ -701,7 +701,7 @@ struct user_struct {
701#endif 701#endif
702#endif 702#endif
703 703
704#ifdef CONFIG_PERF_COUNTERS 704#ifdef CONFIG_PERF_EVENTS
705 atomic_long_t locked_vm; 705 atomic_long_t locked_vm;
706#endif 706#endif
707}; 707};
@@ -1451,10 +1451,10 @@ struct task_struct {
1451 struct list_head pi_state_list; 1451 struct list_head pi_state_list;
1452 struct futex_pi_state *pi_state_cache; 1452 struct futex_pi_state *pi_state_cache;
1453#endif 1453#endif
1454#ifdef CONFIG_PERF_COUNTERS 1454#ifdef CONFIG_PERF_EVENTS
1455 struct perf_counter_context *perf_counter_ctxp; 1455 struct perf_event_context *perf_event_ctxp;
1456 struct mutex perf_counter_mutex; 1456 struct mutex perf_event_mutex;
1457 struct list_head perf_counter_list; 1457 struct list_head perf_event_list;
1458#endif 1458#endif
1459#ifdef CONFIG_NUMA 1459#ifdef CONFIG_NUMA
1460 struct mempolicy *mempolicy; /* Protected by alloc_lock */ 1460 struct mempolicy *mempolicy; /* Protected by alloc_lock */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 7d9803cbb20f..8d8285a10db9 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -55,7 +55,7 @@ struct compat_timeval;
55struct robust_list_head; 55struct robust_list_head;
56struct getcpu_cache; 56struct getcpu_cache;
57struct old_linux_dirent; 57struct old_linux_dirent;
58struct perf_counter_attr; 58struct perf_event_attr;
59 59
60#include <linux/types.h> 60#include <linux/types.h>
61#include <linux/aio_abi.h> 61#include <linux/aio_abi.h>
@@ -877,7 +877,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
877int kernel_execve(const char *filename, char *const argv[], char *const envp[]); 877int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
878 878
879 879
880asmlinkage long sys_perf_counter_open( 880asmlinkage long sys_perf_event_open(
881 struct perf_counter_attr __user *attr_uptr, 881 struct perf_event_attr __user *attr_uptr,
882 pid_t pid, int cpu, int group_fd, unsigned long flags); 882 pid_t pid, int cpu, int group_fd, unsigned long flags);
883#endif 883#endif
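Since there is no libc wrapper for the renamed syscall, user space reaches it through syscall(2). A self-contained sketch that counts user-space instructions for a busy loop (error handling trimmed for brevity):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            long long count;
            int fd, i;

            memset(&attr, 0, sizeof(attr));
            attr.type           = PERF_TYPE_HARDWARE;
            attr.size           = sizeof(attr);
            attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
            attr.disabled       = 1;
            attr.exclude_kernel = 1;

            fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            for (i = 0; i < 1000000; i++)
                    asm volatile("" ::: "memory");  /* keep the loop alive */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            read(fd, &count, sizeof(count));
            printf("instructions: %lld\n", count);
            close(fd);
            return 0;
    }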
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index a0361cb69769..cc0d9667e182 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -378,7 +378,7 @@ static inline int ftrace_get_offsets_##call( \
378#ifdef CONFIG_EVENT_PROFILE 378#ifdef CONFIG_EVENT_PROFILE
379 379
380/* 380/*
381 * Generate the functions needed for tracepoint perf_counter support. 381 * Generate the functions needed for tracepoint perf_event support.
382 * 382 *
383 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later 383 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
384 * 384 *
@@ -644,7 +644,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
644 * { 644 * {
645 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; 645 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
646 * struct ftrace_event_call *event_call = &event_<call>; 646 * struct ftrace_event_call *event_call = &event_<call>;
647 * extern void perf_tpcounter_event(int, u64, u64, void *, int); 647 * extern void perf_tp_event(int, u64, u64, void *, int);
648 * struct ftrace_raw_##call *entry; 648 * struct ftrace_raw_##call *entry;
649 * u64 __addr = 0, __count = 1; 649 * u64 __addr = 0, __count = 1;
650 * unsigned long irq_flags; 650 * unsigned long irq_flags;
@@ -690,7 +690,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
690 * 690 *
691 * <assign> <- affect our values 691 * <assign> <- affect our values
692 * 692 *
693 * perf_tpcounter_event(event_call->id, __addr, __count, entry, 693 * perf_tp_event(event_call->id, __addr, __count, entry,
694 * __entry_size); <- submit them to perf counter 694 * __entry_size); <- submit them to perf counter
695 * 695 *
696 * } 696 * }
@@ -710,7 +710,7 @@ static void ftrace_profile_##call(proto) \
710{ \ 710{ \
711 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 711 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
712 struct ftrace_event_call *event_call = &event_##call; \ 712 struct ftrace_event_call *event_call = &event_##call; \
713 extern void perf_tpcounter_event(int, u64, u64, void *, int); \ 713 extern void perf_tp_event(int, u64, u64, void *, int); \
714 struct ftrace_raw_##call *entry; \ 714 struct ftrace_raw_##call *entry; \
715 u64 __addr = 0, __count = 1; \ 715 u64 __addr = 0, __count = 1; \
716 unsigned long irq_flags; \ 716 unsigned long irq_flags; \
@@ -755,7 +755,7 @@ static void ftrace_profile_##call(proto) \
755 \ 755 \
756 { assign; } \ 756 { assign; } \
757 \ 757 \
758 perf_tpcounter_event(event_call->id, __addr, __count, entry, \ 758 perf_tp_event(event_call->id, __addr, __count, entry, \
759 __entry_size); \ 759 __entry_size); \
760 \ 760 \
761end: \ 761end: \
diff --git a/init/Kconfig b/init/Kconfig
index 0121c0ea3e03..0aa6579504cc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -916,31 +916,36 @@ config AIO
916 by some high performance threaded applications. Disabling 916 by some high performance threaded applications. Disabling
917 this option saves about 7k. 917 this option saves about 7k.
918 918
919config HAVE_PERF_COUNTERS 919config HAVE_PERF_EVENTS
920 bool 920 bool
921 help 921 help
922 See tools/perf/design.txt for details. 922 See tools/perf/design.txt for details.
923 923
924menu "Performance Counters" 924menu "Kernel Performance Events And Counters"
925 925
926config PERF_COUNTERS 926config PERF_EVENTS
927 bool "Kernel Performance Counters" 927 bool "Kernel performance events and counters"
928 default y if PROFILING 928 default y if (PROFILING || PERF_COUNTERS)
929 depends on HAVE_PERF_COUNTERS 929 depends on HAVE_PERF_EVENTS
930 select ANON_INODES 930 select ANON_INODES
931 help 931 help
932 Enable kernel support for performance counter hardware. 932 Enable kernel support for various performance events provided
933 by software and hardware.
934
935 Software events are supported either built-in or via the
936 use of generic tracepoints.
933 937
934 Performance counters are special hardware registers available 938 Most modern CPUs support performance events via performance
935 on most modern CPUs. These registers count the number of certain 939 counter registers. These registers count the number of certain
936 types of hw events: such as instructions executed, cache misses 940 types of hw events: such as instructions executed, cache misses
937 suffered, or branches mis-predicted - without slowing down the 941 suffered, or branches mis-predicted - without slowing down the
938 kernel or applications. These registers can also trigger interrupts 942 kernel or applications. These registers can also trigger interrupts
939 when a threshold number of events have passed - and can thus be 943 when a threshold number of events have passed - and can thus be
940 used to profile the code that runs on that CPU. 944 used to profile the code that runs on that CPU.
941 945
942 The Linux Performance Counter subsystem provides an abstraction of 946 The Linux Performance Event subsystem provides an abstraction of
943 these hardware capabilities, available via a system call. It 947 these software and hardware event capabilities, available via a
948 system call and used by the "perf" utility in tools/perf/. It
944 provides per task and per CPU counters, and it provides event 949 provides per task and per CPU counters, and it provides event
945 capabilities on top of those. 950 capabilities on top of those.
946 951
@@ -948,17 +953,29 @@ config PERF_COUNTERS
948 953
949config EVENT_PROFILE 954config EVENT_PROFILE
950 bool "Tracepoint profiling sources" 955 bool "Tracepoint profiling sources"
951 depends on PERF_COUNTERS && EVENT_TRACING 956 depends on PERF_EVENTS && EVENT_TRACING
952 default y 957 default y
953 help 958 help
954 Allow the use of tracepoints as software performance counters. 959 Allow the use of tracepoints as software performance events.
955 960
956 When this is enabled, you can create perf counters based on 961 When this is enabled, you can create perf events based on
957 tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID 962 tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
958 found in debugfs://tracing/events/*/*/id. (The -e/--events 963 found in debugfs://tracing/events/*/*/id. (The -e/--events
959 option to the perf tool can parse and interpret symbolic 964 option to the perf tool can parse and interpret symbolic
960 tracepoints, in the subsystem:tracepoint_name format.) 965 tracepoints, in the subsystem:tracepoint_name format.)
961 966
967config PERF_COUNTERS
968 bool "Kernel performance counters (old config option)"
969 depends on HAVE_PERF_EVENTS
970 help
971 This config has been obsoleted by the PERF_EVENTS
972 config option - please see that one for details.
973
974 It has no effect on the kernel whether you enable
975 it or not, it is a compatibility placeholder.
976
977 Say N if unsure.
978
962endmenu 979endmenu
963 980
964config VM_EVENT_COUNTERS 981config VM_EVENT_COUNTERS
diff --git a/kernel/Makefile b/kernel/Makefile
index 7c9b0a585502..187c89b4783d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -95,7 +95,7 @@ obj-$(CONFIG_X86_DS) += trace/
95obj-$(CONFIG_RING_BUFFER) += trace/ 95obj-$(CONFIG_RING_BUFFER) += trace/
96obj-$(CONFIG_SMP) += sched_cpupri.o 96obj-$(CONFIG_SMP) += sched_cpupri.o
97obj-$(CONFIG_SLOW_WORK) += slow-work.o 97obj-$(CONFIG_SLOW_WORK) += slow-work.o
98obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o 98obj-$(CONFIG_PERF_EVENTS) += perf_event.o
99 99
100ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) 100ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
101# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is 101# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/exit.c b/kernel/exit.c
index ae5d8660ddff..e47ee8a06135 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -47,7 +47,7 @@
47#include <linux/tracehook.h> 47#include <linux/tracehook.h>
48#include <linux/fs_struct.h> 48#include <linux/fs_struct.h>
49#include <linux/init_task.h> 49#include <linux/init_task.h>
50#include <linux/perf_counter.h> 50#include <linux/perf_event.h>
51#include <trace/events/sched.h> 51#include <trace/events/sched.h>
52 52
53#include <asm/uaccess.h> 53#include <asm/uaccess.h>
@@ -154,8 +154,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
154{ 154{
155 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 155 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
156 156
157#ifdef CONFIG_PERF_COUNTERS 157#ifdef CONFIG_PERF_EVENTS
158 WARN_ON_ONCE(tsk->perf_counter_ctxp); 158 WARN_ON_ONCE(tsk->perf_event_ctxp);
159#endif 159#endif
160 trace_sched_process_free(tsk); 160 trace_sched_process_free(tsk);
161 put_task_struct(tsk); 161 put_task_struct(tsk);
@@ -981,7 +981,7 @@ NORET_TYPE void do_exit(long code)
981 * Flush inherited counters to the parent - before the parent 981 * Flush inherited counters to the parent - before the parent
982 * gets woken up by child-exit notifications. 982 * gets woken up by child-exit notifications.
983 */ 983 */
984 perf_counter_exit_task(tsk); 984 perf_event_exit_task(tsk);
985 985
986 exit_notify(tsk, group_dead); 986 exit_notify(tsk, group_dead);
987#ifdef CONFIG_NUMA 987#ifdef CONFIG_NUMA
diff --git a/kernel/fork.c b/kernel/fork.c
index bfee931ee3fb..2cebfb23b0b8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -61,7 +61,7 @@
61#include <linux/blkdev.h> 61#include <linux/blkdev.h>
62#include <linux/fs_struct.h> 62#include <linux/fs_struct.h>
63#include <linux/magic.h> 63#include <linux/magic.h>
64#include <linux/perf_counter.h> 64#include <linux/perf_event.h>
65 65
66#include <asm/pgtable.h> 66#include <asm/pgtable.h>
67#include <asm/pgalloc.h> 67#include <asm/pgalloc.h>
@@ -1078,7 +1078,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1078 /* Perform scheduler related setup. Assign this task to a CPU. */ 1078 /* Perform scheduler related setup. Assign this task to a CPU. */
1079 sched_fork(p, clone_flags); 1079 sched_fork(p, clone_flags);
1080 1080
1081 retval = perf_counter_init_task(p); 1081 retval = perf_event_init_task(p);
1082 if (retval) 1082 if (retval)
1083 goto bad_fork_cleanup_policy; 1083 goto bad_fork_cleanup_policy;
1084 1084
@@ -1253,7 +1253,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1253 write_unlock_irq(&tasklist_lock); 1253 write_unlock_irq(&tasklist_lock);
1254 proc_fork_connector(p); 1254 proc_fork_connector(p);
1255 cgroup_post_fork(p); 1255 cgroup_post_fork(p);
1256 perf_counter_fork(p); 1256 perf_event_fork(p);
1257 return p; 1257 return p;
1258 1258
1259bad_fork_free_pid: 1259bad_fork_free_pid:
@@ -1280,7 +1280,7 @@ bad_fork_cleanup_semundo:
1280bad_fork_cleanup_audit: 1280bad_fork_cleanup_audit:
1281 audit_free(p); 1281 audit_free(p);
1282bad_fork_cleanup_policy: 1282bad_fork_cleanup_policy:
1283 perf_counter_free_task(p); 1283 perf_event_free_task(p);
1284#ifdef CONFIG_NUMA 1284#ifdef CONFIG_NUMA
1285 mpol_put(p->mempolicy); 1285 mpol_put(p->mempolicy);
1286bad_fork_cleanup_cgroup: 1286bad_fork_cleanup_cgroup:
diff --git a/kernel/perf_counter.c b/kernel/perf_event.c
index cc768ab81ac8..76ac4db405e9 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_event.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * Performance counter core code 2 * Performance events core code:
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 * 8 *
9 * For licensing details see kernel-base/COPYING 9 * For licensing details see kernel-base/COPYING
10 */ 10 */
11 11
12#include <linux/fs.h> 12#include <linux/fs.h>
@@ -26,66 +26,66 @@
26#include <linux/syscalls.h> 26#include <linux/syscalls.h>
27#include <linux/anon_inodes.h> 27#include <linux/anon_inodes.h>
28#include <linux/kernel_stat.h> 28#include <linux/kernel_stat.h>
29#include <linux/perf_counter.h> 29#include <linux/perf_event.h>
30 30
31#include <asm/irq_regs.h> 31#include <asm/irq_regs.h>
32 32
33/* 33/*
34 * Each CPU has a list of per CPU counters: 34 * Each CPU has a list of per CPU events:
35 */ 35 */
36DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); 36DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37 37
38int perf_max_counters __read_mostly = 1; 38int perf_max_events __read_mostly = 1;
39static int perf_reserved_percpu __read_mostly; 39static int perf_reserved_percpu __read_mostly;
40static int perf_overcommit __read_mostly = 1; 40static int perf_overcommit __read_mostly = 1;
41 41
42static atomic_t nr_counters __read_mostly; 42static atomic_t nr_events __read_mostly;
43static atomic_t nr_mmap_counters __read_mostly; 43static atomic_t nr_mmap_events __read_mostly;
44static atomic_t nr_comm_counters __read_mostly; 44static atomic_t nr_comm_events __read_mostly;
45static atomic_t nr_task_counters __read_mostly; 45static atomic_t nr_task_events __read_mostly;
46 46
47/* 47/*
48 * perf counter paranoia level: 48 * perf event paranoia level:
49 * -1 - not paranoid at all 49 * -1 - not paranoid at all
50 * 0 - disallow raw tracepoint access for unpriv 50 * 0 - disallow raw tracepoint access for unpriv
51 * 1 - disallow cpu counters for unpriv 51 * 1 - disallow cpu events for unpriv
52 * 2 - disallow kernel profiling for unpriv 52 * 2 - disallow kernel profiling for unpriv
53 */ 53 */
54int sysctl_perf_counter_paranoid __read_mostly = 1; 54int sysctl_perf_event_paranoid __read_mostly = 1;
55 55
56static inline bool perf_paranoid_tracepoint_raw(void) 56static inline bool perf_paranoid_tracepoint_raw(void)
57{ 57{
58 return sysctl_perf_counter_paranoid > -1; 58 return sysctl_perf_event_paranoid > -1;
59} 59}
60 60
61static inline bool perf_paranoid_cpu(void) 61static inline bool perf_paranoid_cpu(void)
62{ 62{
63 return sysctl_perf_counter_paranoid > 0; 63 return sysctl_perf_event_paranoid > 0;
64} 64}
65 65
66static inline bool perf_paranoid_kernel(void) 66static inline bool perf_paranoid_kernel(void)
67{ 67{
68 return sysctl_perf_counter_paranoid > 1; 68 return sysctl_perf_event_paranoid > 1;
69} 69}
70 70
71int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ 71int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
72 72
73/* 73/*
74 * max perf counter sample rate 74 * max perf event sample rate
75 */ 75 */
76int sysctl_perf_counter_sample_rate __read_mostly = 100000; 76int sysctl_perf_event_sample_rate __read_mostly = 100000;
77 77
78static atomic64_t perf_counter_id; 78static atomic64_t perf_event_id;
79 79
80/* 80/*
81 * Lock for (sysadmin-configurable) counter reservations: 81 * Lock for (sysadmin-configurable) event reservations:
82 */ 82 */
83static DEFINE_SPINLOCK(perf_resource_lock); 83static DEFINE_SPINLOCK(perf_resource_lock);
84 84
85/* 85/*
86 * Architecture provided APIs - weak aliases: 86 * Architecture provided APIs - weak aliases:
87 */ 87 */
88extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter) 88extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
89{ 89{
90 return NULL; 90 return NULL;
91} 91}
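The paranoia levels documented above are exposed to userspace through the renamed sysctl. A minimal userspace sketch of how a tool might interpret them, assuming the conventional procfs path /proc/sys/kernel/perf_event_paranoid (illustrative only, not part of this patch):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f) {
		perror("perf_event_paranoid");
		return 1;
	}
	if (fscanf(f, "%d", &level) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* mirrors the perf_paranoid_kernel()/_cpu()/_tracepoint_raw() checks above */
	if (level > 1)
		printf("%d: unprivileged kernel profiling disallowed\n", level);
	else if (level > 0)
		printf("%d: unprivileged CPU events disallowed\n", level);
	else if (level > -1)
		printf("%d: unprivileged raw tracepoint access disallowed\n", level);
	else
		printf("%d: not paranoid at all\n", level);
	return 0;
}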
@@ -93,18 +93,18 @@ extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counte
93void __weak hw_perf_disable(void) { barrier(); } 93void __weak hw_perf_disable(void) { barrier(); }
94void __weak hw_perf_enable(void) { barrier(); } 94void __weak hw_perf_enable(void) { barrier(); }
95 95
96void __weak hw_perf_counter_setup(int cpu) { barrier(); } 96void __weak hw_perf_event_setup(int cpu) { barrier(); }
97void __weak hw_perf_counter_setup_online(int cpu) { barrier(); } 97void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
98 98
99int __weak 99int __weak
100hw_perf_group_sched_in(struct perf_counter *group_leader, 100hw_perf_group_sched_in(struct perf_event *group_leader,
101 struct perf_cpu_context *cpuctx, 101 struct perf_cpu_context *cpuctx,
102 struct perf_counter_context *ctx, int cpu) 102 struct perf_event_context *ctx, int cpu)
103{ 103{
104 return 0; 104 return 0;
105} 105}
106 106
107void __weak perf_counter_print_debug(void) { } 107void __weak perf_event_print_debug(void) { }
108 108
109static DEFINE_PER_CPU(int, perf_disable_count); 109static DEFINE_PER_CPU(int, perf_disable_count);
110 110
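The hw_perf_* hooks above are defined __weak, so an architecture overrides them simply by providing a strong definition with the same signature; at link time the strong symbol replaces the stub and no registration call is needed. An illustrative override (placeholder body only, not taken from any particular architecture):

void hw_perf_event_setup(int cpu)
{
	/* arch-specific per-CPU PMU initialisation would go here */
}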
@@ -130,20 +130,20 @@ void perf_enable(void)
130 hw_perf_enable(); 130 hw_perf_enable();
131} 131}
132 132
133static void get_ctx(struct perf_counter_context *ctx) 133static void get_ctx(struct perf_event_context *ctx)
134{ 134{
135 WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); 135 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
136} 136}
137 137
138static void free_ctx(struct rcu_head *head) 138static void free_ctx(struct rcu_head *head)
139{ 139{
140 struct perf_counter_context *ctx; 140 struct perf_event_context *ctx;
141 141
142 ctx = container_of(head, struct perf_counter_context, rcu_head); 142 ctx = container_of(head, struct perf_event_context, rcu_head);
143 kfree(ctx); 143 kfree(ctx);
144} 144}
145 145
146static void put_ctx(struct perf_counter_context *ctx) 146static void put_ctx(struct perf_event_context *ctx)
147{ 147{
148 if (atomic_dec_and_test(&ctx->refcount)) { 148 if (atomic_dec_and_test(&ctx->refcount)) {
149 if (ctx->parent_ctx) 149 if (ctx->parent_ctx)
@@ -154,7 +154,7 @@ static void put_ctx(struct perf_counter_context *ctx)
154 } 154 }
155} 155}
156 156
157static void unclone_ctx(struct perf_counter_context *ctx) 157static void unclone_ctx(struct perf_event_context *ctx)
158{ 158{
159 if (ctx->parent_ctx) { 159 if (ctx->parent_ctx) {
160 put_ctx(ctx->parent_ctx); 160 put_ctx(ctx->parent_ctx);
@@ -163,37 +163,37 @@ static void unclone_ctx(struct perf_counter_context *ctx)
163} 163}
164 164
165/* 165/*
166 * If we inherit counters we want to return the parent counter id 166 * If we inherit events we want to return the parent event id
167 * to userspace. 167 * to userspace.
168 */ 168 */
169static u64 primary_counter_id(struct perf_counter *counter) 169static u64 primary_event_id(struct perf_event *event)
170{ 170{
171 u64 id = counter->id; 171 u64 id = event->id;
172 172
173 if (counter->parent) 173 if (event->parent)
174 id = counter->parent->id; 174 id = event->parent->id;
175 175
176 return id; 176 return id;
177} 177}
178 178
179/* 179/*
180 * Get the perf_counter_context for a task and lock it. 180 * Get the perf_event_context for a task and lock it.
181 * This has to cope with the fact that until it is locked, 181 * This has to cope with the fact that until it is locked,
182 * the context could get moved to another task. 182 * the context could get moved to another task.
183 */ 183 */
184static struct perf_counter_context * 184static struct perf_event_context *
185perf_lock_task_context(struct task_struct *task, unsigned long *flags) 185perf_lock_task_context(struct task_struct *task, unsigned long *flags)
186{ 186{
187 struct perf_counter_context *ctx; 187 struct perf_event_context *ctx;
188 188
189 rcu_read_lock(); 189 rcu_read_lock();
190 retry: 190 retry:
191 ctx = rcu_dereference(task->perf_counter_ctxp); 191 ctx = rcu_dereference(task->perf_event_ctxp);
192 if (ctx) { 192 if (ctx) {
193 /* 193 /*
194 * If this context is a clone of another, it might 194 * If this context is a clone of another, it might
195 * get swapped for another underneath us by 195 * get swapped for another underneath us by
196 * perf_counter_task_sched_out, though the 196 * perf_event_task_sched_out, though the
197 * rcu_read_lock() protects us from any context 197 * rcu_read_lock() protects us from any context
198 * getting freed. Lock the context and check if it 198 * getting freed. Lock the context and check if it
199 * got swapped before we could get the lock, and retry 199 * got swapped before we could get the lock, and retry
@@ -201,7 +201,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
201 * can't get swapped on us any more. 201 * can't get swapped on us any more.
202 */ 202 */
203 spin_lock_irqsave(&ctx->lock, *flags); 203 spin_lock_irqsave(&ctx->lock, *flags);
204 if (ctx != rcu_dereference(task->perf_counter_ctxp)) { 204 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
205 spin_unlock_irqrestore(&ctx->lock, *flags); 205 spin_unlock_irqrestore(&ctx->lock, *flags);
206 goto retry; 206 goto retry;
207 } 207 }
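The comment above spells out the race perf_lock_task_context() has to handle: the context pointer can be switched to another task between the RCU dereference and the lock acquisition. A stripped-down sketch of the same lock-then-recheck idiom, using hypothetical struct owner/struct box types that are not part of this patch:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct box {
	spinlock_t	lock;
};

struct owner {
	struct box	*boxp;	/* RCU-protected pointer */
};

static struct box *lock_current_box(struct owner *o, unsigned long *flags)
{
	struct box *b;

	rcu_read_lock();
retry:
	b = rcu_dereference(o->boxp);
	if (b) {
		spin_lock_irqsave(&b->lock, *flags);
		if (b != rcu_dereference(o->boxp)) {
			/* switched under us before we got the lock - retry */
			spin_unlock_irqrestore(&b->lock, *flags);
			goto retry;
		}
	}
	rcu_read_unlock();
	return b;	/* returned locked, or NULL */
}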
@@ -220,9 +220,9 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
220 * can't get swapped to another task. This also increments its 220 * can't get swapped to another task. This also increments its
221 * reference count so that the context can't get freed. 221 * reference count so that the context can't get freed.
222 */ 222 */
223static struct perf_counter_context *perf_pin_task_context(struct task_struct *task) 223static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
224{ 224{
225 struct perf_counter_context *ctx; 225 struct perf_event_context *ctx;
226 unsigned long flags; 226 unsigned long flags;
227 227
228 ctx = perf_lock_task_context(task, &flags); 228 ctx = perf_lock_task_context(task, &flags);
@@ -233,7 +233,7 @@ static struct perf_counter_context *perf_pin_task_context(struct task_struct *ta
233 return ctx; 233 return ctx;
234} 234}
235 235
236static void perf_unpin_context(struct perf_counter_context *ctx) 236static void perf_unpin_context(struct perf_event_context *ctx)
237{ 237{
238 unsigned long flags; 238 unsigned long flags;
239 239
@@ -244,123 +244,122 @@ static void perf_unpin_context(struct perf_counter_context *ctx)
244} 244}
245 245
246/* 246/*
247 * Add a counter to the lists for its context. 247 * Add an event to the lists for its context.
248 * Must be called with ctx->mutex and ctx->lock held. 248 * Must be called with ctx->mutex and ctx->lock held.
249 */ 249 */
250static void 250static void
251list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) 251list_add_event(struct perf_event *event, struct perf_event_context *ctx)
252{ 252{
253 struct perf_counter *group_leader = counter->group_leader; 253 struct perf_event *group_leader = event->group_leader;
254 254
255 /* 255 /*
256 * Depending on whether it is a standalone or sibling counter, 256 * Depending on whether it is a standalone or sibling event,
257 * add it straight to the context's counter list, or to the group 257 * add it straight to the context's event list, or to the group
258 * leader's sibling list: 258 * leader's sibling list:
259 */ 259 */
260 if (group_leader == counter) 260 if (group_leader == event)
261 list_add_tail(&counter->list_entry, &ctx->counter_list); 261 list_add_tail(&event->group_entry, &ctx->group_list);
262 else { 262 else {
263 list_add_tail(&counter->list_entry, &group_leader->sibling_list); 263 list_add_tail(&event->group_entry, &group_leader->sibling_list);
264 group_leader->nr_siblings++; 264 group_leader->nr_siblings++;
265 } 265 }
266 266
267 list_add_rcu(&counter->event_entry, &ctx->event_list); 267 list_add_rcu(&event->event_entry, &ctx->event_list);
268 ctx->nr_counters++; 268 ctx->nr_events++;
269 if (counter->attr.inherit_stat) 269 if (event->attr.inherit_stat)
270 ctx->nr_stat++; 270 ctx->nr_stat++;
271} 271}
272 272
273/* 273/*
274 * Remove a counter from the lists for its context. 274 * Remove an event from the lists for its context.
275 * Must be called with ctx->mutex and ctx->lock held. 275 * Must be called with ctx->mutex and ctx->lock held.
276 */ 276 */
277static void 277static void
278list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) 278list_del_event(struct perf_event *event, struct perf_event_context *ctx)
279{ 279{
280 struct perf_counter *sibling, *tmp; 280 struct perf_event *sibling, *tmp;
281 281
282 if (list_empty(&counter->list_entry)) 282 if (list_empty(&event->group_entry))
283 return; 283 return;
284 ctx->nr_counters--; 284 ctx->nr_events--;
285 if (counter->attr.inherit_stat) 285 if (event->attr.inherit_stat)
286 ctx->nr_stat--; 286 ctx->nr_stat--;
287 287
288 list_del_init(&counter->list_entry); 288 list_del_init(&event->group_entry);
289 list_del_rcu(&counter->event_entry); 289 list_del_rcu(&event->event_entry);
290 290
291 if (counter->group_leader != counter) 291 if (event->group_leader != event)
292 counter->group_leader->nr_siblings--; 292 event->group_leader->nr_siblings--;
293 293
294 /* 294 /*
295 * If this was a group counter with sibling counters then 295 * If this was a group event with sibling events then
296 * upgrade the siblings to singleton counters by adding them 296 * upgrade the siblings to singleton events by adding them
297 * to the context list directly: 297 * to the context list directly:
298 */ 298 */
299 list_for_each_entry_safe(sibling, tmp, 299 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
300 &counter->sibling_list, list_entry) {
301 300
302 list_move_tail(&sibling->list_entry, &ctx->counter_list); 301 list_move_tail(&sibling->group_entry, &ctx->group_list);
303 sibling->group_leader = sibling; 302 sibling->group_leader = sibling;
304 } 303 }
305} 304}
306 305
307static void 306static void
308counter_sched_out(struct perf_counter *counter, 307event_sched_out(struct perf_event *event,
309 struct perf_cpu_context *cpuctx, 308 struct perf_cpu_context *cpuctx,
310 struct perf_counter_context *ctx) 309 struct perf_event_context *ctx)
311{ 310{
312 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 311 if (event->state != PERF_EVENT_STATE_ACTIVE)
313 return; 312 return;
314 313
315 counter->state = PERF_COUNTER_STATE_INACTIVE; 314 event->state = PERF_EVENT_STATE_INACTIVE;
316 if (counter->pending_disable) { 315 if (event->pending_disable) {
317 counter->pending_disable = 0; 316 event->pending_disable = 0;
318 counter->state = PERF_COUNTER_STATE_OFF; 317 event->state = PERF_EVENT_STATE_OFF;
319 } 318 }
320 counter->tstamp_stopped = ctx->time; 319 event->tstamp_stopped = ctx->time;
321 counter->pmu->disable(counter); 320 event->pmu->disable(event);
322 counter->oncpu = -1; 321 event->oncpu = -1;
323 322
324 if (!is_software_counter(counter)) 323 if (!is_software_event(event))
325 cpuctx->active_oncpu--; 324 cpuctx->active_oncpu--;
326 ctx->nr_active--; 325 ctx->nr_active--;
327 if (counter->attr.exclusive || !cpuctx->active_oncpu) 326 if (event->attr.exclusive || !cpuctx->active_oncpu)
328 cpuctx->exclusive = 0; 327 cpuctx->exclusive = 0;
329} 328}
330 329
331static void 330static void
332group_sched_out(struct perf_counter *group_counter, 331group_sched_out(struct perf_event *group_event,
333 struct perf_cpu_context *cpuctx, 332 struct perf_cpu_context *cpuctx,
334 struct perf_counter_context *ctx) 333 struct perf_event_context *ctx)
335{ 334{
336 struct perf_counter *counter; 335 struct perf_event *event;
337 336
338 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) 337 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
339 return; 338 return;
340 339
341 counter_sched_out(group_counter, cpuctx, ctx); 340 event_sched_out(group_event, cpuctx, ctx);
342 341
343 /* 342 /*
344 * Schedule out siblings (if any): 343 * Schedule out siblings (if any):
345 */ 344 */
346 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) 345 list_for_each_entry(event, &group_event->sibling_list, group_entry)
347 counter_sched_out(counter, cpuctx, ctx); 346 event_sched_out(event, cpuctx, ctx);
348 347
349 if (group_counter->attr.exclusive) 348 if (group_event->attr.exclusive)
350 cpuctx->exclusive = 0; 349 cpuctx->exclusive = 0;
351} 350}
352 351
353/* 352/*
354 * Cross CPU call to remove a performance counter 353 * Cross CPU call to remove a performance event
355 * 354 *
356 * We disable the counter on the hardware level first. After that we 355 * We disable the event on the hardware level first. After that we
357 * remove it from the context list. 356 * remove it from the context list.
358 */ 357 */
359static void __perf_counter_remove_from_context(void *info) 358static void __perf_event_remove_from_context(void *info)
360{ 359{
361 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 360 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
362 struct perf_counter *counter = info; 361 struct perf_event *event = info;
363 struct perf_counter_context *ctx = counter->ctx; 362 struct perf_event_context *ctx = event->ctx;
364 363
365 /* 364 /*
366 * If this is a task context, we need to check whether it is 365 * If this is a task context, we need to check whether it is
@@ -373,22 +372,22 @@ static void __perf_counter_remove_from_context(void *info)
373 spin_lock(&ctx->lock); 372 spin_lock(&ctx->lock);
374 /* 373 /*
375 * Protect the list operation against NMI by disabling the 374 * Protect the list operation against NMI by disabling the
376 * counters on a global level. 375 * events on a global level.
377 */ 376 */
378 perf_disable(); 377 perf_disable();
379 378
380 counter_sched_out(counter, cpuctx, ctx); 379 event_sched_out(event, cpuctx, ctx);
381 380
382 list_del_counter(counter, ctx); 381 list_del_event(event, ctx);
383 382
384 if (!ctx->task) { 383 if (!ctx->task) {
385 /* 384 /*
386 * Allow more per task counters with respect to the 385 * Allow more per task events with respect to the
387 * reservation: 386 * reservation:
388 */ 387 */
389 cpuctx->max_pertask = 388 cpuctx->max_pertask =
390 min(perf_max_counters - ctx->nr_counters, 389 min(perf_max_events - ctx->nr_events,
391 perf_max_counters - perf_reserved_percpu); 390 perf_max_events - perf_reserved_percpu);
392 } 391 }
393 392
394 perf_enable(); 393 perf_enable();
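list_add_event() and list_del_event() above maintain a two-level structure: group leaders sit on ctx->group_list and each leader's siblings hang off its sibling_list, both linked through group_entry. A sketch of a walker over that structure (hypothetical helper, not in the patch; assumes the caller holds ctx->mutex and ctx->lock as the comments above require):

#include <linux/list.h>
#include <linux/perf_event.h>

static void for_each_ctx_event(struct perf_event_context *ctx,
			       void (*fn)(struct perf_event *event))
{
	struct perf_event *leader, *sibling;

	list_for_each_entry(leader, &ctx->group_list, group_entry) {
		fn(leader);
		list_for_each_entry(sibling, &leader->sibling_list, group_entry)
			fn(sibling);
	}
}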
@@ -397,56 +396,56 @@ static void __perf_counter_remove_from_context(void *info)
397 396
398 397
399/* 398/*
400 * Remove the counter from a task's (or a CPU's) list of counters. 399 * Remove the event from a task's (or a CPU's) list of events.
401 * 400 *
402 * Must be called with ctx->mutex held. 401 * Must be called with ctx->mutex held.
403 * 402 *
404 * CPU counters are removed with a smp call. For task counters we only 403 * CPU events are removed with a smp call. For task events we only
405 * call when the task is on a CPU. 404 * call when the task is on a CPU.
406 * 405 *
407 * If counter->ctx is a cloned context, callers must make sure that 406 * If event->ctx is a cloned context, callers must make sure that
408 * every task struct that counter->ctx->task could possibly point to 407 * every task struct that event->ctx->task could possibly point to
409 * remains valid. This is OK when called from perf_release since 408 * remains valid. This is OK when called from perf_release since
410 * that only calls us on the top-level context, which can't be a clone. 409 * that only calls us on the top-level context, which can't be a clone.
411 * When called from perf_counter_exit_task, it's OK because the 410 * When called from perf_event_exit_task, it's OK because the
412 * context has been detached from its task. 411 * context has been detached from its task.
413 */ 412 */
414static void perf_counter_remove_from_context(struct perf_counter *counter) 413static void perf_event_remove_from_context(struct perf_event *event)
415{ 414{
416 struct perf_counter_context *ctx = counter->ctx; 415 struct perf_event_context *ctx = event->ctx;
417 struct task_struct *task = ctx->task; 416 struct task_struct *task = ctx->task;
418 417
419 if (!task) { 418 if (!task) {
420 /* 419 /*
421 * Per cpu counters are removed via an smp call and 420 * Per cpu events are removed via an smp call and
422 * the removal is always successful. 421 * the removal is always successful.
423 */ 422 */
424 smp_call_function_single(counter->cpu, 423 smp_call_function_single(event->cpu,
425 __perf_counter_remove_from_context, 424 __perf_event_remove_from_context,
426 counter, 1); 425 event, 1);
427 return; 426 return;
428 } 427 }
429 428
430retry: 429retry:
431 task_oncpu_function_call(task, __perf_counter_remove_from_context, 430 task_oncpu_function_call(task, __perf_event_remove_from_context,
432 counter); 431 event);
433 432
434 spin_lock_irq(&ctx->lock); 433 spin_lock_irq(&ctx->lock);
435 /* 434 /*
436 * If the context is active we need to retry the smp call. 435 * If the context is active we need to retry the smp call.
437 */ 436 */
438 if (ctx->nr_active && !list_empty(&counter->list_entry)) { 437 if (ctx->nr_active && !list_empty(&event->group_entry)) {
439 spin_unlock_irq(&ctx->lock); 438 spin_unlock_irq(&ctx->lock);
440 goto retry; 439 goto retry;
441 } 440 }
442 441
443 /* 442 /*
444 * The lock prevents this context from being scheduled in, so we 443 * The lock prevents this context from being scheduled in, so we
445 * can remove the counter safely, if the call above did not 444 * can remove the event safely, if the call above did not
446 * succeed. 445 * succeed.
447 */ 446 */
448 if (!list_empty(&counter->list_entry)) { 447 if (!list_empty(&event->group_entry)) {
449 list_del_counter(counter, ctx); 448 list_del_event(event, ctx);
450 } 449 }
451 spin_unlock_irq(&ctx->lock); 450 spin_unlock_irq(&ctx->lock);
452} 451}
@@ -459,7 +458,7 @@ static inline u64 perf_clock(void)
459/* 458/*
460 * Update the record of the current time in a context. 459 * Update the record of the current time in a context.
461 */ 460 */
462static void update_context_time(struct perf_counter_context *ctx) 461static void update_context_time(struct perf_event_context *ctx)
463{ 462{
464 u64 now = perf_clock(); 463 u64 now = perf_clock();
465 464
@@ -468,51 +467,51 @@ static void update_context_time(struct perf_counter_context *ctx)
468} 467}
469 468
470/* 469/*
471 * Update the total_time_enabled and total_time_running fields for a counter. 470 * Update the total_time_enabled and total_time_running fields for an event.
472 */ 471 */
473static void update_counter_times(struct perf_counter *counter) 472static void update_event_times(struct perf_event *event)
474{ 473{
475 struct perf_counter_context *ctx = counter->ctx; 474 struct perf_event_context *ctx = event->ctx;
476 u64 run_end; 475 u64 run_end;
477 476
478 if (counter->state < PERF_COUNTER_STATE_INACTIVE || 477 if (event->state < PERF_EVENT_STATE_INACTIVE ||
479 counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE) 478 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
480 return; 479 return;
481 480
482 counter->total_time_enabled = ctx->time - counter->tstamp_enabled; 481 event->total_time_enabled = ctx->time - event->tstamp_enabled;
483 482
484 if (counter->state == PERF_COUNTER_STATE_INACTIVE) 483 if (event->state == PERF_EVENT_STATE_INACTIVE)
485 run_end = counter->tstamp_stopped; 484 run_end = event->tstamp_stopped;
486 else 485 else
487 run_end = ctx->time; 486 run_end = ctx->time;
488 487
489 counter->total_time_running = run_end - counter->tstamp_running; 488 event->total_time_running = run_end - event->tstamp_running;
490} 489}
491 490
492/* 491/*
493 * Update total_time_enabled and total_time_running for all counters in a group. 492 * Update total_time_enabled and total_time_running for all events in a group.
494 */ 493 */
495static void update_group_times(struct perf_counter *leader) 494static void update_group_times(struct perf_event *leader)
496{ 495{
497 struct perf_counter *counter; 496 struct perf_event *event;
498 497
499 update_counter_times(leader); 498 update_event_times(leader);
500 list_for_each_entry(counter, &leader->sibling_list, list_entry) 499 list_for_each_entry(event, &leader->sibling_list, group_entry)
501 update_counter_times(counter); 500 update_event_times(event);
502} 501}
503 502
504/* 503/*
505 * Cross CPU call to disable a performance counter 504 * Cross CPU call to disable a performance event
506 */ 505 */
507static void __perf_counter_disable(void *info) 506static void __perf_event_disable(void *info)
508{ 507{
509 struct perf_counter *counter = info; 508 struct perf_event *event = info;
510 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 509 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
511 struct perf_counter_context *ctx = counter->ctx; 510 struct perf_event_context *ctx = event->ctx;
512 511
513 /* 512 /*
514 * If this is a per-task counter, need to check whether this 513 * If this is a per-task event, need to check whether this
515 * counter's task is the current task on this cpu. 514 * event's task is the current task on this cpu.
516 */ 515 */
517 if (ctx->task && cpuctx->task_ctx != ctx) 516 if (ctx->task && cpuctx->task_ctx != ctx)
518 return; 517 return;
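A worked example of the time bookkeeping in update_event_times() above, using illustrative numbers and the tstamp_running adjustment made at schedule-in time (see event_sched_in() further down): an event added at ctx->time = 100 starts with tstamp_enabled = tstamp_running = tstamp_stopped = 100. Scheduling it in at time 120 advances tstamp_running to 120; scheduling it out at 150 sets tstamp_stopped = 150 and leaves it INACTIVE. If update_event_times() then runs at ctx->time = 180, it computes total_time_enabled = 180 - 100 = 80 and, with run_end = tstamp_stopped = 150, total_time_running = 150 - 120 = 30, exactly the 30 units the event spent on the PMU.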
@@ -520,57 +519,57 @@ static void __perf_counter_disable(void *info)
520 spin_lock(&ctx->lock); 519 spin_lock(&ctx->lock);
521 520
522 /* 521 /*
523 * If the counter is on, turn it off. 522 * If the event is on, turn it off.
524 * If it is in error state, leave it in error state. 523 * If it is in error state, leave it in error state.
525 */ 524 */
526 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { 525 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
527 update_context_time(ctx); 526 update_context_time(ctx);
528 update_group_times(counter); 527 update_group_times(event);
529 if (counter == counter->group_leader) 528 if (event == event->group_leader)
530 group_sched_out(counter, cpuctx, ctx); 529 group_sched_out(event, cpuctx, ctx);
531 else 530 else
532 counter_sched_out(counter, cpuctx, ctx); 531 event_sched_out(event, cpuctx, ctx);
533 counter->state = PERF_COUNTER_STATE_OFF; 532 event->state = PERF_EVENT_STATE_OFF;
534 } 533 }
535 534
536 spin_unlock(&ctx->lock); 535 spin_unlock(&ctx->lock);
537} 536}
538 537
539/* 538/*
540 * Disable a counter. 539 * Disable an event.
541 * 540 *
542 * If counter->ctx is a cloned context, callers must make sure that 541 * If event->ctx is a cloned context, callers must make sure that
543 * every task struct that counter->ctx->task could possibly point to 542 * every task struct that event->ctx->task could possibly point to
544 * remains valid. This condition is satisfied when called through 543 * remains valid. This condition is satisfied when called through
545 * perf_counter_for_each_child or perf_counter_for_each because they 544 * perf_event_for_each_child or perf_event_for_each because they
546 * hold the top-level counter's child_mutex, so any descendant that 545 * hold the top-level event's child_mutex, so any descendant that
547 * goes to exit will block in sync_child_counter. 546 * goes to exit will block in sync_child_event.
548 * When called from perf_pending_counter it's OK because counter->ctx 547 * When called from perf_pending_event it's OK because event->ctx
549 * is the current context on this CPU and preemption is disabled, 548 * is the current context on this CPU and preemption is disabled,
550 * hence we can't get into perf_counter_task_sched_out for this context. 549 * hence we can't get into perf_event_task_sched_out for this context.
551 */ 550 */
552static void perf_counter_disable(struct perf_counter *counter) 551static void perf_event_disable(struct perf_event *event)
553{ 552{
554 struct perf_counter_context *ctx = counter->ctx; 553 struct perf_event_context *ctx = event->ctx;
555 struct task_struct *task = ctx->task; 554 struct task_struct *task = ctx->task;
556 555
557 if (!task) { 556 if (!task) {
558 /* 557 /*
559 * Disable the counter on the cpu that it's on 558 * Disable the event on the cpu that it's on
560 */ 559 */
561 smp_call_function_single(counter->cpu, __perf_counter_disable, 560 smp_call_function_single(event->cpu, __perf_event_disable,
562 counter, 1); 561 event, 1);
563 return; 562 return;
564 } 563 }
565 564
566 retry: 565 retry:
567 task_oncpu_function_call(task, __perf_counter_disable, counter); 566 task_oncpu_function_call(task, __perf_event_disable, event);
568 567
569 spin_lock_irq(&ctx->lock); 568 spin_lock_irq(&ctx->lock);
570 /* 569 /*
571 * If the counter is still active, we need to retry the cross-call. 570 * If the event is still active, we need to retry the cross-call.
572 */ 571 */
573 if (counter->state == PERF_COUNTER_STATE_ACTIVE) { 572 if (event->state == PERF_EVENT_STATE_ACTIVE) {
574 spin_unlock_irq(&ctx->lock); 573 spin_unlock_irq(&ctx->lock);
575 goto retry; 574 goto retry;
576 } 575 }
@@ -579,73 +578,73 @@ static void perf_counter_disable(struct perf_counter *counter)
579 * Since we have the lock this context can't be scheduled 578 * Since we have the lock this context can't be scheduled
580 * in, so we can change the state safely. 579 * in, so we can change the state safely.
581 */ 580 */
582 if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 581 if (event->state == PERF_EVENT_STATE_INACTIVE) {
583 update_group_times(counter); 582 update_group_times(event);
584 counter->state = PERF_COUNTER_STATE_OFF; 583 event->state = PERF_EVENT_STATE_OFF;
585 } 584 }
586 585
587 spin_unlock_irq(&ctx->lock); 586 spin_unlock_irq(&ctx->lock);
588} 587}
589 588
590static int 589static int
591counter_sched_in(struct perf_counter *counter, 590event_sched_in(struct perf_event *event,
592 struct perf_cpu_context *cpuctx, 591 struct perf_cpu_context *cpuctx,
593 struct perf_counter_context *ctx, 592 struct perf_event_context *ctx,
594 int cpu) 593 int cpu)
595{ 594{
596 if (counter->state <= PERF_COUNTER_STATE_OFF) 595 if (event->state <= PERF_EVENT_STATE_OFF)
597 return 0; 596 return 0;
598 597
599 counter->state = PERF_COUNTER_STATE_ACTIVE; 598 event->state = PERF_EVENT_STATE_ACTIVE;
600 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ 599 event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
601 /* 600 /*
602 * The new state must be visible before we turn it on in the hardware: 601 * The new state must be visible before we turn it on in the hardware:
603 */ 602 */
604 smp_wmb(); 603 smp_wmb();
605 604
606 if (counter->pmu->enable(counter)) { 605 if (event->pmu->enable(event)) {
607 counter->state = PERF_COUNTER_STATE_INACTIVE; 606 event->state = PERF_EVENT_STATE_INACTIVE;
608 counter->oncpu = -1; 607 event->oncpu = -1;
609 return -EAGAIN; 608 return -EAGAIN;
610 } 609 }
611 610
612 counter->tstamp_running += ctx->time - counter->tstamp_stopped; 611 event->tstamp_running += ctx->time - event->tstamp_stopped;
613 612
614 if (!is_software_counter(counter)) 613 if (!is_software_event(event))
615 cpuctx->active_oncpu++; 614 cpuctx->active_oncpu++;
616 ctx->nr_active++; 615 ctx->nr_active++;
617 616
618 if (counter->attr.exclusive) 617 if (event->attr.exclusive)
619 cpuctx->exclusive = 1; 618 cpuctx->exclusive = 1;
620 619
621 return 0; 620 return 0;
622} 621}
623 622
624static int 623static int
625group_sched_in(struct perf_counter *group_counter, 624group_sched_in(struct perf_event *group_event,
626 struct perf_cpu_context *cpuctx, 625 struct perf_cpu_context *cpuctx,
627 struct perf_counter_context *ctx, 626 struct perf_event_context *ctx,
628 int cpu) 627 int cpu)
629{ 628{
630 struct perf_counter *counter, *partial_group; 629 struct perf_event *event, *partial_group;
631 int ret; 630 int ret;
632 631
633 if (group_counter->state == PERF_COUNTER_STATE_OFF) 632 if (group_event->state == PERF_EVENT_STATE_OFF)
634 return 0; 633 return 0;
635 634
636 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); 635 ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
637 if (ret) 636 if (ret)
638 return ret < 0 ? ret : 0; 637 return ret < 0 ? ret : 0;
639 638
640 if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) 639 if (event_sched_in(group_event, cpuctx, ctx, cpu))
641 return -EAGAIN; 640 return -EAGAIN;
642 641
643 /* 642 /*
644 * Schedule in siblings as one group (if any): 643 * Schedule in siblings as one group (if any):
645 */ 644 */
646 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { 645 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
647 if (counter_sched_in(counter, cpuctx, ctx, cpu)) { 646 if (event_sched_in(event, cpuctx, ctx, cpu)) {
648 partial_group = counter; 647 partial_group = event;
649 goto group_error; 648 goto group_error;
650 } 649 }
651 } 650 }
@@ -657,57 +656,57 @@ group_error:
657 * Groups can be scheduled in as one unit only, so undo any 656 * Groups can be scheduled in as one unit only, so undo any
658 * partial group before returning: 657 * partial group before returning:
659 */ 658 */
660 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { 659 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
661 if (counter == partial_group) 660 if (event == partial_group)
662 break; 661 break;
663 counter_sched_out(counter, cpuctx, ctx); 662 event_sched_out(event, cpuctx, ctx);
664 } 663 }
665 counter_sched_out(group_counter, cpuctx, ctx); 664 event_sched_out(group_event, cpuctx, ctx);
666 665
667 return -EAGAIN; 666 return -EAGAIN;
668} 667}
669 668
670/* 669/*
671 * Return 1 for a group consisting entirely of software counters, 670 * Return 1 for a group consisting entirely of software events,
672 * 0 if the group contains any hardware counters. 671 * 0 if the group contains any hardware events.
673 */ 672 */
674static int is_software_only_group(struct perf_counter *leader) 673static int is_software_only_group(struct perf_event *leader)
675{ 674{
676 struct perf_counter *counter; 675 struct perf_event *event;
677 676
678 if (!is_software_counter(leader)) 677 if (!is_software_event(leader))
679 return 0; 678 return 0;
680 679
681 list_for_each_entry(counter, &leader->sibling_list, list_entry) 680 list_for_each_entry(event, &leader->sibling_list, group_entry)
682 if (!is_software_counter(counter)) 681 if (!is_software_event(event))
683 return 0; 682 return 0;
684 683
685 return 1; 684 return 1;
686} 685}
687 686
688/* 687/*
689 * Work out whether we can put this counter group on the CPU now. 688 * Work out whether we can put this event group on the CPU now.
690 */ 689 */
691static int group_can_go_on(struct perf_counter *counter, 690static int group_can_go_on(struct perf_event *event,
692 struct perf_cpu_context *cpuctx, 691 struct perf_cpu_context *cpuctx,
693 int can_add_hw) 692 int can_add_hw)
694{ 693{
695 /* 694 /*
696 * Groups consisting entirely of software counters can always go on. 695 * Groups consisting entirely of software events can always go on.
697 */ 696 */
698 if (is_software_only_group(counter)) 697 if (is_software_only_group(event))
699 return 1; 698 return 1;
700 /* 699 /*
701 * If an exclusive group is already on, no other hardware 700 * If an exclusive group is already on, no other hardware
702 * counters can go on. 701 * events can go on.
703 */ 702 */
704 if (cpuctx->exclusive) 703 if (cpuctx->exclusive)
705 return 0; 704 return 0;
706 /* 705 /*
707 * If this group is exclusive and there are already 706 * If this group is exclusive and there are already
708 * counters on the CPU, it can't go on. 707 * events on the CPU, it can't go on.
709 */ 708 */
710 if (counter->attr.exclusive && cpuctx->active_oncpu) 709 if (event->attr.exclusive && cpuctx->active_oncpu)
711 return 0; 710 return 0;
712 /* 711 /*
713 * Otherwise, try to add it if all previous groups were able 712 * Otherwise, try to add it if all previous groups were able
@@ -716,26 +715,26 @@ static int group_can_go_on(struct perf_counter *counter,
716 return can_add_hw; 715 return can_add_hw;
717} 716}
718 717
719static void add_counter_to_ctx(struct perf_counter *counter, 718static void add_event_to_ctx(struct perf_event *event,
720 struct perf_counter_context *ctx) 719 struct perf_event_context *ctx)
721{ 720{
722 list_add_counter(counter, ctx); 721 list_add_event(event, ctx);
723 counter->tstamp_enabled = ctx->time; 722 event->tstamp_enabled = ctx->time;
724 counter->tstamp_running = ctx->time; 723 event->tstamp_running = ctx->time;
725 counter->tstamp_stopped = ctx->time; 724 event->tstamp_stopped = ctx->time;
726} 725}
727 726
728/* 727/*
729 * Cross CPU call to install and enable a performance counter 728 * Cross CPU call to install and enable a performance event
730 * 729 *
731 * Must be called with ctx->mutex held 730 * Must be called with ctx->mutex held
732 */ 731 */
733static void __perf_install_in_context(void *info) 732static void __perf_install_in_context(void *info)
734{ 733{
735 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 734 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
736 struct perf_counter *counter = info; 735 struct perf_event *event = info;
737 struct perf_counter_context *ctx = counter->ctx; 736 struct perf_event_context *ctx = event->ctx;
738 struct perf_counter *leader = counter->group_leader; 737 struct perf_event *leader = event->group_leader;
739 int cpu = smp_processor_id(); 738 int cpu = smp_processor_id();
740 int err; 739 int err;
741 740
@@ -744,7 +743,7 @@ static void __perf_install_in_context(void *info)
744 * the current task context of this cpu. If not it has been 743 * the current task context of this cpu. If not it has been
745 * scheduled out before the smp call arrived. 744 * scheduled out before the smp call arrived.
746 * Or possibly this is the right context but it isn't 745 * Or possibly this is the right context but it isn't
747 * on this cpu because it had no counters. 746 * on this cpu because it had no events.
748 */ 747 */
749 if (ctx->task && cpuctx->task_ctx != ctx) { 748 if (ctx->task && cpuctx->task_ctx != ctx) {
750 if (cpuctx->task_ctx || ctx->task != current) 749 if (cpuctx->task_ctx || ctx->task != current)
@@ -758,41 +757,41 @@ static void __perf_install_in_context(void *info)
758 757
759 /* 758 /*
760 * Protect the list operation against NMI by disabling the 759 * Protect the list operation against NMI by disabling the
761 * counters on a global level. NOP for non NMI based counters. 760 * events on a global level. NOP for non NMI based events.
762 */ 761 */
763 perf_disable(); 762 perf_disable();
764 763
765 add_counter_to_ctx(counter, ctx); 764 add_event_to_ctx(event, ctx);
766 765
767 /* 766 /*
768 * Don't put the counter on if it is disabled or if 767 * Don't put the event on if it is disabled or if
769 * it is in a group and the group isn't on. 768 * it is in a group and the group isn't on.
770 */ 769 */
771 if (counter->state != PERF_COUNTER_STATE_INACTIVE || 770 if (event->state != PERF_EVENT_STATE_INACTIVE ||
772 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)) 771 (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
773 goto unlock; 772 goto unlock;
774 773
775 /* 774 /*
776 * An exclusive counter can't go on if there are already active 775 * An exclusive event can't go on if there are already active
777 * hardware counters, and no hardware counter can go on if there 776 * hardware events, and no hardware event can go on if there
778 * is already an exclusive counter on. 777 * is already an exclusive event on.
779 */ 778 */
780 if (!group_can_go_on(counter, cpuctx, 1)) 779 if (!group_can_go_on(event, cpuctx, 1))
781 err = -EEXIST; 780 err = -EEXIST;
782 else 781 else
783 err = counter_sched_in(counter, cpuctx, ctx, cpu); 782 err = event_sched_in(event, cpuctx, ctx, cpu);
784 783
785 if (err) { 784 if (err) {
786 /* 785 /*
787 * This counter couldn't go on. If it is in a group 786 * This event couldn't go on. If it is in a group
788 * then we have to pull the whole group off. 787 * then we have to pull the whole group off.
789 * If the counter group is pinned then put it in error state. 788 * If the event group is pinned then put it in error state.
790 */ 789 */
791 if (leader != counter) 790 if (leader != event)
792 group_sched_out(leader, cpuctx, ctx); 791 group_sched_out(leader, cpuctx, ctx);
793 if (leader->attr.pinned) { 792 if (leader->attr.pinned) {
794 update_group_times(leader); 793 update_group_times(leader);
795 leader->state = PERF_COUNTER_STATE_ERROR; 794 leader->state = PERF_EVENT_STATE_ERROR;
796 } 795 }
797 } 796 }
798 797
@@ -806,92 +805,92 @@ static void __perf_install_in_context(void *info)
806} 805}
807 806
808/* 807/*
809 * Attach a performance counter to a context 808 * Attach a performance event to a context
810 * 809 *
811 * First we add the counter to the list with the hardware enable bit 810 * First we add the event to the list with the hardware enable bit
812 * in counter->hw_config cleared. 811 * in event->hw_config cleared.
813 * 812 *
814 * If the counter is attached to a task which is on a CPU we use a smp 813 * If the event is attached to a task which is on a CPU we use a smp
815 * call to enable it in the task context. The task might have been 814 * call to enable it in the task context. The task might have been
816 * scheduled away, but we check this in the smp call again. 815 * scheduled away, but we check this in the smp call again.
817 * 816 *
818 * Must be called with ctx->mutex held. 817 * Must be called with ctx->mutex held.
819 */ 818 */
820static void 819static void
821perf_install_in_context(struct perf_counter_context *ctx, 820perf_install_in_context(struct perf_event_context *ctx,
822 struct perf_counter *counter, 821 struct perf_event *event,
823 int cpu) 822 int cpu)
824{ 823{
825 struct task_struct *task = ctx->task; 824 struct task_struct *task = ctx->task;
826 825
827 if (!task) { 826 if (!task) {
828 /* 827 /*
829 * Per cpu counters are installed via an smp call and 828 * Per cpu events are installed via an smp call and
830 * the install is always successful. 829 * the install is always successful.
831 */ 830 */
832 smp_call_function_single(cpu, __perf_install_in_context, 831 smp_call_function_single(cpu, __perf_install_in_context,
833 counter, 1); 832 event, 1);
834 return; 833 return;
835 } 834 }
836 835
837retry: 836retry:
838 task_oncpu_function_call(task, __perf_install_in_context, 837 task_oncpu_function_call(task, __perf_install_in_context,
839 counter); 838 event);
840 839
841 spin_lock_irq(&ctx->lock); 840 spin_lock_irq(&ctx->lock);
842 /* 841 /*
843 * we need to retry the smp call. 842 * we need to retry the smp call.
844 */ 843 */
845 if (ctx->is_active && list_empty(&counter->list_entry)) { 844 if (ctx->is_active && list_empty(&event->group_entry)) {
846 spin_unlock_irq(&ctx->lock); 845 spin_unlock_irq(&ctx->lock);
847 goto retry; 846 goto retry;
848 } 847 }
849 848
850 /* 849 /*
851 * The lock prevents this context from being scheduled in, so we 850 * The lock prevents this context from being scheduled in, so we
852 * can add the counter safely, if the call above did not 851 * can add the event safely, if the call above did not
853 * succeed. 852 * succeed.
854 */ 853 */
855 if (list_empty(&counter->list_entry)) 854 if (list_empty(&event->group_entry))
856 add_counter_to_ctx(counter, ctx); 855 add_event_to_ctx(event, ctx);
857 spin_unlock_irq(&ctx->lock); 856 spin_unlock_irq(&ctx->lock);
858} 857}
859 858
860/* 859/*
861 * Put a counter into inactive state and update time fields. 860 * Put an event into inactive state and update time fields.
862 * Enabling the leader of a group effectively enables all 861 * Enabling the leader of a group effectively enables all
863 * the group members that aren't explicitly disabled, so we 862 * the group members that aren't explicitly disabled, so we
864 * have to update their ->tstamp_enabled also. 863 * have to update their ->tstamp_enabled also.
865 * Note: this works for group members as well as group leaders 864 * Note: this works for group members as well as group leaders
866 * since the non-leader members' sibling_lists will be empty. 865 * since the non-leader members' sibling_lists will be empty.
867 */ 866 */
868static void __perf_counter_mark_enabled(struct perf_counter *counter, 867static void __perf_event_mark_enabled(struct perf_event *event,
869 struct perf_counter_context *ctx) 868 struct perf_event_context *ctx)
870{ 869{
871 struct perf_counter *sub; 870 struct perf_event *sub;
872 871
873 counter->state = PERF_COUNTER_STATE_INACTIVE; 872 event->state = PERF_EVENT_STATE_INACTIVE;
874 counter->tstamp_enabled = ctx->time - counter->total_time_enabled; 873 event->tstamp_enabled = ctx->time - event->total_time_enabled;
875 list_for_each_entry(sub, &counter->sibling_list, list_entry) 874 list_for_each_entry(sub, &event->sibling_list, group_entry)
876 if (sub->state >= PERF_COUNTER_STATE_INACTIVE) 875 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
877 sub->tstamp_enabled = 876 sub->tstamp_enabled =
878 ctx->time - sub->total_time_enabled; 877 ctx->time - sub->total_time_enabled;
879} 878}
880 879
881/* 880/*
882 * Cross CPU call to enable a performance counter 881 * Cross CPU call to enable a performance event
883 */ 882 */
884static void __perf_counter_enable(void *info) 883static void __perf_event_enable(void *info)
885{ 884{
886 struct perf_counter *counter = info; 885 struct perf_event *event = info;
887 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 886 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
888 struct perf_counter_context *ctx = counter->ctx; 887 struct perf_event_context *ctx = event->ctx;
889 struct perf_counter *leader = counter->group_leader; 888 struct perf_event *leader = event->group_leader;
890 int err; 889 int err;
891 890
892 /* 891 /*
893 * If this is a per-task counter, need to check whether this 892 * If this is a per-task event, need to check whether this
894 * counter's task is the current task on this cpu. 893 * event's task is the current task on this cpu.
895 */ 894 */
896 if (ctx->task && cpuctx->task_ctx != ctx) { 895 if (ctx->task && cpuctx->task_ctx != ctx) {
897 if (cpuctx->task_ctx || ctx->task != current) 896 if (cpuctx->task_ctx || ctx->task != current)
@@ -903,40 +902,40 @@ static void __perf_counter_enable(void *info)
903 ctx->is_active = 1; 902 ctx->is_active = 1;
904 update_context_time(ctx); 903 update_context_time(ctx);
905 904
906 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 905 if (event->state >= PERF_EVENT_STATE_INACTIVE)
907 goto unlock; 906 goto unlock;
908 __perf_counter_mark_enabled(counter, ctx); 907 __perf_event_mark_enabled(event, ctx);
909 908
910 /* 909 /*
911 * If the counter is in a group and isn't the group leader, 910 * If the event is in a group and isn't the group leader,
912 * then don't put it on unless the group is on. 911 * then don't put it on unless the group is on.
913 */ 912 */
914 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) 913 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
915 goto unlock; 914 goto unlock;
916 915
917 if (!group_can_go_on(counter, cpuctx, 1)) { 916 if (!group_can_go_on(event, cpuctx, 1)) {
918 err = -EEXIST; 917 err = -EEXIST;
919 } else { 918 } else {
920 perf_disable(); 919 perf_disable();
921 if (counter == leader) 920 if (event == leader)
922 err = group_sched_in(counter, cpuctx, ctx, 921 err = group_sched_in(event, cpuctx, ctx,
923 smp_processor_id()); 922 smp_processor_id());
924 else 923 else
925 err = counter_sched_in(counter, cpuctx, ctx, 924 err = event_sched_in(event, cpuctx, ctx,
926 smp_processor_id()); 925 smp_processor_id());
927 perf_enable(); 926 perf_enable();
928 } 927 }
929 928
930 if (err) { 929 if (err) {
931 /* 930 /*
932 * If this counter can't go on and it's part of a 931 * If this event can't go on and it's part of a
933 * group, then the whole group has to come off. 932 * group, then the whole group has to come off.
934 */ 933 */
935 if (leader != counter) 934 if (leader != event)
936 group_sched_out(leader, cpuctx, ctx); 935 group_sched_out(leader, cpuctx, ctx);
937 if (leader->attr.pinned) { 936 if (leader->attr.pinned) {
938 update_group_times(leader); 937 update_group_times(leader);
939 leader->state = PERF_COUNTER_STATE_ERROR; 938 leader->state = PERF_EVENT_STATE_ERROR;
940 } 939 }
941 } 940 }
942 941
@@ -945,98 +944,98 @@ static void __perf_counter_enable(void *info)
945} 944}
946 945
947/* 946/*
948 * Enable a counter. 947 * Enable an event.
949 * 948 *
950 * If counter->ctx is a cloned context, callers must make sure that 949 * If event->ctx is a cloned context, callers must make sure that
951 * every task struct that counter->ctx->task could possibly point to 950 * every task struct that event->ctx->task could possibly point to
952 * remains valid. This condition is satisfied when called through 951 * remains valid. This condition is satisfied when called through
953 * perf_counter_for_each_child or perf_counter_for_each as described 952 * perf_event_for_each_child or perf_event_for_each as described
954 * for perf_counter_disable. 953 * for perf_event_disable.
955 */ 954 */
956static void perf_counter_enable(struct perf_counter *counter) 955static void perf_event_enable(struct perf_event *event)
957{ 956{
958 struct perf_counter_context *ctx = counter->ctx; 957 struct perf_event_context *ctx = event->ctx;
959 struct task_struct *task = ctx->task; 958 struct task_struct *task = ctx->task;
960 959
961 if (!task) { 960 if (!task) {
962 /* 961 /*
963 * Enable the counter on the cpu that it's on 962 * Enable the event on the cpu that it's on
964 */ 963 */
965 smp_call_function_single(counter->cpu, __perf_counter_enable, 964 smp_call_function_single(event->cpu, __perf_event_enable,
966 counter, 1); 965 event, 1);
967 return; 966 return;
968 } 967 }
969 968
970 spin_lock_irq(&ctx->lock); 969 spin_lock_irq(&ctx->lock);
971 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 970 if (event->state >= PERF_EVENT_STATE_INACTIVE)
972 goto out; 971 goto out;
973 972
974 /* 973 /*
975 * If the counter is in error state, clear that first. 974 * If the event is in error state, clear that first.
976 * That way, if we see the counter in error state below, we 975 * That way, if we see the event in error state below, we
977 * know that it has gone back into error state, as distinct 976 * know that it has gone back into error state, as distinct
978 * from the task having been scheduled away before the 977 * from the task having been scheduled away before the
979 * cross-call arrived. 978 * cross-call arrived.
980 */ 979 */
981 if (counter->state == PERF_COUNTER_STATE_ERROR) 980 if (event->state == PERF_EVENT_STATE_ERROR)
982 counter->state = PERF_COUNTER_STATE_OFF; 981 event->state = PERF_EVENT_STATE_OFF;
983 982
984 retry: 983 retry:
985 spin_unlock_irq(&ctx->lock); 984 spin_unlock_irq(&ctx->lock);
986 task_oncpu_function_call(task, __perf_counter_enable, counter); 985 task_oncpu_function_call(task, __perf_event_enable, event);
987 986
988 spin_lock_irq(&ctx->lock); 987 spin_lock_irq(&ctx->lock);
989 988
990 /* 989 /*
991 * If the context is active and the counter is still off, 990 * If the context is active and the event is still off,
992 * we need to retry the cross-call. 991 * we need to retry the cross-call.
993 */ 992 */
994 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF) 993 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
995 goto retry; 994 goto retry;
996 995
997 /* 996 /*
998 * Since we have the lock this context can't be scheduled 997 * Since we have the lock this context can't be scheduled
999 * in, so we can change the state safely. 998 * in, so we can change the state safely.
1000 */ 999 */
1001 if (counter->state == PERF_COUNTER_STATE_OFF) 1000 if (event->state == PERF_EVENT_STATE_OFF)
1002 __perf_counter_mark_enabled(counter, ctx); 1001 __perf_event_mark_enabled(event, ctx);
1003 1002
1004 out: 1003 out:
1005 spin_unlock_irq(&ctx->lock); 1004 spin_unlock_irq(&ctx->lock);
1006} 1005}
1007 1006
1008static int perf_counter_refresh(struct perf_counter *counter, int refresh) 1007static int perf_event_refresh(struct perf_event *event, int refresh)
1009{ 1008{
1010 /* 1009 /*
1011 * not supported on inherited counters 1010 * not supported on inherited events
1012 */ 1011 */
1013 if (counter->attr.inherit) 1012 if (event->attr.inherit)
1014 return -EINVAL; 1013 return -EINVAL;
1015 1014
1016 atomic_add(refresh, &counter->event_limit); 1015 atomic_add(refresh, &event->event_limit);
1017 perf_counter_enable(counter); 1016 perf_event_enable(event);
1018 1017
1019 return 0; 1018 return 0;
1020} 1019}
1021 1020
1022void __perf_counter_sched_out(struct perf_counter_context *ctx, 1021void __perf_event_sched_out(struct perf_event_context *ctx,
1023 struct perf_cpu_context *cpuctx) 1022 struct perf_cpu_context *cpuctx)
1024{ 1023{
1025 struct perf_counter *counter; 1024 struct perf_event *event;
1026 1025
1027 spin_lock(&ctx->lock); 1026 spin_lock(&ctx->lock);
1028 ctx->is_active = 0; 1027 ctx->is_active = 0;
1029 if (likely(!ctx->nr_counters)) 1028 if (likely(!ctx->nr_events))
1030 goto out; 1029 goto out;
1031 update_context_time(ctx); 1030 update_context_time(ctx);
1032 1031
1033 perf_disable(); 1032 perf_disable();
1034 if (ctx->nr_active) { 1033 if (ctx->nr_active) {
1035 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1034 list_for_each_entry(event, &ctx->group_list, group_entry) {
1036 if (counter != counter->group_leader) 1035 if (event != event->group_leader)
1037 counter_sched_out(counter, cpuctx, ctx); 1036 event_sched_out(event, cpuctx, ctx);
1038 else 1037 else
1039 group_sched_out(counter, cpuctx, ctx); 1038 group_sched_out(event, cpuctx, ctx);
1040 } 1039 }
1041 } 1040 }
1042 perf_enable(); 1041 perf_enable();
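perf_event_refresh() above adds 'refresh' to event_limit and re-enables the event, which is how the refresh ioctl on an event file descriptor re-arms the overflow limit. A minimal userspace sketch (assumes the renamed PERF_EVENT_IOC_REFRESH constant from this series; illustrative only):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int rearm_overflow_limit(int perf_fd, int nr_overflows)
{
	/* request nr_overflows more overflows via the event_limit mechanism */
	return ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, nr_overflows);
}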
@@ -1047,46 +1046,46 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
1047/* 1046/*
1048 * Test whether two contexts are equivalent, i.e. whether they 1047 * Test whether two contexts are equivalent, i.e. whether they
1049 * have both been cloned from the same version of the same context 1048 * have both been cloned from the same version of the same context
1050 * and they both have the same number of enabled counters. 1049 * and they both have the same number of enabled events.
1051 * If the number of enabled counters is the same, then the set 1050 * If the number of enabled events is the same, then the set
1052 * of enabled counters should be the same, because these are both 1051 * of enabled events should be the same, because these are both
1053 * inherited contexts, therefore we can't access individual counters 1052 * inherited contexts, therefore we can't access individual events
1054 * in them directly with an fd; we can only enable/disable all 1053 * in them directly with an fd; we can only enable/disable all
1055 * counters via prctl, or enable/disable all counters in a family 1054 * events via prctl, or enable/disable all events in a family
1056 * via ioctl, which will have the same effect on both contexts. 1055 * via ioctl, which will have the same effect on both contexts.
1057 */ 1056 */
1058static int context_equiv(struct perf_counter_context *ctx1, 1057static int context_equiv(struct perf_event_context *ctx1,
1059 struct perf_counter_context *ctx2) 1058 struct perf_event_context *ctx2)
1060{ 1059{
1061 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx 1060 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1062 && ctx1->parent_gen == ctx2->parent_gen 1061 && ctx1->parent_gen == ctx2->parent_gen
1063 && !ctx1->pin_count && !ctx2->pin_count; 1062 && !ctx1->pin_count && !ctx2->pin_count;
1064} 1063}
1065 1064
1066static void __perf_counter_read(void *counter); 1065static void __perf_event_read(void *event);
1067 1066
1068static void __perf_counter_sync_stat(struct perf_counter *counter, 1067static void __perf_event_sync_stat(struct perf_event *event,
1069 struct perf_counter *next_counter) 1068 struct perf_event *next_event)
1070{ 1069{
1071 u64 value; 1070 u64 value;
1072 1071
1073 if (!counter->attr.inherit_stat) 1072 if (!event->attr.inherit_stat)
1074 return; 1073 return;
1075 1074
1076 /* 1075 /*
1077 * Update the counter value, we cannot use perf_counter_read() 1076 * Update the event value, we cannot use perf_event_read()
1078 * because we're in the middle of a context switch and have IRQs 1077 * because we're in the middle of a context switch and have IRQs
1079 * disabled, which upsets smp_call_function_single(), however 1078 * disabled, which upsets smp_call_function_single(), however
1080 * we know the counter must be on the current CPU, therefore we 1079 * we know the event must be on the current CPU, therefore we
1081 * don't need to use it. 1080 * don't need to use it.
1082 */ 1081 */
1083 switch (counter->state) { 1082 switch (event->state) {
1084 case PERF_COUNTER_STATE_ACTIVE: 1083 case PERF_EVENT_STATE_ACTIVE:
1085 __perf_counter_read(counter); 1084 __perf_event_read(event);
1086 break; 1085 break;
1087 1086
1088 case PERF_COUNTER_STATE_INACTIVE: 1087 case PERF_EVENT_STATE_INACTIVE:
1089 update_counter_times(counter); 1088 update_event_times(event);
1090 break; 1089 break;
1091 1090
1092 default: 1091 default:
@@ -1094,73 +1093,73 @@ static void __perf_counter_sync_stat(struct perf_counter *counter,
1094 } 1093 }
1095 1094
1096 /* 1095 /*
1097 * In order to keep per-task stats reliable we need to flip the counter 1096 * In order to keep per-task stats reliable we need to flip the event
1098 * values when we flip the contexts. 1097 * values when we flip the contexts.
1099 */ 1098 */
1100 value = atomic64_read(&next_counter->count); 1099 value = atomic64_read(&next_event->count);
1101 value = atomic64_xchg(&counter->count, value); 1100 value = atomic64_xchg(&event->count, value);
1102 atomic64_set(&next_counter->count, value); 1101 atomic64_set(&next_event->count, value);
1103 1102
1104 swap(counter->total_time_enabled, next_counter->total_time_enabled); 1103 swap(event->total_time_enabled, next_event->total_time_enabled);
1105 swap(counter->total_time_running, next_counter->total_time_running); 1104 swap(event->total_time_running, next_event->total_time_running);
1106 1105
1107 /* 1106 /*
1108 * Since we swizzled the values, update the user visible data too. 1107 * Since we swizzled the values, update the user visible data too.
1109 */ 1108 */
1110 perf_counter_update_userpage(counter); 1109 perf_event_update_userpage(event);
1111 perf_counter_update_userpage(next_counter); 1110 perf_event_update_userpage(next_event);
1112} 1111}
1113 1112
1114#define list_next_entry(pos, member) \ 1113#define list_next_entry(pos, member) \
1115 list_entry(pos->member.next, typeof(*pos), member) 1114 list_entry(pos->member.next, typeof(*pos), member)
1116 1115
1117static void perf_counter_sync_stat(struct perf_counter_context *ctx, 1116static void perf_event_sync_stat(struct perf_event_context *ctx,
1118 struct perf_counter_context *next_ctx) 1117 struct perf_event_context *next_ctx)
1119{ 1118{
1120 struct perf_counter *counter, *next_counter; 1119 struct perf_event *event, *next_event;
1121 1120
1122 if (!ctx->nr_stat) 1121 if (!ctx->nr_stat)
1123 return; 1122 return;
1124 1123
1125 counter = list_first_entry(&ctx->event_list, 1124 event = list_first_entry(&ctx->event_list,
1126 struct perf_counter, event_entry); 1125 struct perf_event, event_entry);
1127 1126
1128 next_counter = list_first_entry(&next_ctx->event_list, 1127 next_event = list_first_entry(&next_ctx->event_list,
1129 struct perf_counter, event_entry); 1128 struct perf_event, event_entry);
1130 1129
1131 while (&counter->event_entry != &ctx->event_list && 1130 while (&event->event_entry != &ctx->event_list &&
1132 &next_counter->event_entry != &next_ctx->event_list) { 1131 &next_event->event_entry != &next_ctx->event_list) {
1133 1132
1134 __perf_counter_sync_stat(counter, next_counter); 1133 __perf_event_sync_stat(event, next_event);
1135 1134
1136 counter = list_next_entry(counter, event_entry); 1135 event = list_next_entry(event, event_entry);
1137 next_counter = list_next_entry(next_counter, event_entry); 1136 next_event = list_next_entry(next_event, event_entry);
1138 } 1137 }
1139} 1138}
1140 1139
1141/* 1140/*
1142 * Called from scheduler to remove the counters of the current task, 1141 * Called from scheduler to remove the events of the current task,
1143 * with interrupts disabled. 1142 * with interrupts disabled.
1144 * 1143 *
1145 * We stop each counter and update the counter value in counter->count. 1144 * We stop each event and update the event value in event->count.
1146 * 1145 *
1147 * This does not protect us against NMI, but disable() 1146 * This does not protect us against NMI, but disable()
1148 * sets the disabled bit in the control field of counter _before_ 1147 * sets the disabled bit in the control field of event _before_
1149 * accessing the counter control register. If a NMI hits, then it will 1148 * accessing the event control register. If an NMI hits, then it will
1150 * not restart the counter. 1149 * not restart the event.
1151 */ 1150 */
1152void perf_counter_task_sched_out(struct task_struct *task, 1151void perf_event_task_sched_out(struct task_struct *task,
1153 struct task_struct *next, int cpu) 1152 struct task_struct *next, int cpu)
1154{ 1153{
1155 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1154 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1156 struct perf_counter_context *ctx = task->perf_counter_ctxp; 1155 struct perf_event_context *ctx = task->perf_event_ctxp;
1157 struct perf_counter_context *next_ctx; 1156 struct perf_event_context *next_ctx;
1158 struct perf_counter_context *parent; 1157 struct perf_event_context *parent;
1159 struct pt_regs *regs; 1158 struct pt_regs *regs;
1160 int do_switch = 1; 1159 int do_switch = 1;
1161 1160
1162 regs = task_pt_regs(task); 1161 regs = task_pt_regs(task);
1163 perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); 1162 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1164 1163
1165 if (likely(!ctx || !cpuctx->task_ctx)) 1164 if (likely(!ctx || !cpuctx->task_ctx))
1166 return; 1165 return;
@@ -1169,7 +1168,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1169 1168
1170 rcu_read_lock(); 1169 rcu_read_lock();
1171 parent = rcu_dereference(ctx->parent_ctx); 1170 parent = rcu_dereference(ctx->parent_ctx);
1172 next_ctx = next->perf_counter_ctxp; 1171 next_ctx = next->perf_event_ctxp;
1173 if (parent && next_ctx && 1172 if (parent && next_ctx &&
1174 rcu_dereference(next_ctx->parent_ctx) == parent) { 1173 rcu_dereference(next_ctx->parent_ctx) == parent) {
1175 /* 1174 /*
@@ -1186,15 +1185,15 @@ void perf_counter_task_sched_out(struct task_struct *task,
1186 if (context_equiv(ctx, next_ctx)) { 1185 if (context_equiv(ctx, next_ctx)) {
1187 /* 1186 /*
1188 * XXX do we need a memory barrier of sorts 1187 * XXX do we need a memory barrier of sorts
1189 * wrt to rcu_dereference() of perf_counter_ctxp 1188 * wrt rcu_dereference() of perf_event_ctxp
1190 */ 1189 */
1191 task->perf_counter_ctxp = next_ctx; 1190 task->perf_event_ctxp = next_ctx;
1192 next->perf_counter_ctxp = ctx; 1191 next->perf_event_ctxp = ctx;
1193 ctx->task = next; 1192 ctx->task = next;
1194 next_ctx->task = task; 1193 next_ctx->task = task;
1195 do_switch = 0; 1194 do_switch = 0;
1196 1195
1197 perf_counter_sync_stat(ctx, next_ctx); 1196 perf_event_sync_stat(ctx, next_ctx);
1198 } 1197 }
1199 spin_unlock(&next_ctx->lock); 1198 spin_unlock(&next_ctx->lock);
1200 spin_unlock(&ctx->lock); 1199 spin_unlock(&ctx->lock);
@@ -1202,7 +1201,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1202 rcu_read_unlock(); 1201 rcu_read_unlock();
1203 1202
1204 if (do_switch) { 1203 if (do_switch) {
1205 __perf_counter_sched_out(ctx, cpuctx); 1204 __perf_event_sched_out(ctx, cpuctx);
1206 cpuctx->task_ctx = NULL; 1205 cpuctx->task_ctx = NULL;
1207 } 1206 }
1208} 1207}
@@ -1210,7 +1209,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
1210/* 1209/*
1211 * Called with IRQs disabled 1210 * Called with IRQs disabled
1212 */ 1211 */
1213static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) 1212static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1214{ 1213{
1215 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1214 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1216 1215
@@ -1220,28 +1219,28 @@ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1220 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 1219 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1221 return; 1220 return;
1222 1221
1223 __perf_counter_sched_out(ctx, cpuctx); 1222 __perf_event_sched_out(ctx, cpuctx);
1224 cpuctx->task_ctx = NULL; 1223 cpuctx->task_ctx = NULL;
1225} 1224}
1226 1225
1227/* 1226/*
1228 * Called with IRQs disabled 1227 * Called with IRQs disabled
1229 */ 1228 */
1230static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) 1229static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
1231{ 1230{
1232 __perf_counter_sched_out(&cpuctx->ctx, cpuctx); 1231 __perf_event_sched_out(&cpuctx->ctx, cpuctx);
1233} 1232}
1234 1233
1235static void 1234static void
1236__perf_counter_sched_in(struct perf_counter_context *ctx, 1235__perf_event_sched_in(struct perf_event_context *ctx,
1237 struct perf_cpu_context *cpuctx, int cpu) 1236 struct perf_cpu_context *cpuctx, int cpu)
1238{ 1237{
1239 struct perf_counter *counter; 1238 struct perf_event *event;
1240 int can_add_hw = 1; 1239 int can_add_hw = 1;
1241 1240
1242 spin_lock(&ctx->lock); 1241 spin_lock(&ctx->lock);
1243 ctx->is_active = 1; 1242 ctx->is_active = 1;
1244 if (likely(!ctx->nr_counters)) 1243 if (likely(!ctx->nr_events))
1245 goto out; 1244 goto out;
1246 1245
1247 ctx->timestamp = perf_clock(); 1246 ctx->timestamp = perf_clock();
@@ -1252,52 +1251,52 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
1252 * First go through the list and put on any pinned groups 1251 * First go through the list and put on any pinned groups
1253 * in order to give them the best chance of going on. 1252 * in order to give them the best chance of going on.
1254 */ 1253 */
1255 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1254 list_for_each_entry(event, &ctx->group_list, group_entry) {
1256 if (counter->state <= PERF_COUNTER_STATE_OFF || 1255 if (event->state <= PERF_EVENT_STATE_OFF ||
1257 !counter->attr.pinned) 1256 !event->attr.pinned)
1258 continue; 1257 continue;
1259 if (counter->cpu != -1 && counter->cpu != cpu) 1258 if (event->cpu != -1 && event->cpu != cpu)
1260 continue; 1259 continue;
1261 1260
1262 if (counter != counter->group_leader) 1261 if (event != event->group_leader)
1263 counter_sched_in(counter, cpuctx, ctx, cpu); 1262 event_sched_in(event, cpuctx, ctx, cpu);
1264 else { 1263 else {
1265 if (group_can_go_on(counter, cpuctx, 1)) 1264 if (group_can_go_on(event, cpuctx, 1))
1266 group_sched_in(counter, cpuctx, ctx, cpu); 1265 group_sched_in(event, cpuctx, ctx, cpu);
1267 } 1266 }
1268 1267
1269 /* 1268 /*
1270 * If this pinned group hasn't been scheduled, 1269 * If this pinned group hasn't been scheduled,
1271 * put it in error state. 1270 * put it in error state.
1272 */ 1271 */
1273 if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1272 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1274 update_group_times(counter); 1273 update_group_times(event);
1275 counter->state = PERF_COUNTER_STATE_ERROR; 1274 event->state = PERF_EVENT_STATE_ERROR;
1276 } 1275 }
1277 } 1276 }
1278 1277
1279 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1278 list_for_each_entry(event, &ctx->group_list, group_entry) {
1280 /* 1279 /*
1281 * Ignore counters in OFF or ERROR state, and 1280 * Ignore events in OFF or ERROR state, and
1282 * ignore pinned counters since we did them already. 1281 * ignore pinned events since we did them already.
1283 */ 1282 */
1284 if (counter->state <= PERF_COUNTER_STATE_OFF || 1283 if (event->state <= PERF_EVENT_STATE_OFF ||
1285 counter->attr.pinned) 1284 event->attr.pinned)
1286 continue; 1285 continue;
1287 1286
1288 /* 1287 /*
1289 * Listen to the 'cpu' scheduling filter constraint 1288 * Listen to the 'cpu' scheduling filter constraint
1290 * of counters: 1289 * of events:
1291 */ 1290 */
1292 if (counter->cpu != -1 && counter->cpu != cpu) 1291 if (event->cpu != -1 && event->cpu != cpu)
1293 continue; 1292 continue;
1294 1293
1295 if (counter != counter->group_leader) { 1294 if (event != event->group_leader) {
1296 if (counter_sched_in(counter, cpuctx, ctx, cpu)) 1295 if (event_sched_in(event, cpuctx, ctx, cpu))
1297 can_add_hw = 0; 1296 can_add_hw = 0;
1298 } else { 1297 } else {
1299 if (group_can_go_on(counter, cpuctx, can_add_hw)) { 1298 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1300 if (group_sched_in(counter, cpuctx, ctx, cpu)) 1299 if (group_sched_in(event, cpuctx, ctx, cpu))
1301 can_add_hw = 0; 1300 can_add_hw = 0;
1302 } 1301 }
1303 } 1302 }
@@ -1308,48 +1307,48 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
1308} 1307}
1309 1308
1310/* 1309/*
1311 * Called from scheduler to add the counters of the current task 1310 * Called from scheduler to add the events of the current task
1312 * with interrupts disabled. 1311 * with interrupts disabled.
1313 * 1312 *
1314 * We restore the counter value and then enable it. 1313 * We restore the event value and then enable it.
1315 * 1314 *
1316 * This does not protect us against NMI, but enable() 1315 * This does not protect us against NMI, but enable()
1317 * sets the enabled bit in the control field of counter _before_ 1316 * sets the enabled bit in the control field of event _before_
1318 * accessing the counter control register. If a NMI hits, then it will 1317 * accessing the event control register. If an NMI hits, then it will
1319 * keep the counter running. 1318 * keep the event running.
1320 */ 1319 */
1321void perf_counter_task_sched_in(struct task_struct *task, int cpu) 1320void perf_event_task_sched_in(struct task_struct *task, int cpu)
1322{ 1321{
1323 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1322 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1324 struct perf_counter_context *ctx = task->perf_counter_ctxp; 1323 struct perf_event_context *ctx = task->perf_event_ctxp;
1325 1324
1326 if (likely(!ctx)) 1325 if (likely(!ctx))
1327 return; 1326 return;
1328 if (cpuctx->task_ctx == ctx) 1327 if (cpuctx->task_ctx == ctx)
1329 return; 1328 return;
1330 __perf_counter_sched_in(ctx, cpuctx, cpu); 1329 __perf_event_sched_in(ctx, cpuctx, cpu);
1331 cpuctx->task_ctx = ctx; 1330 cpuctx->task_ctx = ctx;
1332} 1331}
1333 1332
1334static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) 1333static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1335{ 1334{
1336 struct perf_counter_context *ctx = &cpuctx->ctx; 1335 struct perf_event_context *ctx = &cpuctx->ctx;
1337 1336
1338 __perf_counter_sched_in(ctx, cpuctx, cpu); 1337 __perf_event_sched_in(ctx, cpuctx, cpu);
1339} 1338}
1340 1339
1341#define MAX_INTERRUPTS (~0ULL) 1340#define MAX_INTERRUPTS (~0ULL)
1342 1341
1343static void perf_log_throttle(struct perf_counter *counter, int enable); 1342static void perf_log_throttle(struct perf_event *event, int enable);
1344 1343
1345static void perf_adjust_period(struct perf_counter *counter, u64 events) 1344static void perf_adjust_period(struct perf_event *event, u64 events)
1346{ 1345{
1347 struct hw_perf_counter *hwc = &counter->hw; 1346 struct hw_perf_event *hwc = &event->hw;
1348 u64 period, sample_period; 1347 u64 period, sample_period;
1349 s64 delta; 1348 s64 delta;
1350 1349
1351 events *= hwc->sample_period; 1350 events *= hwc->sample_period;
1352 period = div64_u64(events, counter->attr.sample_freq); 1351 period = div64_u64(events, event->attr.sample_freq);
1353 1352
1354 delta = (s64)(period - hwc->sample_period); 1353 delta = (s64)(period - hwc->sample_period);
1355 delta = (delta + 7) / 8; /* low pass filter */ 1354 delta = (delta + 7) / 8; /* low pass filter */
@@ -1362,39 +1361,39 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
1362 hwc->sample_period = sample_period; 1361 hwc->sample_period = sample_period;
1363} 1362}
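perf_adjust_period() above estimates the event rate from the interrupt count, derives the period that would yield attr.sample_freq samples per second, and then moves the current period only one eighth of the way toward it, so a single noisy tick cannot swing the sampling rate wildly. A standalone sketch of that arithmetic with made-up numbers (illustration, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* One low-pass-filtered step toward the period implied by the event rate. */
static uint64_t adjust_period(uint64_t sample_period, uint64_t interrupts_per_sec,
			      uint64_t sample_freq)
{
	uint64_t events = interrupts_per_sec * sample_period;	/* events/sec estimate */
	uint64_t target = events / sample_freq;
	int64_t delta = (int64_t)(target - sample_period);

	delta = (delta + 7) / 8;		/* low pass filter */
	return sample_period + delta;
}

int main(void)
{
	const uint64_t event_rate = 20000000;	/* true events per second */
	const uint64_t sample_freq = 1000;	/* requested samples per second */
	uint64_t period = 10000;
	int tick;

	for (tick = 0; tick < 8; tick++) {
		/* at this period the PMU interrupts about event_rate/period times a second */
		period = adjust_period(period, event_rate / period, sample_freq);
		printf("tick %d: period %llu\n", tick,
		       (unsigned long long)period);
	}
	return 0;
}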
1364 1363
1365static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) 1364static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1366{ 1365{
1367 struct perf_counter *counter; 1366 struct perf_event *event;
1368 struct hw_perf_counter *hwc; 1367 struct hw_perf_event *hwc;
1369 u64 interrupts, freq; 1368 u64 interrupts, freq;
1370 1369
1371 spin_lock(&ctx->lock); 1370 spin_lock(&ctx->lock);
1372 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1371 list_for_each_entry(event, &ctx->group_list, group_entry) {
1373 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 1372 if (event->state != PERF_EVENT_STATE_ACTIVE)
1374 continue; 1373 continue;
1375 1374
1376 hwc = &counter->hw; 1375 hwc = &event->hw;
1377 1376
1378 interrupts = hwc->interrupts; 1377 interrupts = hwc->interrupts;
1379 hwc->interrupts = 0; 1378 hwc->interrupts = 0;
1380 1379
1381 /* 1380 /*
1382 * unthrottle counters on the tick 1381 * unthrottle events on the tick
1383 */ 1382 */
1384 if (interrupts == MAX_INTERRUPTS) { 1383 if (interrupts == MAX_INTERRUPTS) {
1385 perf_log_throttle(counter, 1); 1384 perf_log_throttle(event, 1);
1386 counter->pmu->unthrottle(counter); 1385 event->pmu->unthrottle(event);
1387 interrupts = 2*sysctl_perf_counter_sample_rate/HZ; 1386 interrupts = 2*sysctl_perf_event_sample_rate/HZ;
1388 } 1387 }
1389 1388
1390 if (!counter->attr.freq || !counter->attr.sample_freq) 1389 if (!event->attr.freq || !event->attr.sample_freq)
1391 continue; 1390 continue;
1392 1391
1393 /* 1392 /*
1394 * if the specified freq < HZ then we need to skip ticks 1393 * if the specified freq < HZ then we need to skip ticks
1395 */ 1394 */
1396 if (counter->attr.sample_freq < HZ) { 1395 if (event->attr.sample_freq < HZ) {
1397 freq = counter->attr.sample_freq; 1396 freq = event->attr.sample_freq;
1398 1397
1399 hwc->freq_count += freq; 1398 hwc->freq_count += freq;
1400 hwc->freq_interrupts += interrupts; 1399 hwc->freq_interrupts += interrupts;
@@ -1408,7 +1407,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1408 } else 1407 } else
1409 freq = HZ; 1408 freq = HZ;
1410 1409
1411 perf_adjust_period(counter, freq * interrupts); 1410 perf_adjust_period(event, freq * interrupts);
1412 1411
1413 /* 1412 /*
1414 * In order to avoid being stalled by an (accidental) huge 1413 * In order to avoid being stalled by an (accidental) huge
@@ -1417,9 +1416,9 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1417 */ 1416 */
1418 if (!interrupts) { 1417 if (!interrupts) {
1419 perf_disable(); 1418 perf_disable();
1420 counter->pmu->disable(counter); 1419 event->pmu->disable(event);
1421 atomic64_set(&hwc->period_left, 0); 1420 atomic64_set(&hwc->period_left, 0);
1422 counter->pmu->enable(counter); 1421 event->pmu->enable(event);
1423 perf_enable(); 1422 perf_enable();
1424 } 1423 }
1425 } 1424 }
@@ -1427,22 +1426,22 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1427} 1426}
1428 1427
1429/* 1428/*
1430 * Round-robin a context's counters: 1429 * Round-robin a context's events:
1431 */ 1430 */
1432static void rotate_ctx(struct perf_counter_context *ctx) 1431static void rotate_ctx(struct perf_event_context *ctx)
1433{ 1432{
1434 struct perf_counter *counter; 1433 struct perf_event *event;
1435 1434
1436 if (!ctx->nr_counters) 1435 if (!ctx->nr_events)
1437 return; 1436 return;
1438 1437
1439 spin_lock(&ctx->lock); 1438 spin_lock(&ctx->lock);
1440 /* 1439 /*
1441 * Rotate the first entry last (works just fine for group counters too): 1440 * Rotate the first entry last (works just fine for group events too):
1442 */ 1441 */
1443 perf_disable(); 1442 perf_disable();
1444 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1443 list_for_each_entry(event, &ctx->group_list, group_entry) {
1445 list_move_tail(&counter->list_entry, &ctx->counter_list); 1444 list_move_tail(&event->group_entry, &ctx->group_list);
1446 break; 1445 break;
1447 } 1446 }
1448 perf_enable(); 1447 perf_enable();
@@ -1450,93 +1449,93 @@ static void rotate_ctx(struct perf_counter_context *ctx)
1450 spin_unlock(&ctx->lock); 1449 spin_unlock(&ctx->lock);
1451} 1450}
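rotate_ctx() round-robins the groups by moving the head of group_list to its tail once per tick, so over time every group gets a turn on a PMU that cannot host all of them at once. The same rotation on a plain array, as a rough stand-in for list_move_tail() on an intrusive list (illustrative only):

#include <stdio.h>
#include <string.h>

/* Move the first element to the back, shifting the rest forward one slot. */
static void rotate(int *groups, int n)
{
	int first;

	if (n < 2)
		return;
	first = groups[0];
	memmove(&groups[0], &groups[1], (n - 1) * sizeof(groups[0]));
	groups[n - 1] = first;
}

int main(void)
{
	int groups[] = { 1, 2, 3, 4 };
	int tick, i;

	for (tick = 0; tick < 4; tick++) {
		rotate(groups, 4);
		for (i = 0; i < 4; i++)
			printf("%d ", groups[i]);
		printf("\n");
	}
	return 0;
}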
1452 1451
1453void perf_counter_task_tick(struct task_struct *curr, int cpu) 1452void perf_event_task_tick(struct task_struct *curr, int cpu)
1454{ 1453{
1455 struct perf_cpu_context *cpuctx; 1454 struct perf_cpu_context *cpuctx;
1456 struct perf_counter_context *ctx; 1455 struct perf_event_context *ctx;
1457 1456
1458 if (!atomic_read(&nr_counters)) 1457 if (!atomic_read(&nr_events))
1459 return; 1458 return;
1460 1459
1461 cpuctx = &per_cpu(perf_cpu_context, cpu); 1460 cpuctx = &per_cpu(perf_cpu_context, cpu);
1462 ctx = curr->perf_counter_ctxp; 1461 ctx = curr->perf_event_ctxp;
1463 1462
1464 perf_ctx_adjust_freq(&cpuctx->ctx); 1463 perf_ctx_adjust_freq(&cpuctx->ctx);
1465 if (ctx) 1464 if (ctx)
1466 perf_ctx_adjust_freq(ctx); 1465 perf_ctx_adjust_freq(ctx);
1467 1466
1468 perf_counter_cpu_sched_out(cpuctx); 1467 perf_event_cpu_sched_out(cpuctx);
1469 if (ctx) 1468 if (ctx)
1470 __perf_counter_task_sched_out(ctx); 1469 __perf_event_task_sched_out(ctx);
1471 1470
1472 rotate_ctx(&cpuctx->ctx); 1471 rotate_ctx(&cpuctx->ctx);
1473 if (ctx) 1472 if (ctx)
1474 rotate_ctx(ctx); 1473 rotate_ctx(ctx);
1475 1474
1476 perf_counter_cpu_sched_in(cpuctx, cpu); 1475 perf_event_cpu_sched_in(cpuctx, cpu);
1477 if (ctx) 1476 if (ctx)
1478 perf_counter_task_sched_in(curr, cpu); 1477 perf_event_task_sched_in(curr, cpu);
1479} 1478}
1480 1479
1481/* 1480/*
1482 * Enable all of a task's counters that have been marked enable-on-exec. 1481 * Enable all of a task's events that have been marked enable-on-exec.
1483 * This expects task == current. 1482 * This expects task == current.
1484 */ 1483 */
1485static void perf_counter_enable_on_exec(struct task_struct *task) 1484static void perf_event_enable_on_exec(struct task_struct *task)
1486{ 1485{
1487 struct perf_counter_context *ctx; 1486 struct perf_event_context *ctx;
1488 struct perf_counter *counter; 1487 struct perf_event *event;
1489 unsigned long flags; 1488 unsigned long flags;
1490 int enabled = 0; 1489 int enabled = 0;
1491 1490
1492 local_irq_save(flags); 1491 local_irq_save(flags);
1493 ctx = task->perf_counter_ctxp; 1492 ctx = task->perf_event_ctxp;
1494 if (!ctx || !ctx->nr_counters) 1493 if (!ctx || !ctx->nr_events)
1495 goto out; 1494 goto out;
1496 1495
1497 __perf_counter_task_sched_out(ctx); 1496 __perf_event_task_sched_out(ctx);
1498 1497
1499 spin_lock(&ctx->lock); 1498 spin_lock(&ctx->lock);
1500 1499
1501 list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1500 list_for_each_entry(event, &ctx->group_list, group_entry) {
1502 if (!counter->attr.enable_on_exec) 1501 if (!event->attr.enable_on_exec)
1503 continue; 1502 continue;
1504 counter->attr.enable_on_exec = 0; 1503 event->attr.enable_on_exec = 0;
1505 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 1504 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1506 continue; 1505 continue;
1507 __perf_counter_mark_enabled(counter, ctx); 1506 __perf_event_mark_enabled(event, ctx);
1508 enabled = 1; 1507 enabled = 1;
1509 } 1508 }
1510 1509
1511 /* 1510 /*
1512 * Unclone this context if we enabled any counter. 1511 * Unclone this context if we enabled any event.
1513 */ 1512 */
1514 if (enabled) 1513 if (enabled)
1515 unclone_ctx(ctx); 1514 unclone_ctx(ctx);
1516 1515
1517 spin_unlock(&ctx->lock); 1516 spin_unlock(&ctx->lock);
1518 1517
1519 perf_counter_task_sched_in(task, smp_processor_id()); 1518 perf_event_task_sched_in(task, smp_processor_id());
1520 out: 1519 out:
1521 local_irq_restore(flags); 1520 local_irq_restore(flags);
1522} 1521}
1523 1522
1524/* 1523/*
1525 * Cross CPU call to read the hardware counter 1524 * Cross CPU call to read the hardware event
1526 */ 1525 */
1527static void __perf_counter_read(void *info) 1526static void __perf_event_read(void *info)
1528{ 1527{
1529 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1528 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1530 struct perf_counter *counter = info; 1529 struct perf_event *event = info;
1531 struct perf_counter_context *ctx = counter->ctx; 1530 struct perf_event_context *ctx = event->ctx;
1532 unsigned long flags; 1531 unsigned long flags;
1533 1532
1534 /* 1533 /*
1535 * If this is a task context, we need to check whether it is 1534 * If this is a task context, we need to check whether it is
1536 * the current task context of this cpu. If not it has been 1535 * the current task context of this cpu. If not it has been
1537 * scheduled out before the smp call arrived. In that case 1536 * scheduled out before the smp call arrived. In that case
1538 * counter->count would have been updated to a recent sample 1537 * event->count would have been updated to a recent sample
1539 * when the counter was scheduled out. 1538 * when the event was scheduled out.
1540 */ 1539 */
1541 if (ctx->task && cpuctx->task_ctx != ctx) 1540 if (ctx->task && cpuctx->task_ctx != ctx)
1542 return; 1541 return;
@@ -1544,56 +1543,56 @@ static void __perf_counter_read(void *info)
1544 local_irq_save(flags); 1543 local_irq_save(flags);
1545 if (ctx->is_active) 1544 if (ctx->is_active)
1546 update_context_time(ctx); 1545 update_context_time(ctx);
1547 counter->pmu->read(counter); 1546 event->pmu->read(event);
1548 update_counter_times(counter); 1547 update_event_times(event);
1549 local_irq_restore(flags); 1548 local_irq_restore(flags);
1550} 1549}
1551 1550
1552static u64 perf_counter_read(struct perf_counter *counter) 1551static u64 perf_event_read(struct perf_event *event)
1553{ 1552{
1554 /* 1553 /*
1555 * If counter is enabled and currently active on a CPU, update the 1554 * If event is enabled and currently active on a CPU, update the
1556 * value in the counter structure: 1555 * value in the event structure:
1557 */ 1556 */
1558 if (counter->state == PERF_COUNTER_STATE_ACTIVE) { 1557 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1559 smp_call_function_single(counter->oncpu, 1558 smp_call_function_single(event->oncpu,
1560 __perf_counter_read, counter, 1); 1559 __perf_event_read, event, 1);
1561 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1560 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1562 update_counter_times(counter); 1561 update_event_times(event);
1563 } 1562 }
1564 1563
1565 return atomic64_read(&counter->count); 1564 return atomic64_read(&event->count);
1566} 1565}
1567 1566
1568/* 1567/*
1569 * Initialize the perf_counter context in a task_struct: 1568 * Initialize the perf_event context in a task_struct:
1570 */ 1569 */
1571static void 1570static void
1572__perf_counter_init_context(struct perf_counter_context *ctx, 1571__perf_event_init_context(struct perf_event_context *ctx,
1573 struct task_struct *task) 1572 struct task_struct *task)
1574{ 1573{
1575 memset(ctx, 0, sizeof(*ctx)); 1574 memset(ctx, 0, sizeof(*ctx));
1576 spin_lock_init(&ctx->lock); 1575 spin_lock_init(&ctx->lock);
1577 mutex_init(&ctx->mutex); 1576 mutex_init(&ctx->mutex);
1578 INIT_LIST_HEAD(&ctx->counter_list); 1577 INIT_LIST_HEAD(&ctx->group_list);
1579 INIT_LIST_HEAD(&ctx->event_list); 1578 INIT_LIST_HEAD(&ctx->event_list);
1580 atomic_set(&ctx->refcount, 1); 1579 atomic_set(&ctx->refcount, 1);
1581 ctx->task = task; 1580 ctx->task = task;
1582} 1581}
1583 1582
1584static struct perf_counter_context *find_get_context(pid_t pid, int cpu) 1583static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1585{ 1584{
1586 struct perf_counter_context *ctx; 1585 struct perf_event_context *ctx;
1587 struct perf_cpu_context *cpuctx; 1586 struct perf_cpu_context *cpuctx;
1588 struct task_struct *task; 1587 struct task_struct *task;
1589 unsigned long flags; 1588 unsigned long flags;
1590 int err; 1589 int err;
1591 1590
1592 /* 1591 /*
1593 * If cpu is not a wildcard then this is a percpu counter: 1592 * If cpu is not a wildcard then this is a percpu event:
1594 */ 1593 */
1595 if (cpu != -1) { 1594 if (cpu != -1) {
1596 /* Must be root to operate on a CPU counter: */ 1595 /* Must be root to operate on a CPU event: */
1597 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 1596 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1598 return ERR_PTR(-EACCES); 1597 return ERR_PTR(-EACCES);
1599 1598
@@ -1601,7 +1600,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1601 return ERR_PTR(-EINVAL); 1600 return ERR_PTR(-EINVAL);
1602 1601
1603 /* 1602 /*
1604 * We could be clever and allow to attach a counter to an 1603 * We could be clever and allow to attach an event to an
1605 * offline CPU and activate it when the CPU comes up, but 1604 * offline CPU and activate it when the CPU comes up, but
1606 * that's for later. 1605 * that's for later.
1607 */ 1606 */
@@ -1628,7 +1627,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1628 return ERR_PTR(-ESRCH); 1627 return ERR_PTR(-ESRCH);
1629 1628
1630 /* 1629 /*
1631 * Can't attach counters to a dying task. 1630 * Can't attach events to a dying task.
1632 */ 1631 */
1633 err = -ESRCH; 1632 err = -ESRCH;
1634 if (task->flags & PF_EXITING) 1633 if (task->flags & PF_EXITING)
@@ -1647,13 +1646,13 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1647 } 1646 }
1648 1647
1649 if (!ctx) { 1648 if (!ctx) {
1650 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 1649 ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
1651 err = -ENOMEM; 1650 err = -ENOMEM;
1652 if (!ctx) 1651 if (!ctx)
1653 goto errout; 1652 goto errout;
1654 __perf_counter_init_context(ctx, task); 1653 __perf_event_init_context(ctx, task);
1655 get_ctx(ctx); 1654 get_ctx(ctx);
1656 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { 1655 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1657 /* 1656 /*
1658 * We raced with some other task; use 1657 * We raced with some other task; use
1659 * the context they set. 1658 * the context they set.
@@ -1672,42 +1671,42 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1672 return ERR_PTR(err); 1671 return ERR_PTR(err);
1673} 1672}
1674 1673
1675static void free_counter_rcu(struct rcu_head *head) 1674static void free_event_rcu(struct rcu_head *head)
1676{ 1675{
1677 struct perf_counter *counter; 1676 struct perf_event *event;
1678 1677
1679 counter = container_of(head, struct perf_counter, rcu_head); 1678 event = container_of(head, struct perf_event, rcu_head);
1680 if (counter->ns) 1679 if (event->ns)
1681 put_pid_ns(counter->ns); 1680 put_pid_ns(event->ns);
1682 kfree(counter); 1681 kfree(event);
1683} 1682}
1684 1683
1685static void perf_pending_sync(struct perf_counter *counter); 1684static void perf_pending_sync(struct perf_event *event);
1686 1685
1687static void free_counter(struct perf_counter *counter) 1686static void free_event(struct perf_event *event)
1688{ 1687{
1689 perf_pending_sync(counter); 1688 perf_pending_sync(event);
1690 1689
1691 if (!counter->parent) { 1690 if (!event->parent) {
1692 atomic_dec(&nr_counters); 1691 atomic_dec(&nr_events);
1693 if (counter->attr.mmap) 1692 if (event->attr.mmap)
1694 atomic_dec(&nr_mmap_counters); 1693 atomic_dec(&nr_mmap_events);
1695 if (counter->attr.comm) 1694 if (event->attr.comm)
1696 atomic_dec(&nr_comm_counters); 1695 atomic_dec(&nr_comm_events);
1697 if (counter->attr.task) 1696 if (event->attr.task)
1698 atomic_dec(&nr_task_counters); 1697 atomic_dec(&nr_task_events);
1699 } 1698 }
1700 1699
1701 if (counter->output) { 1700 if (event->output) {
1702 fput(counter->output->filp); 1701 fput(event->output->filp);
1703 counter->output = NULL; 1702 event->output = NULL;
1704 } 1703 }
1705 1704
1706 if (counter->destroy) 1705 if (event->destroy)
1707 counter->destroy(counter); 1706 event->destroy(event);
1708 1707
1709 put_ctx(counter->ctx); 1708 put_ctx(event->ctx);
1710 call_rcu(&counter->rcu_head, free_counter_rcu); 1709 call_rcu(&event->rcu_head, free_event_rcu);
1711} 1710}
1712 1711
1713/* 1712/*
@@ -1715,43 +1714,43 @@ static void free_counter(struct perf_counter *counter)
1715 */ 1714 */
1716static int perf_release(struct inode *inode, struct file *file) 1715static int perf_release(struct inode *inode, struct file *file)
1717{ 1716{
1718 struct perf_counter *counter = file->private_data; 1717 struct perf_event *event = file->private_data;
1719 struct perf_counter_context *ctx = counter->ctx; 1718 struct perf_event_context *ctx = event->ctx;
1720 1719
1721 file->private_data = NULL; 1720 file->private_data = NULL;
1722 1721
1723 WARN_ON_ONCE(ctx->parent_ctx); 1722 WARN_ON_ONCE(ctx->parent_ctx);
1724 mutex_lock(&ctx->mutex); 1723 mutex_lock(&ctx->mutex);
1725 perf_counter_remove_from_context(counter); 1724 perf_event_remove_from_context(event);
1726 mutex_unlock(&ctx->mutex); 1725 mutex_unlock(&ctx->mutex);
1727 1726
1728 mutex_lock(&counter->owner->perf_counter_mutex); 1727 mutex_lock(&event->owner->perf_event_mutex);
1729 list_del_init(&counter->owner_entry); 1728 list_del_init(&event->owner_entry);
1730 mutex_unlock(&counter->owner->perf_counter_mutex); 1729 mutex_unlock(&event->owner->perf_event_mutex);
1731 put_task_struct(counter->owner); 1730 put_task_struct(event->owner);
1732 1731
1733 free_counter(counter); 1732 free_event(event);
1734 1733
1735 return 0; 1734 return 0;
1736} 1735}
1737 1736
1738static int perf_counter_read_size(struct perf_counter *counter) 1737static int perf_event_read_size(struct perf_event *event)
1739{ 1738{
1740 int entry = sizeof(u64); /* value */ 1739 int entry = sizeof(u64); /* value */
1741 int size = 0; 1740 int size = 0;
1742 int nr = 1; 1741 int nr = 1;
1743 1742
1744 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1743 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1745 size += sizeof(u64); 1744 size += sizeof(u64);
1746 1745
1747 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1746 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1748 size += sizeof(u64); 1747 size += sizeof(u64);
1749 1748
1750 if (counter->attr.read_format & PERF_FORMAT_ID) 1749 if (event->attr.read_format & PERF_FORMAT_ID)
1751 entry += sizeof(u64); 1750 entry += sizeof(u64);
1752 1751
1753 if (counter->attr.read_format & PERF_FORMAT_GROUP) { 1752 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1754 nr += counter->group_leader->nr_siblings; 1753 nr += event->group_leader->nr_siblings;
1755 size += sizeof(u64); 1754 size += sizeof(u64);
1756 } 1755 }
1757 1756
@@ -1760,27 +1759,27 @@ static int perf_counter_read_size(struct perf_counter *counter)
1760 return size; 1759 return size;
1761} 1760}
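perf_event_read_size() above sizes the reply a read() on the event fd will produce: one u64 per value (plus one per id when PERF_FORMAT_ID is set), an optional u64 each for the enabled and running times, and for PERF_FORMAT_GROUP an extra leading u64 plus one entry per sibling. A hypothetical userspace mirror so a reader can size its buffer up front; the final size += nr * entry step is not visible in this hunk and is an assumption here (sketch only):

#include <linux/perf_event.h>
#include <stdio.h>

/* How many bytes read() will return for a given read_format and sibling count. */
static size_t perf_read_size(unsigned long long read_format, int nr_siblings)
{
	size_t entry = sizeof(unsigned long long);	/* value */
	size_t size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(unsigned long long);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(unsigned long long);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(unsigned long long);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(unsigned long long);	/* leading nr field */
	}

	return size + nr * entry;	/* assumed tail of the kernel helper */
}

int main(void)
{
	printf("%zu bytes for a 4-event group with ids\n",
	       perf_read_size(PERF_FORMAT_GROUP | PERF_FORMAT_ID, 3));
	return 0;
}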
1762 1761
1763static u64 perf_counter_read_value(struct perf_counter *counter) 1762static u64 perf_event_read_value(struct perf_event *event)
1764{ 1763{
1765 struct perf_counter *child; 1764 struct perf_event *child;
1766 u64 total = 0; 1765 u64 total = 0;
1767 1766
1768 total += perf_counter_read(counter); 1767 total += perf_event_read(event);
1769 list_for_each_entry(child, &counter->child_list, child_list) 1768 list_for_each_entry(child, &event->child_list, child_list)
1770 total += perf_counter_read(child); 1769 total += perf_event_read(child);
1771 1770
1772 return total; 1771 return total;
1773} 1772}
1774 1773
1775static int perf_counter_read_entry(struct perf_counter *counter, 1774static int perf_event_read_entry(struct perf_event *event,
1776 u64 read_format, char __user *buf) 1775 u64 read_format, char __user *buf)
1777{ 1776{
1778 int n = 0, count = 0; 1777 int n = 0, count = 0;
1779 u64 values[2]; 1778 u64 values[2];
1780 1779
1781 values[n++] = perf_counter_read_value(counter); 1780 values[n++] = perf_event_read_value(event);
1782 if (read_format & PERF_FORMAT_ID) 1781 if (read_format & PERF_FORMAT_ID)
1783 values[n++] = primary_counter_id(counter); 1782 values[n++] = primary_event_id(event);
1784 1783
1785 count = n * sizeof(u64); 1784 count = n * sizeof(u64);
1786 1785
@@ -1790,10 +1789,10 @@ static int perf_counter_read_entry(struct perf_counter *counter,
1790 return count; 1789 return count;
1791} 1790}
1792 1791
1793static int perf_counter_read_group(struct perf_counter *counter, 1792static int perf_event_read_group(struct perf_event *event,
1794 u64 read_format, char __user *buf) 1793 u64 read_format, char __user *buf)
1795{ 1794{
1796 struct perf_counter *leader = counter->group_leader, *sub; 1795 struct perf_event *leader = event->group_leader, *sub;
1797 int n = 0, size = 0, err = -EFAULT; 1796 int n = 0, size = 0, err = -EFAULT;
1798 u64 values[3]; 1797 u64 values[3];
1799 1798
@@ -1812,14 +1811,14 @@ static int perf_counter_read_group(struct perf_counter *counter,
1812 if (copy_to_user(buf, values, size)) 1811 if (copy_to_user(buf, values, size))
1813 return -EFAULT; 1812 return -EFAULT;
1814 1813
1815 err = perf_counter_read_entry(leader, read_format, buf + size); 1814 err = perf_event_read_entry(leader, read_format, buf + size);
1816 if (err < 0) 1815 if (err < 0)
1817 return err; 1816 return err;
1818 1817
1819 size += err; 1818 size += err;
1820 1819
1821 list_for_each_entry(sub, &leader->sibling_list, list_entry) { 1820 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1822 err = perf_counter_read_entry(sub, read_format, 1821 err = perf_event_read_entry(sub, read_format,
1823 buf + size); 1822 buf + size);
1824 if (err < 0) 1823 if (err < 0)
1825 return err; 1824 return err;
@@ -1830,23 +1829,23 @@ static int perf_counter_read_group(struct perf_counter *counter,
1830 return size; 1829 return size;
1831} 1830}
1832 1831
1833static int perf_counter_read_one(struct perf_counter *counter, 1832static int perf_event_read_one(struct perf_event *event,
1834 u64 read_format, char __user *buf) 1833 u64 read_format, char __user *buf)
1835{ 1834{
1836 u64 values[4]; 1835 u64 values[4];
1837 int n = 0; 1836 int n = 0;
1838 1837
1839 values[n++] = perf_counter_read_value(counter); 1838 values[n++] = perf_event_read_value(event);
1840 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1839 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1841 values[n++] = counter->total_time_enabled + 1840 values[n++] = event->total_time_enabled +
1842 atomic64_read(&counter->child_total_time_enabled); 1841 atomic64_read(&event->child_total_time_enabled);
1843 } 1842 }
1844 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1843 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1845 values[n++] = counter->total_time_running + 1844 values[n++] = event->total_time_running +
1846 atomic64_read(&counter->child_total_time_running); 1845 atomic64_read(&event->child_total_time_running);
1847 } 1846 }
1848 if (read_format & PERF_FORMAT_ID) 1847 if (read_format & PERF_FORMAT_ID)
1849 values[n++] = primary_counter_id(counter); 1848 values[n++] = primary_event_id(event);
1850 1849
1851 if (copy_to_user(buf, values, n * sizeof(u64))) 1850 if (copy_to_user(buf, values, n * sizeof(u64)))
1852 return -EFAULT; 1851 return -EFAULT;
@@ -1855,32 +1854,32 @@ static int perf_counter_read_one(struct perf_counter *counter,
1855} 1854}
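perf_event_read_one() above lays the reply out as value, then the optional enabled and running times, then the optional id. A hedged userspace sketch of consuming that layout when all three read_format bits are set, including the usual enabled/running scaling applied when the event was multiplexed; the field order follows the code above, the struct name is made up:

#include <stdint.h>
#include <stdio.h>

struct read_one {		/* layout for TIME_ENABLED | TIME_RUNNING | ID */
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
	uint64_t id;
};

/* Scale a count that only ran for part of the time it was enabled. */
static double scaled_count(const struct read_one *r)
{
	if (!r->time_running)
		return 0.0;
	return (double)r->value * r->time_enabled / r->time_running;
}

int main(void)
{
	/* In real use these fields come from read(perf_fd, &r, sizeof(r)). */
	struct read_one r = {
		.value = 1000, .time_enabled = 4000000,
		.time_running = 1000000, .id = 42,
	};

	printf("event %llu: ~%.0f events\n",
	       (unsigned long long)r.id, scaled_count(&r));
	return 0;
}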
1856 1855
1857/* 1856/*
1858 * Read the performance counter - simple non blocking version for now 1857 * Read the performance event - simple non blocking version for now
1859 */ 1858 */
1860static ssize_t 1859static ssize_t
1861perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) 1860perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
1862{ 1861{
1863 u64 read_format = counter->attr.read_format; 1862 u64 read_format = event->attr.read_format;
1864 int ret; 1863 int ret;
1865 1864
1866 /* 1865 /*
1867 * Return end-of-file for a read on a counter that is in 1866 * Return end-of-file for a read on an event that is in
1868 * error state (i.e. because it was pinned but it couldn't be 1867 * error state (i.e. because it was pinned but it couldn't be
1869 * scheduled on to the CPU at some point). 1868 * scheduled on to the CPU at some point).
1870 */ 1869 */
1871 if (counter->state == PERF_COUNTER_STATE_ERROR) 1870 if (event->state == PERF_EVENT_STATE_ERROR)
1872 return 0; 1871 return 0;
1873 1872
1874 if (count < perf_counter_read_size(counter)) 1873 if (count < perf_event_read_size(event))
1875 return -ENOSPC; 1874 return -ENOSPC;
1876 1875
1877 WARN_ON_ONCE(counter->ctx->parent_ctx); 1876 WARN_ON_ONCE(event->ctx->parent_ctx);
1878 mutex_lock(&counter->child_mutex); 1877 mutex_lock(&event->child_mutex);
1879 if (read_format & PERF_FORMAT_GROUP) 1878 if (read_format & PERF_FORMAT_GROUP)
1880 ret = perf_counter_read_group(counter, read_format, buf); 1879 ret = perf_event_read_group(event, read_format, buf);
1881 else 1880 else
1882 ret = perf_counter_read_one(counter, read_format, buf); 1881 ret = perf_event_read_one(event, read_format, buf);
1883 mutex_unlock(&counter->child_mutex); 1882 mutex_unlock(&event->child_mutex);
1884 1883
1885 return ret; 1884 return ret;
1886} 1885}
@@ -1888,79 +1887,79 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1888static ssize_t 1887static ssize_t
1889perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 1888perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1890{ 1889{
1891 struct perf_counter *counter = file->private_data; 1890 struct perf_event *event = file->private_data;
1892 1891
1893 return perf_read_hw(counter, buf, count); 1892 return perf_read_hw(event, buf, count);
1894} 1893}
1895 1894
1896static unsigned int perf_poll(struct file *file, poll_table *wait) 1895static unsigned int perf_poll(struct file *file, poll_table *wait)
1897{ 1896{
1898 struct perf_counter *counter = file->private_data; 1897 struct perf_event *event = file->private_data;
1899 struct perf_mmap_data *data; 1898 struct perf_mmap_data *data;
1900 unsigned int events = POLL_HUP; 1899 unsigned int events = POLL_HUP;
1901 1900
1902 rcu_read_lock(); 1901 rcu_read_lock();
1903 data = rcu_dereference(counter->data); 1902 data = rcu_dereference(event->data);
1904 if (data) 1903 if (data)
1905 events = atomic_xchg(&data->poll, 0); 1904 events = atomic_xchg(&data->poll, 0);
1906 rcu_read_unlock(); 1905 rcu_read_unlock();
1907 1906
1908 poll_wait(file, &counter->waitq, wait); 1907 poll_wait(file, &event->waitq, wait);
1909 1908
1910 return events; 1909 return events;
1911} 1910}
1912 1911
1913static void perf_counter_reset(struct perf_counter *counter) 1912static void perf_event_reset(struct perf_event *event)
1914{ 1913{
1915 (void)perf_counter_read(counter); 1914 (void)perf_event_read(event);
1916 atomic64_set(&counter->count, 0); 1915 atomic64_set(&event->count, 0);
1917 perf_counter_update_userpage(counter); 1916 perf_event_update_userpage(event);
1918} 1917}
1919 1918
1920/* 1919/*
1921 * Holding the top-level counter's child_mutex means that any 1920 * Holding the top-level event's child_mutex means that any
1922 * descendant process that has inherited this counter will block 1921 * descendant process that has inherited this event will block
1923 * in sync_child_counter if it goes to exit, thus satisfying the 1922 * in sync_child_event if it goes to exit, thus satisfying the
1924 * task existence requirements of perf_counter_enable/disable. 1923 * task existence requirements of perf_event_enable/disable.
1925 */ 1924 */
1926static void perf_counter_for_each_child(struct perf_counter *counter, 1925static void perf_event_for_each_child(struct perf_event *event,
1927 void (*func)(struct perf_counter *)) 1926 void (*func)(struct perf_event *))
1928{ 1927{
1929 struct perf_counter *child; 1928 struct perf_event *child;
1930 1929
1931 WARN_ON_ONCE(counter->ctx->parent_ctx); 1930 WARN_ON_ONCE(event->ctx->parent_ctx);
1932 mutex_lock(&counter->child_mutex); 1931 mutex_lock(&event->child_mutex);
1933 func(counter); 1932 func(event);
1934 list_for_each_entry(child, &counter->child_list, child_list) 1933 list_for_each_entry(child, &event->child_list, child_list)
1935 func(child); 1934 func(child);
1936 mutex_unlock(&counter->child_mutex); 1935 mutex_unlock(&event->child_mutex);
1937} 1936}
1938 1937
1939static void perf_counter_for_each(struct perf_counter *counter, 1938static void perf_event_for_each(struct perf_event *event,
1940 void (*func)(struct perf_counter *)) 1939 void (*func)(struct perf_event *))
1941{ 1940{
1942 struct perf_counter_context *ctx = counter->ctx; 1941 struct perf_event_context *ctx = event->ctx;
1943 struct perf_counter *sibling; 1942 struct perf_event *sibling;
1944 1943
1945 WARN_ON_ONCE(ctx->parent_ctx); 1944 WARN_ON_ONCE(ctx->parent_ctx);
1946 mutex_lock(&ctx->mutex); 1945 mutex_lock(&ctx->mutex);
1947 counter = counter->group_leader; 1946 event = event->group_leader;
1948 1947
1949 perf_counter_for_each_child(counter, func); 1948 perf_event_for_each_child(event, func);
1950 func(counter); 1949 func(event);
1951 list_for_each_entry(sibling, &counter->sibling_list, list_entry) 1950 list_for_each_entry(sibling, &event->sibling_list, group_entry)
1952 perf_counter_for_each_child(counter, func); 1951 perf_event_for_each_child(event, func);
1953 mutex_unlock(&ctx->mutex); 1952 mutex_unlock(&ctx->mutex);
1954} 1953}
1955 1954
1956static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) 1955static int perf_event_period(struct perf_event *event, u64 __user *arg)
1957{ 1956{
1958 struct perf_counter_context *ctx = counter->ctx; 1957 struct perf_event_context *ctx = event->ctx;
1959 unsigned long size; 1958 unsigned long size;
1960 int ret = 0; 1959 int ret = 0;
1961 u64 value; 1960 u64 value;
1962 1961
1963 if (!counter->attr.sample_period) 1962 if (!event->attr.sample_period)
1964 return -EINVAL; 1963 return -EINVAL;
1965 1964
1966 size = copy_from_user(&value, arg, sizeof(value)); 1965 size = copy_from_user(&value, arg, sizeof(value));
@@ -1971,16 +1970,16 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1971 return -EINVAL; 1970 return -EINVAL;
1972 1971
1973 spin_lock_irq(&ctx->lock); 1972 spin_lock_irq(&ctx->lock);
1974 if (counter->attr.freq) { 1973 if (event->attr.freq) {
1975 if (value > sysctl_perf_counter_sample_rate) { 1974 if (value > sysctl_perf_event_sample_rate) {
1976 ret = -EINVAL; 1975 ret = -EINVAL;
1977 goto unlock; 1976 goto unlock;
1978 } 1977 }
1979 1978
1980 counter->attr.sample_freq = value; 1979 event->attr.sample_freq = value;
1981 } else { 1980 } else {
1982 counter->attr.sample_period = value; 1981 event->attr.sample_period = value;
1983 counter->hw.sample_period = value; 1982 event->hw.sample_period = value;
1984 } 1983 }
1985unlock: 1984unlock:
1986 spin_unlock_irq(&ctx->lock); 1985 spin_unlock_irq(&ctx->lock);
@@ -1988,80 +1987,80 @@ unlock:
1988 return ret; 1987 return ret;
1989} 1988}
1990 1989
1991int perf_counter_set_output(struct perf_counter *counter, int output_fd); 1990int perf_event_set_output(struct perf_event *event, int output_fd);
1992 1991
1993static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1992static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1994{ 1993{
1995 struct perf_counter *counter = file->private_data; 1994 struct perf_event *event = file->private_data;
1996 void (*func)(struct perf_counter *); 1995 void (*func)(struct perf_event *);
1997 u32 flags = arg; 1996 u32 flags = arg;
1998 1997
1999 switch (cmd) { 1998 switch (cmd) {
2000 case PERF_COUNTER_IOC_ENABLE: 1999 case PERF_EVENT_IOC_ENABLE:
2001 func = perf_counter_enable; 2000 func = perf_event_enable;
2002 break; 2001 break;
2003 case PERF_COUNTER_IOC_DISABLE: 2002 case PERF_EVENT_IOC_DISABLE:
2004 func = perf_counter_disable; 2003 func = perf_event_disable;
2005 break; 2004 break;
2006 case PERF_COUNTER_IOC_RESET: 2005 case PERF_EVENT_IOC_RESET:
2007 func = perf_counter_reset; 2006 func = perf_event_reset;
2008 break; 2007 break;
2009 2008
2010 case PERF_COUNTER_IOC_REFRESH: 2009 case PERF_EVENT_IOC_REFRESH:
2011 return perf_counter_refresh(counter, arg); 2010 return perf_event_refresh(event, arg);
2012 2011
2013 case PERF_COUNTER_IOC_PERIOD: 2012 case PERF_EVENT_IOC_PERIOD:
2014 return perf_counter_period(counter, (u64 __user *)arg); 2013 return perf_event_period(event, (u64 __user *)arg);
2015 2014
2016 case PERF_COUNTER_IOC_SET_OUTPUT: 2015 case PERF_EVENT_IOC_SET_OUTPUT:
2017 return perf_counter_set_output(counter, arg); 2016 return perf_event_set_output(event, arg);
2018 2017
2019 default: 2018 default:
2020 return -ENOTTY; 2019 return -ENOTTY;
2021 } 2020 }
2022 2021
2023 if (flags & PERF_IOC_FLAG_GROUP) 2022 if (flags & PERF_IOC_FLAG_GROUP)
2024 perf_counter_for_each(counter, func); 2023 perf_event_for_each(event, func);
2025 else 2024 else
2026 perf_counter_for_each_child(counter, func); 2025 perf_event_for_each_child(event, func);
2027 2026
2028 return 0; 2027 return 0;
2029} 2028}
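The ioctl dispatch above is the userspace control path: ENABLE, DISABLE and RESET map to per-event functions, REFRESH and PERIOD carry an argument, and PERF_IOC_FLAG_GROUP applies the operation to the whole group. A hedged usage sketch against an already-opened event fd; the fd is assumed to come from perf_event_open() and error handling is trimmed:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Run one measurement window on an event fd obtained elsewhere. */
static void measure_window(int perf_fd)
{
	uint64_t new_period = 200000;

	ioctl(perf_fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period);	/* u64 __user *arg */
	ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload runs here ... */
	ioctl(perf_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
}

int main(void)
{
	/* A real program would get this fd from perf_event_open(). */
	int perf_fd = -1;

	measure_window(perf_fd);	/* the calls fail harmlessly on -1 here */
	return 0;
}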
2030 2029
2031int perf_counter_task_enable(void) 2030int perf_event_task_enable(void)
2032{ 2031{
2033 struct perf_counter *counter; 2032 struct perf_event *event;
2034 2033
2035 mutex_lock(&current->perf_counter_mutex); 2034 mutex_lock(&current->perf_event_mutex);
2036 list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2035 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2037 perf_counter_for_each_child(counter, perf_counter_enable); 2036 perf_event_for_each_child(event, perf_event_enable);
2038 mutex_unlock(&current->perf_counter_mutex); 2037 mutex_unlock(&current->perf_event_mutex);
2039 2038
2040 return 0; 2039 return 0;
2041} 2040}
2042 2041
2043int perf_counter_task_disable(void) 2042int perf_event_task_disable(void)
2044{ 2043{
2045 struct perf_counter *counter; 2044 struct perf_event *event;
2046 2045
2047 mutex_lock(&current->perf_counter_mutex); 2046 mutex_lock(&current->perf_event_mutex);
2048 list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2047 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2049 perf_counter_for_each_child(counter, perf_counter_disable); 2048 perf_event_for_each_child(event, perf_event_disable);
2050 mutex_unlock(&current->perf_counter_mutex); 2049 mutex_unlock(&current->perf_event_mutex);
2051 2050
2052 return 0; 2051 return 0;
2053} 2052}
2054 2053
2055#ifndef PERF_COUNTER_INDEX_OFFSET 2054#ifndef PERF_EVENT_INDEX_OFFSET
2056# define PERF_COUNTER_INDEX_OFFSET 0 2055# define PERF_EVENT_INDEX_OFFSET 0
2057#endif 2056#endif
2058 2057
2059static int perf_counter_index(struct perf_counter *counter) 2058static int perf_event_index(struct perf_event *event)
2060{ 2059{
2061 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 2060 if (event->state != PERF_EVENT_STATE_ACTIVE)
2062 return 0; 2061 return 0;
2063 2062
2064 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; 2063 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2065} 2064}
2066 2065
2067/* 2066/*
@@ -2069,13 +2068,13 @@ static int perf_counter_index(struct perf_counter *counter)
2069 * the seqlock logic goes bad. We can not serialize this because the arch 2068 * the seqlock logic goes bad. We can not serialize this because the arch
2070 * code calls this from NMI context. 2069 * code calls this from NMI context.
2071 */ 2070 */
2072void perf_counter_update_userpage(struct perf_counter *counter) 2071void perf_event_update_userpage(struct perf_event *event)
2073{ 2072{
2074 struct perf_counter_mmap_page *userpg; 2073 struct perf_event_mmap_page *userpg;
2075 struct perf_mmap_data *data; 2074 struct perf_mmap_data *data;
2076 2075
2077 rcu_read_lock(); 2076 rcu_read_lock();
2078 data = rcu_dereference(counter->data); 2077 data = rcu_dereference(event->data);
2079 if (!data) 2078 if (!data)
2080 goto unlock; 2079 goto unlock;
2081 2080
@@ -2088,16 +2087,16 @@ void perf_counter_update_userpage(struct perf_counter *counter)
2088 preempt_disable(); 2087 preempt_disable();
2089 ++userpg->lock; 2088 ++userpg->lock;
2090 barrier(); 2089 barrier();
2091 userpg->index = perf_counter_index(counter); 2090 userpg->index = perf_event_index(event);
2092 userpg->offset = atomic64_read(&counter->count); 2091 userpg->offset = atomic64_read(&event->count);
2093 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 2092 if (event->state == PERF_EVENT_STATE_ACTIVE)
2094 userpg->offset -= atomic64_read(&counter->hw.prev_count); 2093 userpg->offset -= atomic64_read(&event->hw.prev_count);
2095 2094
2096 userpg->time_enabled = counter->total_time_enabled + 2095 userpg->time_enabled = event->total_time_enabled +
2097 atomic64_read(&counter->child_total_time_enabled); 2096 atomic64_read(&event->child_total_time_enabled);
2098 2097
2099 userpg->time_running = counter->total_time_running + 2098 userpg->time_running = event->total_time_running +
2100 atomic64_read(&counter->child_total_time_running); 2099 atomic64_read(&event->child_total_time_running);
2101 2100
2102 barrier(); 2101 barrier();
2103 ++userpg->lock; 2102 ++userpg->lock;
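The update above brackets the field writes with two increments of userpg->lock and barriers, which is what lets a userspace reader treat the lock word as a sequence count: snapshot it, read the fields, and retry if it changed. A self-contained sketch of that retry loop with a stand-in struct; the real layout is struct perf_event_mmap_page, and the barrier choice here is illustrative:

#include <stdint.h>
#include <stdio.h>

struct sample_page {			/* stand-in for perf_event_mmap_page */
	volatile uint32_t lock;
	volatile uint64_t offset;
};

/* Retry until a consistent snapshot of pg->offset is observed. */
static uint64_t seq_read_offset(const struct sample_page *pg)
{
	uint32_t seq;
	uint64_t val;

	do {
		seq = pg->lock;
		__sync_synchronize();	/* pairs with the kernel's barrier() */
		val = pg->offset;
		__sync_synchronize();
	} while (pg->lock != seq);

	return val;
}

int main(void)
{
	struct sample_page pg = { .lock = 2, .offset = 12345 };

	printf("offset %llu\n", (unsigned long long)seq_read_offset(&pg));
	return 0;
}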
@@ -2108,7 +2107,7 @@ unlock:
2108 2107
2109static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2108static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2110{ 2109{
2111 struct perf_counter *counter = vma->vm_file->private_data; 2110 struct perf_event *event = vma->vm_file->private_data;
2112 struct perf_mmap_data *data; 2111 struct perf_mmap_data *data;
2113 int ret = VM_FAULT_SIGBUS; 2112 int ret = VM_FAULT_SIGBUS;
2114 2113
@@ -2119,7 +2118,7 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2119 } 2118 }
2120 2119
2121 rcu_read_lock(); 2120 rcu_read_lock();
2122 data = rcu_dereference(counter->data); 2121 data = rcu_dereference(event->data);
2123 if (!data) 2122 if (!data)
2124 goto unlock; 2123 goto unlock;
2125 2124
@@ -2148,13 +2147,13 @@ unlock:
2148 return ret; 2147 return ret;
2149} 2148}
2150 2149
2151static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) 2150static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2152{ 2151{
2153 struct perf_mmap_data *data; 2152 struct perf_mmap_data *data;
2154 unsigned long size; 2153 unsigned long size;
2155 int i; 2154 int i;
2156 2155
2157 WARN_ON(atomic_read(&counter->mmap_count)); 2156 WARN_ON(atomic_read(&event->mmap_count));
2158 2157
2159 size = sizeof(struct perf_mmap_data); 2158 size = sizeof(struct perf_mmap_data);
2160 size += nr_pages * sizeof(void *); 2159 size += nr_pages * sizeof(void *);
@@ -2176,14 +2175,14 @@ static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
2176 data->nr_pages = nr_pages; 2175 data->nr_pages = nr_pages;
2177 atomic_set(&data->lock, -1); 2176 atomic_set(&data->lock, -1);
2178 2177
2179 if (counter->attr.watermark) { 2178 if (event->attr.watermark) {
2180 data->watermark = min_t(long, PAGE_SIZE * nr_pages, 2179 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2181 counter->attr.wakeup_watermark); 2180 event->attr.wakeup_watermark);
2182 } 2181 }
2183 if (!data->watermark) 2182 if (!data->watermark)
2184 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4); 2183 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2185 2184
2186 rcu_assign_pointer(counter->data, data); 2185 rcu_assign_pointer(event->data, data);
2187 2186
2188 return 0; 2187 return 0;
2189 2188
@@ -2222,35 +2221,35 @@ static void __perf_mmap_data_free(struct rcu_head *rcu_head)
2222 kfree(data); 2221 kfree(data);
2223} 2222}
2224 2223
2225static void perf_mmap_data_free(struct perf_counter *counter) 2224static void perf_mmap_data_free(struct perf_event *event)
2226{ 2225{
2227 struct perf_mmap_data *data = counter->data; 2226 struct perf_mmap_data *data = event->data;
2228 2227
2229 WARN_ON(atomic_read(&counter->mmap_count)); 2228 WARN_ON(atomic_read(&event->mmap_count));
2230 2229
2231 rcu_assign_pointer(counter->data, NULL); 2230 rcu_assign_pointer(event->data, NULL);
2232 call_rcu(&data->rcu_head, __perf_mmap_data_free); 2231 call_rcu(&data->rcu_head, __perf_mmap_data_free);
2233} 2232}
2234 2233
2235static void perf_mmap_open(struct vm_area_struct *vma) 2234static void perf_mmap_open(struct vm_area_struct *vma)
2236{ 2235{
2237 struct perf_counter *counter = vma->vm_file->private_data; 2236 struct perf_event *event = vma->vm_file->private_data;
2238 2237
2239 atomic_inc(&counter->mmap_count); 2238 atomic_inc(&event->mmap_count);
2240} 2239}
2241 2240
2242static void perf_mmap_close(struct vm_area_struct *vma) 2241static void perf_mmap_close(struct vm_area_struct *vma)
2243{ 2242{
2244 struct perf_counter *counter = vma->vm_file->private_data; 2243 struct perf_event *event = vma->vm_file->private_data;
2245 2244
2246 WARN_ON_ONCE(counter->ctx->parent_ctx); 2245 WARN_ON_ONCE(event->ctx->parent_ctx);
2247 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { 2246 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2248 struct user_struct *user = current_user(); 2247 struct user_struct *user = current_user();
2249 2248
2250 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); 2249 atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm);
2251 vma->vm_mm->locked_vm -= counter->data->nr_locked; 2250 vma->vm_mm->locked_vm -= event->data->nr_locked;
2252 perf_mmap_data_free(counter); 2251 perf_mmap_data_free(event);
2253 mutex_unlock(&counter->mmap_mutex); 2252 mutex_unlock(&event->mmap_mutex);
2254 } 2253 }
2255} 2254}
2256 2255
@@ -2263,7 +2262,7 @@ static struct vm_operations_struct perf_mmap_vmops = {
2263 2262
2264static int perf_mmap(struct file *file, struct vm_area_struct *vma) 2263static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2265{ 2264{
2266 struct perf_counter *counter = file->private_data; 2265 struct perf_event *event = file->private_data;
2267 unsigned long user_locked, user_lock_limit; 2266 unsigned long user_locked, user_lock_limit;
2268 struct user_struct *user = current_user(); 2267 struct user_struct *user = current_user();
2269 unsigned long locked, lock_limit; 2268 unsigned long locked, lock_limit;
@@ -2291,21 +2290,21 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2291 if (vma->vm_pgoff != 0) 2290 if (vma->vm_pgoff != 0)
2292 return -EINVAL; 2291 return -EINVAL;
2293 2292
2294 WARN_ON_ONCE(counter->ctx->parent_ctx); 2293 WARN_ON_ONCE(event->ctx->parent_ctx);
2295 mutex_lock(&counter->mmap_mutex); 2294 mutex_lock(&event->mmap_mutex);
2296 if (counter->output) { 2295 if (event->output) {
2297 ret = -EINVAL; 2296 ret = -EINVAL;
2298 goto unlock; 2297 goto unlock;
2299 } 2298 }
2300 2299
2301 if (atomic_inc_not_zero(&counter->mmap_count)) { 2300 if (atomic_inc_not_zero(&event->mmap_count)) {
2302 if (nr_pages != counter->data->nr_pages) 2301 if (nr_pages != event->data->nr_pages)
2303 ret = -EINVAL; 2302 ret = -EINVAL;
2304 goto unlock; 2303 goto unlock;
2305 } 2304 }
2306 2305
2307 user_extra = nr_pages + 1; 2306 user_extra = nr_pages + 1;
2308 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); 2307 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2309 2308
2310 /* 2309 /*
2311 * Increase the limit linearly with more CPUs: 2310 * Increase the limit linearly with more CPUs:
@@ -2328,20 +2327,20 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2328 goto unlock; 2327 goto unlock;
2329 } 2328 }
2330 2329
2331 WARN_ON(counter->data); 2330 WARN_ON(event->data);
2332 ret = perf_mmap_data_alloc(counter, nr_pages); 2331 ret = perf_mmap_data_alloc(event, nr_pages);
2333 if (ret) 2332 if (ret)
2334 goto unlock; 2333 goto unlock;
2335 2334
2336 atomic_set(&counter->mmap_count, 1); 2335 atomic_set(&event->mmap_count, 1);
2337 atomic_long_add(user_extra, &user->locked_vm); 2336 atomic_long_add(user_extra, &user->locked_vm);
2338 vma->vm_mm->locked_vm += extra; 2337 vma->vm_mm->locked_vm += extra;
2339 counter->data->nr_locked = extra; 2338 event->data->nr_locked = extra;
2340 if (vma->vm_flags & VM_WRITE) 2339 if (vma->vm_flags & VM_WRITE)
2341 counter->data->writable = 1; 2340 event->data->writable = 1;
2342 2341
2343unlock: 2342unlock:
2344 mutex_unlock(&counter->mmap_mutex); 2343 mutex_unlock(&event->mmap_mutex);
2345 2344
2346 vma->vm_flags |= VM_RESERVED; 2345 vma->vm_flags |= VM_RESERVED;
2347 vma->vm_ops = &perf_mmap_vmops; 2346 vma->vm_ops = &perf_mmap_vmops;
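The buffer-size accounting in the hunk above is easy to lose in the rename noise. A minimal standalone sketch of it (plain userspace C; PAGE_SHIFT, the sysctl value in KiB, the CPU count and the RLIMIT_MEMLOCK figure are passed in as parameters, and the CAP_IPC_LOCK escape hatch of the real code is left out):

/*
 * Userspace model of the pinned-page accounting perf_mmap() does, not the
 * kernel implementation itself.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static int mmap_fits(unsigned long nr_pages, unsigned long sysctl_mlock_kb,
                     unsigned long user_locked_pages, unsigned long nr_cpus,
                     unsigned long mm_locked_pages, unsigned long rlimit_memlock)
{
    unsigned long user_extra = nr_pages + 1;  /* +1 for the control page */
    unsigned long user_lock_limit = sysctl_mlock_kb >> (PAGE_SHIFT - 10);
    unsigned long lock_limit = rlimit_memlock >> PAGE_SHIFT;
    unsigned long user_locked, locked, extra = 0;

    user_lock_limit *= nr_cpus;               /* limit scales linearly with CPUs */
    user_locked = user_locked_pages + user_extra;

    if (user_locked > user_lock_limit)        /* the excess spills into mm->locked_vm */
        extra = user_locked - user_lock_limit;

    locked = mm_locked_pages + extra;
    return locked <= lock_limit;              /* 0 would mean -EPERM */
}

int main(void)
{
    /* 8 data pages + 1 control page, 512 KiB sysctl limit, 4 CPUs, 64 MiB rlimit */
    printf("fits: %d\n", mmap_fits(8, 512, 0, 4, 0, 64UL << 20));
    return 0;
}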
@@ -2352,11 +2351,11 @@ unlock:
2352static int perf_fasync(int fd, struct file *filp, int on) 2351static int perf_fasync(int fd, struct file *filp, int on)
2353{ 2352{
2354 struct inode *inode = filp->f_path.dentry->d_inode; 2353 struct inode *inode = filp->f_path.dentry->d_inode;
2355 struct perf_counter *counter = filp->private_data; 2354 struct perf_event *event = filp->private_data;
2356 int retval; 2355 int retval;
2357 2356
2358 mutex_lock(&inode->i_mutex); 2357 mutex_lock(&inode->i_mutex);
2359 retval = fasync_helper(fd, filp, on, &counter->fasync); 2358 retval = fasync_helper(fd, filp, on, &event->fasync);
2360 mutex_unlock(&inode->i_mutex); 2359 mutex_unlock(&inode->i_mutex);
2361 2360
2362 if (retval < 0) 2361 if (retval < 0)
@@ -2376,19 +2375,19 @@ static const struct file_operations perf_fops = {
2376}; 2375};
2377 2376
2378/* 2377/*
2379 * Perf counter wakeup 2378 * Perf event wakeup
2380 * 2379 *
2381 * If there's data, ensure we set the poll() state and publish everything 2380 * If there's data, ensure we set the poll() state and publish everything
2382 * to user-space before waking everybody up. 2381 * to user-space before waking everybody up.
2383 */ 2382 */
2384 2383
2385void perf_counter_wakeup(struct perf_counter *counter) 2384void perf_event_wakeup(struct perf_event *event)
2386{ 2385{
2387 wake_up_all(&counter->waitq); 2386 wake_up_all(&event->waitq);
2388 2387
2389 if (counter->pending_kill) { 2388 if (event->pending_kill) {
2390 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill); 2389 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2391 counter->pending_kill = 0; 2390 event->pending_kill = 0;
2392 } 2391 }
2393} 2392}
2394 2393
@@ -2401,19 +2400,19 @@ void perf_counter_wakeup(struct perf_counter *counter)
2401 * single linked list and use cmpxchg() to add entries lockless. 2400 * single linked list and use cmpxchg() to add entries lockless.
2402 */ 2401 */
2403 2402
2404static void perf_pending_counter(struct perf_pending_entry *entry) 2403static void perf_pending_event(struct perf_pending_entry *entry)
2405{ 2404{
2406 struct perf_counter *counter = container_of(entry, 2405 struct perf_event *event = container_of(entry,
2407 struct perf_counter, pending); 2406 struct perf_event, pending);
2408 2407
2409 if (counter->pending_disable) { 2408 if (event->pending_disable) {
2410 counter->pending_disable = 0; 2409 event->pending_disable = 0;
2411 __perf_counter_disable(counter); 2410 __perf_event_disable(event);
2412 } 2411 }
2413 2412
2414 if (counter->pending_wakeup) { 2413 if (event->pending_wakeup) {
2415 counter->pending_wakeup = 0; 2414 event->pending_wakeup = 0;
2416 perf_counter_wakeup(counter); 2415 perf_event_wakeup(event);
2417 } 2416 }
2418} 2417}
2419 2418
@@ -2439,7 +2438,7 @@ static void perf_pending_queue(struct perf_pending_entry *entry,
2439 entry->next = *head; 2438 entry->next = *head;
2440 } while (cmpxchg(head, entry->next, entry) != entry->next); 2439 } while (cmpxchg(head, entry->next, entry) != entry->next);
2441 2440
2442 set_perf_counter_pending(); 2441 set_perf_event_pending();
2443 2442
2444 put_cpu_var(perf_pending_head); 2443 put_cpu_var(perf_pending_head);
2445} 2444}
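The comment above perf_pending_queue() describes a single linked list built with cmpxchg(). The same push can be modelled with C11 atomics as below; this is a userspace sketch of the publication step only, while the real list is additionally drained later from perf_event_do_pending():

/* Lock-free push modelling the cmpxchg() loop in perf_pending_queue(). */
#include <stdatomic.h>
#include <stdio.h>

struct pending_entry {
    struct pending_entry *next;
};

static _Atomic(struct pending_entry *) pending_head;

static void pending_push(struct pending_entry *entry)
{
    struct pending_entry *old = atomic_load(&pending_head);

    /* Publish 'entry' as the new head; retry if another context beat us to it. */
    do {
        entry->next = old;
    } while (!atomic_compare_exchange_weak(&pending_head, &old, entry));
}

int main(void)
{
    struct pending_entry a = { 0 }, b = { 0 };

    pending_push(&a);
    pending_push(&b);
    printf("second push links to first: %d\n",
           atomic_load(&pending_head)->next == &a);
    return 0;
}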
@@ -2472,7 +2471,7 @@ static int __perf_pending_run(void)
2472 return nr; 2471 return nr;
2473} 2472}
2474 2473
2475static inline int perf_not_pending(struct perf_counter *counter) 2474static inline int perf_not_pending(struct perf_event *event)
2476{ 2475{
2477 /* 2476 /*
2478 * If we flush on whatever cpu we run, there is a chance we don't 2477 * If we flush on whatever cpu we run, there is a chance we don't
@@ -2487,15 +2486,15 @@ static inline int perf_not_pending(struct perf_counter *counter)
2487 * so that we do not miss the wakeup. -- see perf_pending_handle() 2486 * so that we do not miss the wakeup. -- see perf_pending_handle()
2488 */ 2487 */
2489 smp_rmb(); 2488 smp_rmb();
2490 return counter->pending.next == NULL; 2489 return event->pending.next == NULL;
2491} 2490}
2492 2491
2493static void perf_pending_sync(struct perf_counter *counter) 2492static void perf_pending_sync(struct perf_event *event)
2494{ 2493{
2495 wait_event(counter->waitq, perf_not_pending(counter)); 2494 wait_event(event->waitq, perf_not_pending(event));
2496} 2495}
2497 2496
2498void perf_counter_do_pending(void) 2497void perf_event_do_pending(void)
2499{ 2498{
2500 __perf_pending_run(); 2499 __perf_pending_run();
2501} 2500}
@@ -2536,25 +2535,25 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
2536 atomic_set(&handle->data->poll, POLL_IN); 2535 atomic_set(&handle->data->poll, POLL_IN);
2537 2536
2538 if (handle->nmi) { 2537 if (handle->nmi) {
2539 handle->counter->pending_wakeup = 1; 2538 handle->event->pending_wakeup = 1;
2540 perf_pending_queue(&handle->counter->pending, 2539 perf_pending_queue(&handle->event->pending,
2541 perf_pending_counter); 2540 perf_pending_event);
2542 } else 2541 } else
2543 perf_counter_wakeup(handle->counter); 2542 perf_event_wakeup(handle->event);
2544} 2543}
2545 2544
2546/* 2545/*
2547 * Curious locking construct. 2546 * Curious locking construct.
2548 * 2547 *
2549 * We need to ensure a later event doesn't publish a head when a former 2548 * We need to ensure a later event_id doesn't publish a head when a former
2550 * event isn't done writing. However since we need to deal with NMIs we 2549 * event_id isn't done writing. However since we need to deal with NMIs we
2551 * cannot fully serialize things. 2550 * cannot fully serialize things.
2552 * 2551 *
2553 * What we do is serialize between CPUs so we only have to deal with NMI 2552 * What we do is serialize between CPUs so we only have to deal with NMI
2554 * nesting on a single CPU. 2553 * nesting on a single CPU.
2555 * 2554 *
2556 * We only publish the head (and generate a wakeup) when the outer-most 2555 * We only publish the head (and generate a wakeup) when the outer-most
2557 * event completes. 2556 * event_id completes.
2558 */ 2557 */
2559static void perf_output_lock(struct perf_output_handle *handle) 2558static void perf_output_lock(struct perf_output_handle *handle)
2560{ 2559{
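The comment above states the rule: nested writers on one CPU only nest, and the head is published once, when the outer-most writer completes. A single-CPU toy model of that rule (plain C, ignoring the atomics, the CPU hand-off and the deferred wakeup the real lock/unlock pair performs):

/* Toy model of the nest-count publish scheme described in the comment above. */
#include <stdio.h>

struct demo_output {
    int nest;                 /* nesting depth on this CPU */
    unsigned long head;       /* private write position */
    unsigned long user_head;  /* position visible to user space */
};

static void output_lock(struct demo_output *out)
{
    out->nest++;              /* inner (nested) writers just nest */
}

static void output_unlock(struct demo_output *out)
{
    if (--out->nest == 0)
        out->user_head = out->head;   /* outer-most writer publishes */
}

int main(void)
{
    struct demo_output out = { 0, 0, 0 };

    output_lock(&out);        /* normal context starts writing */
    out.head += 64;
    output_lock(&out);        /* an NMI nests and writes too */
    out.head += 32;
    output_unlock(&out);      /* inner completion: nothing published yet */
    printf("after inner unlock: user_head=%lu\n", out.user_head);
    output_unlock(&out);      /* outer completion publishes everything */
    printf("after outer unlock: user_head=%lu\n", out.user_head);
    return 0;
}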
@@ -2658,10 +2657,10 @@ void perf_output_copy(struct perf_output_handle *handle,
2658} 2657}
2659 2658
2660int perf_output_begin(struct perf_output_handle *handle, 2659int perf_output_begin(struct perf_output_handle *handle,
2661 struct perf_counter *counter, unsigned int size, 2660 struct perf_event *event, unsigned int size,
2662 int nmi, int sample) 2661 int nmi, int sample)
2663{ 2662{
2664 struct perf_counter *output_counter; 2663 struct perf_event *output_event;
2665 struct perf_mmap_data *data; 2664 struct perf_mmap_data *data;
2666 unsigned long tail, offset, head; 2665 unsigned long tail, offset, head;
2667 int have_lost; 2666 int have_lost;
@@ -2673,21 +2672,21 @@ int perf_output_begin(struct perf_output_handle *handle,
2673 2672
2674 rcu_read_lock(); 2673 rcu_read_lock();
2675 /* 2674 /*
2676 * For inherited counters we send all the output towards the parent. 2675 * For inherited events we send all the output towards the parent.
2677 */ 2676 */
2678 if (counter->parent) 2677 if (event->parent)
2679 counter = counter->parent; 2678 event = event->parent;
2680 2679
2681 output_counter = rcu_dereference(counter->output); 2680 output_event = rcu_dereference(event->output);
2682 if (output_counter) 2681 if (output_event)
2683 counter = output_counter; 2682 event = output_event;
2684 2683
2685 data = rcu_dereference(counter->data); 2684 data = rcu_dereference(event->data);
2686 if (!data) 2685 if (!data)
2687 goto out; 2686 goto out;
2688 2687
2689 handle->data = data; 2688 handle->data = data;
2690 handle->counter = counter; 2689 handle->event = event;
2691 handle->nmi = nmi; 2690 handle->nmi = nmi;
2692 handle->sample = sample; 2691 handle->sample = sample;
2693 2692
@@ -2721,10 +2720,10 @@ int perf_output_begin(struct perf_output_handle *handle,
2721 atomic_set(&data->wakeup, 1); 2720 atomic_set(&data->wakeup, 1);
2722 2721
2723 if (have_lost) { 2722 if (have_lost) {
2724 lost_event.header.type = PERF_EVENT_LOST; 2723 lost_event.header.type = PERF_RECORD_LOST;
2725 lost_event.header.misc = 0; 2724 lost_event.header.misc = 0;
2726 lost_event.header.size = sizeof(lost_event); 2725 lost_event.header.size = sizeof(lost_event);
2727 lost_event.id = counter->id; 2726 lost_event.id = event->id;
2728 lost_event.lost = atomic_xchg(&data->lost, 0); 2727 lost_event.lost = atomic_xchg(&data->lost, 0);
2729 2728
2730 perf_output_put(handle, lost_event); 2729 perf_output_put(handle, lost_event);
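When a write had to be dropped, the count accumulates in data->lost and the next successful perf_output_begin() prepends one catch-up record, as the hunk above shows. A compressed userspace sketch of that bookkeeping (the record layout and the numeric type value are stand-ins, not the ABI definition):

/* Model of the lost-record bookkeeping around perf_output_begin(). */
#include <stdint.h>
#include <stdio.h>

struct lost_record {
    uint32_t type, size_misc;   /* stand-in for struct perf_event_header */
    uint64_t id;
    uint64_t lost;
};

static uint64_t pending_lost;   /* models atomic_t data->lost */

static void record_write_failure(void)
{
    pending_lost++;             /* ring buffer was full, remember the drop */
}

static int begin_output(uint64_t event_id, struct lost_record *rec)
{
    if (!pending_lost)
        return 0;               /* nothing to report */

    rec->type = 2;              /* assumed value for PERF_RECORD_LOST */
    rec->size_misc = sizeof(*rec);
    rec->id = event_id;
    rec->lost = pending_lost;   /* models atomic_xchg(&data->lost, 0) */
    pending_lost = 0;
    return 1;
}

int main(void)
{
    struct lost_record rec;

    record_write_failure();
    record_write_failure();
    if (begin_output(42, &rec))
        printf("emit LOST record: id=%llu lost=%llu\n",
               (unsigned long long)rec.id, (unsigned long long)rec.lost);
    return 0;
}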
@@ -2743,10 +2742,10 @@ out:
2743 2742
2744void perf_output_end(struct perf_output_handle *handle) 2743void perf_output_end(struct perf_output_handle *handle)
2745{ 2744{
2746 struct perf_counter *counter = handle->counter; 2745 struct perf_event *event = handle->event;
2747 struct perf_mmap_data *data = handle->data; 2746 struct perf_mmap_data *data = handle->data;
2748 2747
2749 int wakeup_events = counter->attr.wakeup_events; 2748 int wakeup_events = event->attr.wakeup_events;
2750 2749
2751 if (handle->sample && wakeup_events) { 2750 if (handle->sample && wakeup_events) {
2752 int events = atomic_inc_return(&data->events); 2751 int events = atomic_inc_return(&data->events);
@@ -2760,58 +2759,58 @@ void perf_output_end(struct perf_output_handle *handle)
2760 rcu_read_unlock(); 2759 rcu_read_unlock();
2761} 2760}
2762 2761
2763static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) 2762static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
2764{ 2763{
2765 /* 2764 /*
2766 * only top level counters have the pid namespace they were created in 2765 * only top level events have the pid namespace they were created in
2767 */ 2766 */
2768 if (counter->parent) 2767 if (event->parent)
2769 counter = counter->parent; 2768 event = event->parent;
2770 2769
2771 return task_tgid_nr_ns(p, counter->ns); 2770 return task_tgid_nr_ns(p, event->ns);
2772} 2771}
2773 2772
2774static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) 2773static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
2775{ 2774{
2776 /* 2775 /*
2777 * only top level counters have the pid namespace they were created in 2776 * only top level events have the pid namespace they were created in
2778 */ 2777 */
2779 if (counter->parent) 2778 if (event->parent)
2780 counter = counter->parent; 2779 event = event->parent;
2781 2780
2782 return task_pid_nr_ns(p, counter->ns); 2781 return task_pid_nr_ns(p, event->ns);
2783} 2782}
2784 2783
2785static void perf_output_read_one(struct perf_output_handle *handle, 2784static void perf_output_read_one(struct perf_output_handle *handle,
2786 struct perf_counter *counter) 2785 struct perf_event *event)
2787{ 2786{
2788 u64 read_format = counter->attr.read_format; 2787 u64 read_format = event->attr.read_format;
2789 u64 values[4]; 2788 u64 values[4];
2790 int n = 0; 2789 int n = 0;
2791 2790
2792 values[n++] = atomic64_read(&counter->count); 2791 values[n++] = atomic64_read(&event->count);
2793 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2792 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2794 values[n++] = counter->total_time_enabled + 2793 values[n++] = event->total_time_enabled +
2795 atomic64_read(&counter->child_total_time_enabled); 2794 atomic64_read(&event->child_total_time_enabled);
2796 } 2795 }
2797 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2796 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2798 values[n++] = counter->total_time_running + 2797 values[n++] = event->total_time_running +
2799 atomic64_read(&counter->child_total_time_running); 2798 atomic64_read(&event->child_total_time_running);
2800 } 2799 }
2801 if (read_format & PERF_FORMAT_ID) 2800 if (read_format & PERF_FORMAT_ID)
2802 values[n++] = primary_counter_id(counter); 2801 values[n++] = primary_event_id(event);
2803 2802
2804 perf_output_copy(handle, values, n * sizeof(u64)); 2803 perf_output_copy(handle, values, n * sizeof(u64));
2805} 2804}
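perf_output_read_one() packs a variable number of u64s depending on the read_format mask. The selection logic, pulled out into a self-contained userspace sketch (the struct fields are stand-ins; the PERF_FORMAT_* bit values follow the perf ABI):

/* Pack read values by read_format flags, mirroring the n counter above. */
#include <stdint.h>
#include <stdio.h>

#define PERF_FORMAT_TOTAL_TIME_ENABLED  (1U << 0)
#define PERF_FORMAT_TOTAL_TIME_RUNNING  (1U << 1)
#define PERF_FORMAT_ID                  (1U << 2)

struct demo_event {
    uint64_t count, time_enabled, time_running, id;
};

static int pack_read_values(const struct demo_event *ev, uint64_t read_format,
                            uint64_t values[4])
{
    int n = 0;

    values[n++] = ev->count;
    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        values[n++] = ev->time_enabled;
    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        values[n++] = ev->time_running;
    if (read_format & PERF_FORMAT_ID)
        values[n++] = ev->id;

    return n;                  /* number of u64s copied to the ring buffer */
}

int main(void)
{
    struct demo_event ev = { .count = 1234, .time_enabled = 50,
                             .time_running = 40, .id = 7 };
    uint64_t values[4];
    int n = pack_read_values(&ev, PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID,
                             values);

    printf("packed %d values, last is id=%llu\n",
           n, (unsigned long long)values[n - 1]);
    return 0;
}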
2806 2805
2807/* 2806/*
2808 * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. 2807 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
2809 */ 2808 */
2810static void perf_output_read_group(struct perf_output_handle *handle, 2809static void perf_output_read_group(struct perf_output_handle *handle,
2811 struct perf_counter *counter) 2810 struct perf_event *event)
2812{ 2811{
2813 struct perf_counter *leader = counter->group_leader, *sub; 2812 struct perf_event *leader = event->group_leader, *sub;
2814 u64 read_format = counter->attr.read_format; 2813 u64 read_format = event->attr.read_format;
2815 u64 values[5]; 2814 u64 values[5];
2816 int n = 0; 2815 int n = 0;
2817 2816
@@ -2823,42 +2822,42 @@ static void perf_output_read_group(struct perf_output_handle *handle,
2823 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 2822 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2824 values[n++] = leader->total_time_running; 2823 values[n++] = leader->total_time_running;
2825 2824
2826 if (leader != counter) 2825 if (leader != event)
2827 leader->pmu->read(leader); 2826 leader->pmu->read(leader);
2828 2827
2829 values[n++] = atomic64_read(&leader->count); 2828 values[n++] = atomic64_read(&leader->count);
2830 if (read_format & PERF_FORMAT_ID) 2829 if (read_format & PERF_FORMAT_ID)
2831 values[n++] = primary_counter_id(leader); 2830 values[n++] = primary_event_id(leader);
2832 2831
2833 perf_output_copy(handle, values, n * sizeof(u64)); 2832 perf_output_copy(handle, values, n * sizeof(u64));
2834 2833
2835 list_for_each_entry(sub, &leader->sibling_list, list_entry) { 2834 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2836 n = 0; 2835 n = 0;
2837 2836
2838 if (sub != counter) 2837 if (sub != event)
2839 sub->pmu->read(sub); 2838 sub->pmu->read(sub);
2840 2839
2841 values[n++] = atomic64_read(&sub->count); 2840 values[n++] = atomic64_read(&sub->count);
2842 if (read_format & PERF_FORMAT_ID) 2841 if (read_format & PERF_FORMAT_ID)
2843 values[n++] = primary_counter_id(sub); 2842 values[n++] = primary_event_id(sub);
2844 2843
2845 perf_output_copy(handle, values, n * sizeof(u64)); 2844 perf_output_copy(handle, values, n * sizeof(u64));
2846 } 2845 }
2847} 2846}
2848 2847
2849static void perf_output_read(struct perf_output_handle *handle, 2848static void perf_output_read(struct perf_output_handle *handle,
2850 struct perf_counter *counter) 2849 struct perf_event *event)
2851{ 2850{
2852 if (counter->attr.read_format & PERF_FORMAT_GROUP) 2851 if (event->attr.read_format & PERF_FORMAT_GROUP)
2853 perf_output_read_group(handle, counter); 2852 perf_output_read_group(handle, event);
2854 else 2853 else
2855 perf_output_read_one(handle, counter); 2854 perf_output_read_one(handle, event);
2856} 2855}
2857 2856
2858void perf_output_sample(struct perf_output_handle *handle, 2857void perf_output_sample(struct perf_output_handle *handle,
2859 struct perf_event_header *header, 2858 struct perf_event_header *header,
2860 struct perf_sample_data *data, 2859 struct perf_sample_data *data,
2861 struct perf_counter *counter) 2860 struct perf_event *event)
2862{ 2861{
2863 u64 sample_type = data->type; 2862 u64 sample_type = data->type;
2864 2863
@@ -2889,7 +2888,7 @@ void perf_output_sample(struct perf_output_handle *handle,
2889 perf_output_put(handle, data->period); 2888 perf_output_put(handle, data->period);
2890 2889
2891 if (sample_type & PERF_SAMPLE_READ) 2890 if (sample_type & PERF_SAMPLE_READ)
2892 perf_output_read(handle, counter); 2891 perf_output_read(handle, event);
2893 2892
2894 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2893 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2895 if (data->callchain) { 2894 if (data->callchain) {
@@ -2927,14 +2926,14 @@ void perf_output_sample(struct perf_output_handle *handle,
2927 2926
2928void perf_prepare_sample(struct perf_event_header *header, 2927void perf_prepare_sample(struct perf_event_header *header,
2929 struct perf_sample_data *data, 2928 struct perf_sample_data *data,
2930 struct perf_counter *counter, 2929 struct perf_event *event,
2931 struct pt_regs *regs) 2930 struct pt_regs *regs)
2932{ 2931{
2933 u64 sample_type = counter->attr.sample_type; 2932 u64 sample_type = event->attr.sample_type;
2934 2933
2935 data->type = sample_type; 2934 data->type = sample_type;
2936 2935
2937 header->type = PERF_EVENT_SAMPLE; 2936 header->type = PERF_RECORD_SAMPLE;
2938 header->size = sizeof(*header); 2937 header->size = sizeof(*header);
2939 2938
2940 header->misc = 0; 2939 header->misc = 0;
@@ -2948,8 +2947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
2948 2947
2949 if (sample_type & PERF_SAMPLE_TID) { 2948 if (sample_type & PERF_SAMPLE_TID) {
2950 /* namespace issues */ 2949 /* namespace issues */
2951 data->tid_entry.pid = perf_counter_pid(counter, current); 2950 data->tid_entry.pid = perf_event_pid(event, current);
2952 data->tid_entry.tid = perf_counter_tid(counter, current); 2951 data->tid_entry.tid = perf_event_tid(event, current);
2953 2952
2954 header->size += sizeof(data->tid_entry); 2953 header->size += sizeof(data->tid_entry);
2955 } 2954 }
@@ -2964,13 +2963,13 @@ void perf_prepare_sample(struct perf_event_header *header,
2964 header->size += sizeof(data->addr); 2963 header->size += sizeof(data->addr);
2965 2964
2966 if (sample_type & PERF_SAMPLE_ID) { 2965 if (sample_type & PERF_SAMPLE_ID) {
2967 data->id = primary_counter_id(counter); 2966 data->id = primary_event_id(event);
2968 2967
2969 header->size += sizeof(data->id); 2968 header->size += sizeof(data->id);
2970 } 2969 }
2971 2970
2972 if (sample_type & PERF_SAMPLE_STREAM_ID) { 2971 if (sample_type & PERF_SAMPLE_STREAM_ID) {
2973 data->stream_id = counter->id; 2972 data->stream_id = event->id;
2974 2973
2975 header->size += sizeof(data->stream_id); 2974 header->size += sizeof(data->stream_id);
2976 } 2975 }
@@ -2986,7 +2985,7 @@ void perf_prepare_sample(struct perf_event_header *header,
2986 header->size += sizeof(data->period); 2985 header->size += sizeof(data->period);
2987 2986
2988 if (sample_type & PERF_SAMPLE_READ) 2987 if (sample_type & PERF_SAMPLE_READ)
2989 header->size += perf_counter_read_size(counter); 2988 header->size += perf_event_read_size(event);
2990 2989
2991 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2990 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2992 int size = 1; 2991 int size = 1;
@@ -3012,25 +3011,25 @@ void perf_prepare_sample(struct perf_event_header *header,
3012 } 3011 }
3013} 3012}
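perf_prepare_sample() sizes the record before perf_output_begin() reserves space for it. The same accumulate-by-flag pattern in isolation (a userspace sketch covering only a few fixed-size fields; the callchain and raw parts shown above add variable-length data on top):

/* Sketch of how header.size grows with each requested sample field. */
#include <stdint.h>
#include <stdio.h>

#define PERF_SAMPLE_IP     (1U << 0)
#define PERF_SAMPLE_TID    (1U << 1)
#define PERF_SAMPLE_TIME   (1U << 2)
#define PERF_SAMPLE_ADDR   (1U << 3)
#define PERF_SAMPLE_ID     (1U << 6)
#define PERF_SAMPLE_PERIOD (1U << 8)

static uint16_t sample_header_size(uint64_t sample_type)
{
    uint16_t size = 8;                 /* struct perf_event_header itself */

    if (sample_type & PERF_SAMPLE_IP)
        size += sizeof(uint64_t);
    if (sample_type & PERF_SAMPLE_TID)
        size += 2 * sizeof(uint32_t);  /* pid + tid */
    if (sample_type & PERF_SAMPLE_TIME)
        size += sizeof(uint64_t);
    if (sample_type & PERF_SAMPLE_ADDR)
        size += sizeof(uint64_t);
    if (sample_type & PERF_SAMPLE_ID)
        size += sizeof(uint64_t);
    if (sample_type & PERF_SAMPLE_PERIOD)
        size += sizeof(uint64_t);

    return size;
}

int main(void)
{
    uint64_t st = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD;

    printf("header.size = %u bytes\n", (unsigned)sample_header_size(st));
    return 0;
}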
3014 3013
3015static void perf_counter_output(struct perf_counter *counter, int nmi, 3014static void perf_event_output(struct perf_event *event, int nmi,
3016 struct perf_sample_data *data, 3015 struct perf_sample_data *data,
3017 struct pt_regs *regs) 3016 struct pt_regs *regs)
3018{ 3017{
3019 struct perf_output_handle handle; 3018 struct perf_output_handle handle;
3020 struct perf_event_header header; 3019 struct perf_event_header header;
3021 3020
3022 perf_prepare_sample(&header, data, counter, regs); 3021 perf_prepare_sample(&header, data, event, regs);
3023 3022
3024 if (perf_output_begin(&handle, counter, header.size, nmi, 1)) 3023 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3025 return; 3024 return;
3026 3025
3027 perf_output_sample(&handle, &header, data, counter); 3026 perf_output_sample(&handle, &header, data, event);
3028 3027
3029 perf_output_end(&handle); 3028 perf_output_end(&handle);
3030} 3029}
3031 3030
3032/* 3031/*
3033 * read event 3032 * read event_id
3034 */ 3033 */
3035 3034
3036struct perf_read_event { 3035struct perf_read_event {
@@ -3041,27 +3040,27 @@ struct perf_read_event {
3041}; 3040};
3042 3041
3043static void 3042static void
3044perf_counter_read_event(struct perf_counter *counter, 3043perf_event_read_event(struct perf_event *event,
3045 struct task_struct *task) 3044 struct task_struct *task)
3046{ 3045{
3047 struct perf_output_handle handle; 3046 struct perf_output_handle handle;
3048 struct perf_read_event event = { 3047 struct perf_read_event read_event = {
3049 .header = { 3048 .header = {
3050 .type = PERF_EVENT_READ, 3049 .type = PERF_RECORD_READ,
3051 .misc = 0, 3050 .misc = 0,
3052 .size = sizeof(event) + perf_counter_read_size(counter), 3051 .size = sizeof(read_event) + perf_event_read_size(event),
3053 }, 3052 },
3054 .pid = perf_counter_pid(counter, task), 3053 .pid = perf_event_pid(event, task),
3055 .tid = perf_counter_tid(counter, task), 3054 .tid = perf_event_tid(event, task),
3056 }; 3055 };
3057 int ret; 3056 int ret;
3058 3057
3059 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); 3058 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3060 if (ret) 3059 if (ret)
3061 return; 3060 return;
3062 3061
3063 perf_output_put(&handle, event); 3062 perf_output_put(&handle, read_event);
3064 perf_output_read(&handle, counter); 3063 perf_output_read(&handle, event);
3065 3064
3066 perf_output_end(&handle); 3065 perf_output_end(&handle);
3067} 3066}
@@ -3074,7 +3073,7 @@ perf_counter_read_event(struct perf_counter *counter,
3074 3073
3075struct perf_task_event { 3074struct perf_task_event {
3076 struct task_struct *task; 3075 struct task_struct *task;
3077 struct perf_counter_context *task_ctx; 3076 struct perf_event_context *task_ctx;
3078 3077
3079 struct { 3078 struct {
3080 struct perf_event_header header; 3079 struct perf_event_header header;
@@ -3084,10 +3083,10 @@ struct perf_task_event {
3084 u32 tid; 3083 u32 tid;
3085 u32 ptid; 3084 u32 ptid;
3086 u64 time; 3085 u64 time;
3087 } event; 3086 } event_id;
3088}; 3087};
3089 3088
3090static void perf_counter_task_output(struct perf_counter *counter, 3089static void perf_event_task_output(struct perf_event *event,
3091 struct perf_task_event *task_event) 3090 struct perf_task_event *task_event)
3092{ 3091{
3093 struct perf_output_handle handle; 3092 struct perf_output_handle handle;
@@ -3095,85 +3094,85 @@ static void perf_counter_task_output(struct perf_counter *counter,
3095 struct task_struct *task = task_event->task; 3094 struct task_struct *task = task_event->task;
3096 int ret; 3095 int ret;
3097 3096
3098 size = task_event->event.header.size; 3097 size = task_event->event_id.header.size;
3099 ret = perf_output_begin(&handle, counter, size, 0, 0); 3098 ret = perf_output_begin(&handle, event, size, 0, 0);
3100 3099
3101 if (ret) 3100 if (ret)
3102 return; 3101 return;
3103 3102
3104 task_event->event.pid = perf_counter_pid(counter, task); 3103 task_event->event_id.pid = perf_event_pid(event, task);
3105 task_event->event.ppid = perf_counter_pid(counter, current); 3104 task_event->event_id.ppid = perf_event_pid(event, current);
3106 3105
3107 task_event->event.tid = perf_counter_tid(counter, task); 3106 task_event->event_id.tid = perf_event_tid(event, task);
3108 task_event->event.ptid = perf_counter_tid(counter, current); 3107 task_event->event_id.ptid = perf_event_tid(event, current);
3109 3108
3110 task_event->event.time = perf_clock(); 3109 task_event->event_id.time = perf_clock();
3111 3110
3112 perf_output_put(&handle, task_event->event); 3111 perf_output_put(&handle, task_event->event_id);
3113 3112
3114 perf_output_end(&handle); 3113 perf_output_end(&handle);
3115} 3114}
3116 3115
3117static int perf_counter_task_match(struct perf_counter *counter) 3116static int perf_event_task_match(struct perf_event *event)
3118{ 3117{
3119 if (counter->attr.comm || counter->attr.mmap || counter->attr.task) 3118 if (event->attr.comm || event->attr.mmap || event->attr.task)
3120 return 1; 3119 return 1;
3121 3120
3122 return 0; 3121 return 0;
3123} 3122}
3124 3123
3125static void perf_counter_task_ctx(struct perf_counter_context *ctx, 3124static void perf_event_task_ctx(struct perf_event_context *ctx,
3126 struct perf_task_event *task_event) 3125 struct perf_task_event *task_event)
3127{ 3126{
3128 struct perf_counter *counter; 3127 struct perf_event *event;
3129 3128
3130 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3129 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3131 return; 3130 return;
3132 3131
3133 rcu_read_lock(); 3132 rcu_read_lock();
3134 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3133 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3135 if (perf_counter_task_match(counter)) 3134 if (perf_event_task_match(event))
3136 perf_counter_task_output(counter, task_event); 3135 perf_event_task_output(event, task_event);
3137 } 3136 }
3138 rcu_read_unlock(); 3137 rcu_read_unlock();
3139} 3138}
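The task, comm and mmap side-band paths all follow the same shape: walk the context's event list, test a match predicate derived from the event attributes, and hand the record to every event that asked for it. Boiled down to a userspace sketch (the names are stand-ins and printf replaces the perf_output_* calls):

/* Match-then-output pattern shared by the side-band record paths. */
#include <stdio.h>

struct demo_event {
    int attr_comm, attr_mmap, attr_task;
    const char *name;
};

static int task_match(const struct demo_event *ev)
{
    return ev->attr_comm || ev->attr_mmap || ev->attr_task;
}

static void task_output(const struct demo_event *ev)
{
    printf("fork/exit record delivered to event '%s'\n", ev->name);
}

int main(void)
{
    struct demo_event ctx_events[] = {
        { .attr_task = 1, .name = "profiler" },
        { .attr_comm = 0, .attr_mmap = 0, .attr_task = 0, .name = "plain counter" },
        { .attr_mmap = 1, .name = "mapper" },
    };

    for (unsigned i = 0; i < sizeof(ctx_events) / sizeof(ctx_events[0]); i++)
        if (task_match(&ctx_events[i]))
            task_output(&ctx_events[i]);
    return 0;
}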
3140 3139
3141static void perf_counter_task_event(struct perf_task_event *task_event) 3140static void perf_event_task_event(struct perf_task_event *task_event)
3142{ 3141{
3143 struct perf_cpu_context *cpuctx; 3142 struct perf_cpu_context *cpuctx;
3144 struct perf_counter_context *ctx = task_event->task_ctx; 3143 struct perf_event_context *ctx = task_event->task_ctx;
3145 3144
3146 cpuctx = &get_cpu_var(perf_cpu_context); 3145 cpuctx = &get_cpu_var(perf_cpu_context);
3147 perf_counter_task_ctx(&cpuctx->ctx, task_event); 3146 perf_event_task_ctx(&cpuctx->ctx, task_event);
3148 put_cpu_var(perf_cpu_context); 3147 put_cpu_var(perf_cpu_context);
3149 3148
3150 rcu_read_lock(); 3149 rcu_read_lock();
3151 if (!ctx) 3150 if (!ctx)
3152 ctx = rcu_dereference(task_event->task->perf_counter_ctxp); 3151 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
3153 if (ctx) 3152 if (ctx)
3154 perf_counter_task_ctx(ctx, task_event); 3153 perf_event_task_ctx(ctx, task_event);
3155 rcu_read_unlock(); 3154 rcu_read_unlock();
3156} 3155}
3157 3156
3158static void perf_counter_task(struct task_struct *task, 3157static void perf_event_task(struct task_struct *task,
3159 struct perf_counter_context *task_ctx, 3158 struct perf_event_context *task_ctx,
3160 int new) 3159 int new)
3161{ 3160{
3162 struct perf_task_event task_event; 3161 struct perf_task_event task_event;
3163 3162
3164 if (!atomic_read(&nr_comm_counters) && 3163 if (!atomic_read(&nr_comm_events) &&
3165 !atomic_read(&nr_mmap_counters) && 3164 !atomic_read(&nr_mmap_events) &&
3166 !atomic_read(&nr_task_counters)) 3165 !atomic_read(&nr_task_events))
3167 return; 3166 return;
3168 3167
3169 task_event = (struct perf_task_event){ 3168 task_event = (struct perf_task_event){
3170 .task = task, 3169 .task = task,
3171 .task_ctx = task_ctx, 3170 .task_ctx = task_ctx,
3172 .event = { 3171 .event_id = {
3173 .header = { 3172 .header = {
3174 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, 3173 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3175 .misc = 0, 3174 .misc = 0,
3176 .size = sizeof(task_event.event), 3175 .size = sizeof(task_event.event_id),
3177 }, 3176 },
3178 /* .pid */ 3177 /* .pid */
3179 /* .ppid */ 3178 /* .ppid */
@@ -3182,12 +3181,12 @@ static void perf_counter_task(struct task_struct *task,
3182 }, 3181 },
3183 }; 3182 };
3184 3183
3185 perf_counter_task_event(&task_event); 3184 perf_event_task_event(&task_event);
3186} 3185}
3187 3186
3188void perf_counter_fork(struct task_struct *task) 3187void perf_event_fork(struct task_struct *task)
3189{ 3188{
3190 perf_counter_task(task, NULL, 1); 3189 perf_event_task(task, NULL, 1);
3191} 3190}
3192 3191
3193/* 3192/*
@@ -3204,56 +3203,56 @@ struct perf_comm_event {
3204 3203
3205 u32 pid; 3204 u32 pid;
3206 u32 tid; 3205 u32 tid;
3207 } event; 3206 } event_id;
3208}; 3207};
3209 3208
3210static void perf_counter_comm_output(struct perf_counter *counter, 3209static void perf_event_comm_output(struct perf_event *event,
3211 struct perf_comm_event *comm_event) 3210 struct perf_comm_event *comm_event)
3212{ 3211{
3213 struct perf_output_handle handle; 3212 struct perf_output_handle handle;
3214 int size = comm_event->event.header.size; 3213 int size = comm_event->event_id.header.size;
3215 int ret = perf_output_begin(&handle, counter, size, 0, 0); 3214 int ret = perf_output_begin(&handle, event, size, 0, 0);
3216 3215
3217 if (ret) 3216 if (ret)
3218 return; 3217 return;
3219 3218
3220 comm_event->event.pid = perf_counter_pid(counter, comm_event->task); 3219 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3221 comm_event->event.tid = perf_counter_tid(counter, comm_event->task); 3220 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3222 3221
3223 perf_output_put(&handle, comm_event->event); 3222 perf_output_put(&handle, comm_event->event_id);
3224 perf_output_copy(&handle, comm_event->comm, 3223 perf_output_copy(&handle, comm_event->comm,
3225 comm_event->comm_size); 3224 comm_event->comm_size);
3226 perf_output_end(&handle); 3225 perf_output_end(&handle);
3227} 3226}
3228 3227
3229static int perf_counter_comm_match(struct perf_counter *counter) 3228static int perf_event_comm_match(struct perf_event *event)
3230{ 3229{
3231 if (counter->attr.comm) 3230 if (event->attr.comm)
3232 return 1; 3231 return 1;
3233 3232
3234 return 0; 3233 return 0;
3235} 3234}
3236 3235
3237static void perf_counter_comm_ctx(struct perf_counter_context *ctx, 3236static void perf_event_comm_ctx(struct perf_event_context *ctx,
3238 struct perf_comm_event *comm_event) 3237 struct perf_comm_event *comm_event)
3239{ 3238{
3240 struct perf_counter *counter; 3239 struct perf_event *event;
3241 3240
3242 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3241 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3243 return; 3242 return;
3244 3243
3245 rcu_read_lock(); 3244 rcu_read_lock();
3246 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3245 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3247 if (perf_counter_comm_match(counter)) 3246 if (perf_event_comm_match(event))
3248 perf_counter_comm_output(counter, comm_event); 3247 perf_event_comm_output(event, comm_event);
3249 } 3248 }
3250 rcu_read_unlock(); 3249 rcu_read_unlock();
3251} 3250}
3252 3251
3253static void perf_counter_comm_event(struct perf_comm_event *comm_event) 3252static void perf_event_comm_event(struct perf_comm_event *comm_event)
3254{ 3253{
3255 struct perf_cpu_context *cpuctx; 3254 struct perf_cpu_context *cpuctx;
3256 struct perf_counter_context *ctx; 3255 struct perf_event_context *ctx;
3257 unsigned int size; 3256 unsigned int size;
3258 char comm[TASK_COMM_LEN]; 3257 char comm[TASK_COMM_LEN];
3259 3258
@@ -3264,10 +3263,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3264 comm_event->comm = comm; 3263 comm_event->comm = comm;
3265 comm_event->comm_size = size; 3264 comm_event->comm_size = size;
3266 3265
3267 comm_event->event.header.size = sizeof(comm_event->event) + size; 3266 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3268 3267
3269 cpuctx = &get_cpu_var(perf_cpu_context); 3268 cpuctx = &get_cpu_var(perf_cpu_context);
3270 perf_counter_comm_ctx(&cpuctx->ctx, comm_event); 3269 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3271 put_cpu_var(perf_cpu_context); 3270 put_cpu_var(perf_cpu_context);
3272 3271
3273 rcu_read_lock(); 3272 rcu_read_lock();
@@ -3275,29 +3274,29 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
3275 * doesn't really matter which of the child contexts the 3274 * doesn't really matter which of the child contexts the
3276 * events ends up in. 3275 * events ends up in.
3277 */ 3276 */
3278 ctx = rcu_dereference(current->perf_counter_ctxp); 3277 ctx = rcu_dereference(current->perf_event_ctxp);
3279 if (ctx) 3278 if (ctx)
3280 perf_counter_comm_ctx(ctx, comm_event); 3279 perf_event_comm_ctx(ctx, comm_event);
3281 rcu_read_unlock(); 3280 rcu_read_unlock();
3282} 3281}
3283 3282
3284void perf_counter_comm(struct task_struct *task) 3283void perf_event_comm(struct task_struct *task)
3285{ 3284{
3286 struct perf_comm_event comm_event; 3285 struct perf_comm_event comm_event;
3287 3286
3288 if (task->perf_counter_ctxp) 3287 if (task->perf_event_ctxp)
3289 perf_counter_enable_on_exec(task); 3288 perf_event_enable_on_exec(task);
3290 3289
3291 if (!atomic_read(&nr_comm_counters)) 3290 if (!atomic_read(&nr_comm_events))
3292 return; 3291 return;
3293 3292
3294 comm_event = (struct perf_comm_event){ 3293 comm_event = (struct perf_comm_event){
3295 .task = task, 3294 .task = task,
3296 /* .comm */ 3295 /* .comm */
3297 /* .comm_size */ 3296 /* .comm_size */
3298 .event = { 3297 .event_id = {
3299 .header = { 3298 .header = {
3300 .type = PERF_EVENT_COMM, 3299 .type = PERF_RECORD_COMM,
3301 .misc = 0, 3300 .misc = 0,
3302 /* .size */ 3301 /* .size */
3303 }, 3302 },
@@ -3306,7 +3305,7 @@ void perf_counter_comm(struct task_struct *task)
3306 }, 3305 },
3307 }; 3306 };
3308 3307
3309 perf_counter_comm_event(&comm_event); 3308 perf_event_comm_event(&comm_event);
3310} 3309}
3311 3310
3312/* 3311/*
@@ -3327,57 +3326,57 @@ struct perf_mmap_event {
3327 u64 start; 3326 u64 start;
3328 u64 len; 3327 u64 len;
3329 u64 pgoff; 3328 u64 pgoff;
3330 } event; 3329 } event_id;
3331}; 3330};
3332 3331
3333static void perf_counter_mmap_output(struct perf_counter *counter, 3332static void perf_event_mmap_output(struct perf_event *event,
3334 struct perf_mmap_event *mmap_event) 3333 struct perf_mmap_event *mmap_event)
3335{ 3334{
3336 struct perf_output_handle handle; 3335 struct perf_output_handle handle;
3337 int size = mmap_event->event.header.size; 3336 int size = mmap_event->event_id.header.size;
3338 int ret = perf_output_begin(&handle, counter, size, 0, 0); 3337 int ret = perf_output_begin(&handle, event, size, 0, 0);
3339 3338
3340 if (ret) 3339 if (ret)
3341 return; 3340 return;
3342 3341
3343 mmap_event->event.pid = perf_counter_pid(counter, current); 3342 mmap_event->event_id.pid = perf_event_pid(event, current);
3344 mmap_event->event.tid = perf_counter_tid(counter, current); 3343 mmap_event->event_id.tid = perf_event_tid(event, current);
3345 3344
3346 perf_output_put(&handle, mmap_event->event); 3345 perf_output_put(&handle, mmap_event->event_id);
3347 perf_output_copy(&handle, mmap_event->file_name, 3346 perf_output_copy(&handle, mmap_event->file_name,
3348 mmap_event->file_size); 3347 mmap_event->file_size);
3349 perf_output_end(&handle); 3348 perf_output_end(&handle);
3350} 3349}
3351 3350
3352static int perf_counter_mmap_match(struct perf_counter *counter, 3351static int perf_event_mmap_match(struct perf_event *event,
3353 struct perf_mmap_event *mmap_event) 3352 struct perf_mmap_event *mmap_event)
3354{ 3353{
3355 if (counter->attr.mmap) 3354 if (event->attr.mmap)
3356 return 1; 3355 return 1;
3357 3356
3358 return 0; 3357 return 0;
3359} 3358}
3360 3359
3361static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, 3360static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3362 struct perf_mmap_event *mmap_event) 3361 struct perf_mmap_event *mmap_event)
3363{ 3362{
3364 struct perf_counter *counter; 3363 struct perf_event *event;
3365 3364
3366 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3365 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3367 return; 3366 return;
3368 3367
3369 rcu_read_lock(); 3368 rcu_read_lock();
3370 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3369 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3371 if (perf_counter_mmap_match(counter, mmap_event)) 3370 if (perf_event_mmap_match(event, mmap_event))
3372 perf_counter_mmap_output(counter, mmap_event); 3371 perf_event_mmap_output(event, mmap_event);
3373 } 3372 }
3374 rcu_read_unlock(); 3373 rcu_read_unlock();
3375} 3374}
3376 3375
3377static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) 3376static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3378{ 3377{
3379 struct perf_cpu_context *cpuctx; 3378 struct perf_cpu_context *cpuctx;
3380 struct perf_counter_context *ctx; 3379 struct perf_event_context *ctx;
3381 struct vm_area_struct *vma = mmap_event->vma; 3380 struct vm_area_struct *vma = mmap_event->vma;
3382 struct file *file = vma->vm_file; 3381 struct file *file = vma->vm_file;
3383 unsigned int size; 3382 unsigned int size;
@@ -3425,10 +3424,10 @@ got_name:
3425 mmap_event->file_name = name; 3424 mmap_event->file_name = name;
3426 mmap_event->file_size = size; 3425 mmap_event->file_size = size;
3427 3426
3428 mmap_event->event.header.size = sizeof(mmap_event->event) + size; 3427 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3429 3428
3430 cpuctx = &get_cpu_var(perf_cpu_context); 3429 cpuctx = &get_cpu_var(perf_cpu_context);
3431 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); 3430 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3432 put_cpu_var(perf_cpu_context); 3431 put_cpu_var(perf_cpu_context);
3433 3432
3434 rcu_read_lock(); 3433 rcu_read_lock();
@@ -3436,28 +3435,28 @@ got_name:
3436 * doesn't really matter which of the child contexts the 3435 * doesn't really matter which of the child contexts the
3437 * events ends up in. 3436 * events ends up in.
3438 */ 3437 */
3439 ctx = rcu_dereference(current->perf_counter_ctxp); 3438 ctx = rcu_dereference(current->perf_event_ctxp);
3440 if (ctx) 3439 if (ctx)
3441 perf_counter_mmap_ctx(ctx, mmap_event); 3440 perf_event_mmap_ctx(ctx, mmap_event);
3442 rcu_read_unlock(); 3441 rcu_read_unlock();
3443 3442
3444 kfree(buf); 3443 kfree(buf);
3445} 3444}
3446 3445
3447void __perf_counter_mmap(struct vm_area_struct *vma) 3446void __perf_event_mmap(struct vm_area_struct *vma)
3448{ 3447{
3449 struct perf_mmap_event mmap_event; 3448 struct perf_mmap_event mmap_event;
3450 3449
3451 if (!atomic_read(&nr_mmap_counters)) 3450 if (!atomic_read(&nr_mmap_events))
3452 return; 3451 return;
3453 3452
3454 mmap_event = (struct perf_mmap_event){ 3453 mmap_event = (struct perf_mmap_event){
3455 .vma = vma, 3454 .vma = vma,
3456 /* .file_name */ 3455 /* .file_name */
3457 /* .file_size */ 3456 /* .file_size */
3458 .event = { 3457 .event_id = {
3459 .header = { 3458 .header = {
3460 .type = PERF_EVENT_MMAP, 3459 .type = PERF_RECORD_MMAP,
3461 .misc = 0, 3460 .misc = 0,
3462 /* .size */ 3461 /* .size */
3463 }, 3462 },
@@ -3469,14 +3468,14 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
3469 }, 3468 },
3470 }; 3469 };
3471 3470
3472 perf_counter_mmap_event(&mmap_event); 3471 perf_event_mmap_event(&mmap_event);
3473} 3472}
3474 3473
3475/* 3474/*
3476 * IRQ throttle logging 3475 * IRQ throttle logging
3477 */ 3476 */
3478 3477
3479static void perf_log_throttle(struct perf_counter *counter, int enable) 3478static void perf_log_throttle(struct perf_event *event, int enable)
3480{ 3479{
3481 struct perf_output_handle handle; 3480 struct perf_output_handle handle;
3482 int ret; 3481 int ret;
@@ -3488,19 +3487,19 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3488 u64 stream_id; 3487 u64 stream_id;
3489 } throttle_event = { 3488 } throttle_event = {
3490 .header = { 3489 .header = {
3491 .type = PERF_EVENT_THROTTLE, 3490 .type = PERF_RECORD_THROTTLE,
3492 .misc = 0, 3491 .misc = 0,
3493 .size = sizeof(throttle_event), 3492 .size = sizeof(throttle_event),
3494 }, 3493 },
3495 .time = perf_clock(), 3494 .time = perf_clock(),
3496 .id = primary_counter_id(counter), 3495 .id = primary_event_id(event),
3497 .stream_id = counter->id, 3496 .stream_id = event->id,
3498 }; 3497 };
3499 3498
3500 if (enable) 3499 if (enable)
3501 throttle_event.header.type = PERF_EVENT_UNTHROTTLE; 3500 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3502 3501
3503 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); 3502 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3504 if (ret) 3503 if (ret)
3505 return; 3504 return;
3506 3505
@@ -3509,18 +3508,18 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3509} 3508}
3510 3509
3511/* 3510/*
3512 * Generic counter overflow handling, sampling. 3511 * Generic event overflow handling, sampling.
3513 */ 3512 */
3514 3513
3515static int __perf_counter_overflow(struct perf_counter *counter, int nmi, 3514static int __perf_event_overflow(struct perf_event *event, int nmi,
3516 int throttle, struct perf_sample_data *data, 3515 int throttle, struct perf_sample_data *data,
3517 struct pt_regs *regs) 3516 struct pt_regs *regs)
3518{ 3517{
3519 int events = atomic_read(&counter->event_limit); 3518 int events = atomic_read(&event->event_limit);
3520 struct hw_perf_counter *hwc = &counter->hw; 3519 struct hw_perf_event *hwc = &event->hw;
3521 int ret = 0; 3520 int ret = 0;
3522 3521
3523 throttle = (throttle && counter->pmu->unthrottle != NULL); 3522 throttle = (throttle && event->pmu->unthrottle != NULL);
3524 3523
3525 if (!throttle) { 3524 if (!throttle) {
3526 hwc->interrupts++; 3525 hwc->interrupts++;
@@ -3528,73 +3527,73 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
3528 if (hwc->interrupts != MAX_INTERRUPTS) { 3527 if (hwc->interrupts != MAX_INTERRUPTS) {
3529 hwc->interrupts++; 3528 hwc->interrupts++;
3530 if (HZ * hwc->interrupts > 3529 if (HZ * hwc->interrupts >
3531 (u64)sysctl_perf_counter_sample_rate) { 3530 (u64)sysctl_perf_event_sample_rate) {
3532 hwc->interrupts = MAX_INTERRUPTS; 3531 hwc->interrupts = MAX_INTERRUPTS;
3533 perf_log_throttle(counter, 0); 3532 perf_log_throttle(event, 0);
3534 ret = 1; 3533 ret = 1;
3535 } 3534 }
3536 } else { 3535 } else {
3537 /* 3536 /*
3538 * Keep re-disabling counters even though on the previous 3537 * Keep re-disabling events even though on the previous
3539 * pass we disabled it - just in case we raced with a 3538 * pass we disabled it - just in case we raced with a
3540 * sched-in and the counter got enabled again: 3539 * sched-in and the event got enabled again:
3541 */ 3540 */
3542 ret = 1; 3541 ret = 1;
3543 } 3542 }
3544 } 3543 }
3545 3544
3546 if (counter->attr.freq) { 3545 if (event->attr.freq) {
3547 u64 now = perf_clock(); 3546 u64 now = perf_clock();
3548 s64 delta = now - hwc->freq_stamp; 3547 s64 delta = now - hwc->freq_stamp;
3549 3548
3550 hwc->freq_stamp = now; 3549 hwc->freq_stamp = now;
3551 3550
3552 if (delta > 0 && delta < TICK_NSEC) 3551 if (delta > 0 && delta < TICK_NSEC)
3553 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); 3552 perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
3554 } 3553 }
3555 3554
3556 /* 3555 /*
3557 * XXX event_limit might not quite work as expected on inherited 3556 * XXX event_limit might not quite work as expected on inherited
3558 * counters 3557 * events
3559 */ 3558 */
3560 3559
3561 counter->pending_kill = POLL_IN; 3560 event->pending_kill = POLL_IN;
3562 if (events && atomic_dec_and_test(&counter->event_limit)) { 3561 if (events && atomic_dec_and_test(&event->event_limit)) {
3563 ret = 1; 3562 ret = 1;
3564 counter->pending_kill = POLL_HUP; 3563 event->pending_kill = POLL_HUP;
3565 if (nmi) { 3564 if (nmi) {
3566 counter->pending_disable = 1; 3565 event->pending_disable = 1;
3567 perf_pending_queue(&counter->pending, 3566 perf_pending_queue(&event->pending,
3568 perf_pending_counter); 3567 perf_pending_event);
3569 } else 3568 } else
3570 perf_counter_disable(counter); 3569 perf_event_disable(event);
3571 } 3570 }
3572 3571
3573 perf_counter_output(counter, nmi, data, regs); 3572 perf_event_output(event, nmi, data, regs);
3574 return ret; 3573 return ret;
3575} 3574}
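The throttling test in the hunk above compares the per-tick interrupt count against the sample-rate sysctl. Isolated into a runnable sketch (HZ and the sysctl default are assumptions here, and the real code clears hwc->interrupts again from the tick/unthrottle path):

/* Model of the overflow throttle check in __perf_event_overflow(). */
#include <stdio.h>

#define HZ             1000
#define MAX_INTERRUPTS (~0U)

struct demo_hw {
    unsigned int interrupts;
};

/* Returns 1 if this overflow should be throttled (sample dropped). */
static int should_throttle(struct demo_hw *hwc, unsigned int sample_rate)
{
    if (hwc->interrupts == MAX_INTERRUPTS)
        return 1;                       /* already throttled for this tick */

    hwc->interrupts++;
    if ((unsigned long long)HZ * hwc->interrupts > sample_rate) {
        hwc->interrupts = MAX_INTERRUPTS;
        return 1;                       /* the real code logs a throttle record here */
    }
    return 0;
}

int main(void)
{
    struct demo_hw hwc = { 0 };
    unsigned int sample_rate = 100000;  /* assumed sysctl value */
    int dropped = 0;

    for (int i = 0; i < 200; i++)
        dropped += should_throttle(&hwc, sample_rate);

    printf("dropped %d of 200 overflows\n", dropped);
    return 0;
}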
3576 3575
3577int perf_counter_overflow(struct perf_counter *counter, int nmi, 3576int perf_event_overflow(struct perf_event *event, int nmi,
3578 struct perf_sample_data *data, 3577 struct perf_sample_data *data,
3579 struct pt_regs *regs) 3578 struct pt_regs *regs)
3580{ 3579{
3581 return __perf_counter_overflow(counter, nmi, 1, data, regs); 3580 return __perf_event_overflow(event, nmi, 1, data, regs);
3582} 3581}
3583 3582
3584/* 3583/*
3585 * Generic software counter infrastructure 3584 * Generic software event infrastructure
3586 */ 3585 */
3587 3586
3588/* 3587/*
3589 * We directly increment counter->count and keep a second value in 3588 * We directly increment event->count and keep a second value in
3590 * counter->hw.period_left to count intervals. This period counter 3589 * event->hw.period_left to count intervals. This period event
3591 * is kept in the range [-sample_period, 0] so that we can use the 3590 * is kept in the range [-sample_period, 0] so that we can use the
3592 * sign as trigger. 3591 * sign as trigger.
3593 */ 3592 */
3594 3593
3595static u64 perf_swcounter_set_period(struct perf_counter *counter) 3594static u64 perf_swevent_set_period(struct perf_event *event)
3596{ 3595{
3597 struct hw_perf_counter *hwc = &counter->hw; 3596 struct hw_perf_event *hwc = &event->hw;
3598 u64 period = hwc->last_period; 3597 u64 period = hwc->last_period;
3599 u64 nr, offset; 3598 u64 nr, offset;
3600 s64 old, val; 3599 s64 old, val;
@@ -3615,22 +3614,22 @@ again:
3615 return nr; 3614 return nr;
3616} 3615}
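The comment above states the invariant: event->count is incremented directly while hw.period_left runs in [-sample_period, 0], and a sign flip marks a completed period. That bookkeeping as a standalone sketch (plain integers instead of atomic64, with the overflow handling collapsed into a return count):

/* Model of the software-event period accounting. */
#include <stdint.h>
#include <stdio.h>

struct demo_sw_event {
    uint64_t count;          /* total number of occurrences */
    int64_t  period_left;    /* kept in [-sample_period, 0] */
    uint64_t sample_period;
};

/* Add 'nr' occurrences; returns how many sample periods elapsed. */
static uint64_t sw_event_add(struct demo_sw_event *ev, uint64_t nr)
{
    uint64_t overflows = 0;

    ev->count += nr;
    ev->period_left += (int64_t)nr;

    while (ev->period_left >= 0) {       /* sign flip = period complete */
        ev->period_left -= (int64_t)ev->sample_period;
        overflows++;
    }
    return overflows;
}

int main(void)
{
    struct demo_sw_event ev = {
        .count = 0,
        .sample_period = 1000,
        .period_left = -1000,            /* armed for the first period */
    };

    printf("overflows after +2500: %llu\n",
           (unsigned long long)sw_event_add(&ev, 2500));   /* expect 2 */
    printf("overflows after +600:  %llu\n",
           (unsigned long long)sw_event_add(&ev, 600));    /* expect 1 */
    return 0;
}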
3617 3616
3618static void perf_swcounter_overflow(struct perf_counter *counter, 3617static void perf_swevent_overflow(struct perf_event *event,
3619 int nmi, struct perf_sample_data *data, 3618 int nmi, struct perf_sample_data *data,
3620 struct pt_regs *regs) 3619 struct pt_regs *regs)
3621{ 3620{
3622 struct hw_perf_counter *hwc = &counter->hw; 3621 struct hw_perf_event *hwc = &event->hw;
3623 int throttle = 0; 3622 int throttle = 0;
3624 u64 overflow; 3623 u64 overflow;
3625 3624
3626 data->period = counter->hw.last_period; 3625 data->period = event->hw.last_period;
3627 overflow = perf_swcounter_set_period(counter); 3626 overflow = perf_swevent_set_period(event);
3628 3627
3629 if (hwc->interrupts == MAX_INTERRUPTS) 3628 if (hwc->interrupts == MAX_INTERRUPTS)
3630 return; 3629 return;
3631 3630
3632 for (; overflow; overflow--) { 3631 for (; overflow; overflow--) {
3633 if (__perf_counter_overflow(counter, nmi, throttle, 3632 if (__perf_event_overflow(event, nmi, throttle,
3634 data, regs)) { 3633 data, regs)) {
3635 /* 3634 /*
3636 * We inhibit the overflow from happening when 3635 * We inhibit the overflow from happening when
@@ -3642,20 +3641,20 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
3642 } 3641 }
3643} 3642}
3644 3643
3645static void perf_swcounter_unthrottle(struct perf_counter *counter) 3644static void perf_swevent_unthrottle(struct perf_event *event)
3646{ 3645{
3647 /* 3646 /*
3648 * Nothing to do, we already reset hwc->interrupts. 3647 * Nothing to do, we already reset hwc->interrupts.
3649 */ 3648 */
3650} 3649}
3651 3650
3652static void perf_swcounter_add(struct perf_counter *counter, u64 nr, 3651static void perf_swevent_add(struct perf_event *event, u64 nr,
3653 int nmi, struct perf_sample_data *data, 3652 int nmi, struct perf_sample_data *data,
3654 struct pt_regs *regs) 3653 struct pt_regs *regs)
3655{ 3654{
3656 struct hw_perf_counter *hwc = &counter->hw; 3655 struct hw_perf_event *hwc = &event->hw;
3657 3656
3658 atomic64_add(nr, &counter->count); 3657 atomic64_add(nr, &event->count);
3659 3658
3660 if (!hwc->sample_period) 3659 if (!hwc->sample_period)
3661 return; 3660 return;
@@ -3664,29 +3663,29 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3664 return; 3663 return;
3665 3664
3666 if (!atomic64_add_negative(nr, &hwc->period_left)) 3665 if (!atomic64_add_negative(nr, &hwc->period_left))
3667 perf_swcounter_overflow(counter, nmi, data, regs); 3666 perf_swevent_overflow(event, nmi, data, regs);
3668} 3667}
3669 3668
3670static int perf_swcounter_is_counting(struct perf_counter *counter) 3669static int perf_swevent_is_counting(struct perf_event *event)
3671{ 3670{
3672 /* 3671 /*
3673 * The counter is active, we're good! 3672 * The event is active, we're good!
3674 */ 3673 */
3675 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 3674 if (event->state == PERF_EVENT_STATE_ACTIVE)
3676 return 1; 3675 return 1;
3677 3676
3678 /* 3677 /*
3679 * The counter is off/error, not counting. 3678 * The event is off/error, not counting.
3680 */ 3679 */
3681 if (counter->state != PERF_COUNTER_STATE_INACTIVE) 3680 if (event->state != PERF_EVENT_STATE_INACTIVE)
3682 return 0; 3681 return 0;
3683 3682
3684 /* 3683 /*
3685 * The counter is inactive, if the context is active 3684 * The event is inactive, if the context is active
3686 * we're part of a group that didn't make it on the 'pmu', 3685 * we're part of a group that didn't make it on the 'pmu',
3687 * not counting. 3686 * not counting.
3688 */ 3687 */
3689 if (counter->ctx->is_active) 3688 if (event->ctx->is_active)
3690 return 0; 3689 return 0;
3691 3690
3692 /* 3691 /*
@@ -3697,49 +3696,49 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
3697 return 1; 3696 return 1;
3698} 3697}
3699 3698
3700static int perf_swcounter_match(struct perf_counter *counter, 3699static int perf_swevent_match(struct perf_event *event,
3701 enum perf_type_id type, 3700 enum perf_type_id type,
3702 u32 event, struct pt_regs *regs) 3701 u32 event_id, struct pt_regs *regs)
3703{ 3702{
3704 if (!perf_swcounter_is_counting(counter)) 3703 if (!perf_swevent_is_counting(event))
3705 return 0; 3704 return 0;
3706 3705
3707 if (counter->attr.type != type) 3706 if (event->attr.type != type)
3708 return 0; 3707 return 0;
3709 if (counter->attr.config != event) 3708 if (event->attr.config != event_id)
3710 return 0; 3709 return 0;
3711 3710
3712 if (regs) { 3711 if (regs) {
3713 if (counter->attr.exclude_user && user_mode(regs)) 3712 if (event->attr.exclude_user && user_mode(regs))
3714 return 0; 3713 return 0;
3715 3714
3716 if (counter->attr.exclude_kernel && !user_mode(regs)) 3715 if (event->attr.exclude_kernel && !user_mode(regs))
3717 return 0; 3716 return 0;
3718 } 3717 }
3719 3718
3720 return 1; 3719 return 1;
3721} 3720}
3722 3721
3723static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, 3722static void perf_swevent_ctx_event(struct perf_event_context *ctx,
3724 enum perf_type_id type, 3723 enum perf_type_id type,
3725 u32 event, u64 nr, int nmi, 3724 u32 event_id, u64 nr, int nmi,
3726 struct perf_sample_data *data, 3725 struct perf_sample_data *data,
3727 struct pt_regs *regs) 3726 struct pt_regs *regs)
3728{ 3727{
3729 struct perf_counter *counter; 3728 struct perf_event *event;
3730 3729
3731 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3730 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3732 return; 3731 return;
3733 3732
3734 rcu_read_lock(); 3733 rcu_read_lock();
3735 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3734 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3736 if (perf_swcounter_match(counter, type, event, regs)) 3735 if (perf_swevent_match(event, type, event_id, regs))
3737 perf_swcounter_add(counter, nr, nmi, data, regs); 3736 perf_swevent_add(event, nr, nmi, data, regs);
3738 } 3737 }
3739 rcu_read_unlock(); 3738 rcu_read_unlock();
3740} 3739}
3741 3740
3742static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) 3741static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
3743{ 3742{
3744 if (in_nmi()) 3743 if (in_nmi())
3745 return &cpuctx->recursion[3]; 3744 return &cpuctx->recursion[3];
@@ -3753,14 +3752,14 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3753 return &cpuctx->recursion[0]; 3752 return &cpuctx->recursion[0];
3754} 3753}
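The four recursion slots keep software events raised from task, softirq, hardirq and NMI context from stepping on one another. The slot selection can be shown with ordinary C; the in_nmi()/in_irq()/in_softirq() tests are replaced by an explicit enum, so this only models the dispatch, not the preempt-count machinery:

/* Model of perf_swevent_recursion_context() slot selection. */
#include <stdio.h>

enum ctx { CTX_TASK, CTX_SOFTIRQ, CTX_HARDIRQ, CTX_NMI };

struct demo_cpu_context {
    int recursion[4];
};

static int *recursion_slot(struct demo_cpu_context *cpuctx, enum ctx c)
{
    switch (c) {
    case CTX_NMI:     return &cpuctx->recursion[3];
    case CTX_HARDIRQ: return &cpuctx->recursion[2];
    case CTX_SOFTIRQ: return &cpuctx->recursion[1];
    default:          return &cpuctx->recursion[0];
    }
}

int main(void)
{
    struct demo_cpu_context cpuctx = { { 0 } };

    (*recursion_slot(&cpuctx, CTX_HARDIRQ))++;   /* enter from hardirq */
    (*recursion_slot(&cpuctx, CTX_NMI))++;       /* NMI nests, uses its own slot */
    printf("hardirq=%d nmi=%d task=%d\n",
           cpuctx.recursion[2], cpuctx.recursion[3], cpuctx.recursion[0]);
    return 0;
}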
3755 3754
3756static void do_perf_swcounter_event(enum perf_type_id type, u32 event, 3755static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
3757 u64 nr, int nmi, 3756 u64 nr, int nmi,
3758 struct perf_sample_data *data, 3757 struct perf_sample_data *data,
3759 struct pt_regs *regs) 3758 struct pt_regs *regs)
3760{ 3759{
3761 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); 3760 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3762 int *recursion = perf_swcounter_recursion_context(cpuctx); 3761 int *recursion = perf_swevent_recursion_context(cpuctx);
3763 struct perf_counter_context *ctx; 3762 struct perf_event_context *ctx;
3764 3763
3765 if (*recursion) 3764 if (*recursion)
3766 goto out; 3765 goto out;
@@ -3768,16 +3767,16 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3768 (*recursion)++; 3767 (*recursion)++;
3769 barrier(); 3768 barrier();
3770 3769
3771 perf_swcounter_ctx_event(&cpuctx->ctx, type, event, 3770 perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
3772 nr, nmi, data, regs); 3771 nr, nmi, data, regs);
3773 rcu_read_lock(); 3772 rcu_read_lock();
3774 /* 3773 /*
3775 * doesn't really matter which of the child contexts the 3774 * doesn't really matter which of the child contexts the
3776 * events ends up in. 3775 * events ends up in.
3777 */ 3776 */
3778 ctx = rcu_dereference(current->perf_counter_ctxp); 3777 ctx = rcu_dereference(current->perf_event_ctxp);
3779 if (ctx) 3778 if (ctx)
3780 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs); 3779 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
3781 rcu_read_unlock(); 3780 rcu_read_unlock();
3782 3781
3783 barrier(); 3782 barrier();
@@ -3787,57 +3786,57 @@ out:
3787 put_cpu_var(perf_cpu_context); 3786 put_cpu_var(perf_cpu_context);
3788} 3787}
3789 3788
3790void __perf_swcounter_event(u32 event, u64 nr, int nmi, 3789void __perf_sw_event(u32 event_id, u64 nr, int nmi,
3791 struct pt_regs *regs, u64 addr) 3790 struct pt_regs *regs, u64 addr)
3792{ 3791{
3793 struct perf_sample_data data = { 3792 struct perf_sample_data data = {
3794 .addr = addr, 3793 .addr = addr,
3795 }; 3794 };
3796 3795
3797 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, 3796 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
3798 &data, regs); 3797 &data, regs);
3799} 3798}
3800 3799
3801static void perf_swcounter_read(struct perf_counter *counter) 3800static void perf_swevent_read(struct perf_event *event)
3802{ 3801{
3803} 3802}
3804 3803
3805static int perf_swcounter_enable(struct perf_counter *counter) 3804static int perf_swevent_enable(struct perf_event *event)
3806{ 3805{
3807 struct hw_perf_counter *hwc = &counter->hw; 3806 struct hw_perf_event *hwc = &event->hw;
3808 3807
3809 if (hwc->sample_period) { 3808 if (hwc->sample_period) {
3810 hwc->last_period = hwc->sample_period; 3809 hwc->last_period = hwc->sample_period;
3811 perf_swcounter_set_period(counter); 3810 perf_swevent_set_period(event);
3812 } 3811 }
3813 return 0; 3812 return 0;
3814} 3813}
3815 3814
3816static void perf_swcounter_disable(struct perf_counter *counter) 3815static void perf_swevent_disable(struct perf_event *event)
3817{ 3816{
3818} 3817}
3819 3818
3820static const struct pmu perf_ops_generic = { 3819static const struct pmu perf_ops_generic = {
3821 .enable = perf_swcounter_enable, 3820 .enable = perf_swevent_enable,
3822 .disable = perf_swcounter_disable, 3821 .disable = perf_swevent_disable,
3823 .read = perf_swcounter_read, 3822 .read = perf_swevent_read,
3824 .unthrottle = perf_swcounter_unthrottle, 3823 .unthrottle = perf_swevent_unthrottle,
3825}; 3824};
3826 3825
3827/* 3826/*
3828 * hrtimer based swcounter callback 3827 * hrtimer based swevent callback
3829 */ 3828 */
3830 3829
3831static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) 3830static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3832{ 3831{
3833 enum hrtimer_restart ret = HRTIMER_RESTART; 3832 enum hrtimer_restart ret = HRTIMER_RESTART;
3834 struct perf_sample_data data; 3833 struct perf_sample_data data;
3835 struct pt_regs *regs; 3834 struct pt_regs *regs;
3836 struct perf_counter *counter; 3835 struct perf_event *event;
3837 u64 period; 3836 u64 period;
3838 3837
3839 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); 3838 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
3840 counter->pmu->read(counter); 3839 event->pmu->read(event);
3841 3840
3842 data.addr = 0; 3841 data.addr = 0;
3843 regs = get_irq_regs(); 3842 regs = get_irq_regs();
@@ -3845,45 +3844,45 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3845 * In case we exclude kernel IPs or are somehow not in interrupt 3844 * In case we exclude kernel IPs or are somehow not in interrupt
3846 * context, provide the next best thing, the user IP. 3845 * context, provide the next best thing, the user IP.
3847 */ 3846 */
3848 if ((counter->attr.exclude_kernel || !regs) && 3847 if ((event->attr.exclude_kernel || !regs) &&
3849 !counter->attr.exclude_user) 3848 !event->attr.exclude_user)
3850 regs = task_pt_regs(current); 3849 regs = task_pt_regs(current);
3851 3850
3852 if (regs) { 3851 if (regs) {
3853 if (perf_counter_overflow(counter, 0, &data, regs)) 3852 if (perf_event_overflow(event, 0, &data, regs))
3854 ret = HRTIMER_NORESTART; 3853 ret = HRTIMER_NORESTART;
3855 } 3854 }
3856 3855
3857 period = max_t(u64, 10000, counter->hw.sample_period); 3856 period = max_t(u64, 10000, event->hw.sample_period);
3858 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 3857 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3859 3858
3860 return ret; 3859 return ret;
3861} 3860}
3862 3861
3863/* 3862/*
3864 * Software counter: cpu wall time clock 3863 * Software event: cpu wall time clock
3865 */ 3864 */
3866 3865
3867static void cpu_clock_perf_counter_update(struct perf_counter *counter) 3866static void cpu_clock_perf_event_update(struct perf_event *event)
3868{ 3867{
3869 int cpu = raw_smp_processor_id(); 3868 int cpu = raw_smp_processor_id();
3870 s64 prev; 3869 s64 prev;
3871 u64 now; 3870 u64 now;
3872 3871
3873 now = cpu_clock(cpu); 3872 now = cpu_clock(cpu);
3874 prev = atomic64_read(&counter->hw.prev_count); 3873 prev = atomic64_read(&event->hw.prev_count);
3875 atomic64_set(&counter->hw.prev_count, now); 3874 atomic64_set(&event->hw.prev_count, now);
3876 atomic64_add(now - prev, &counter->count); 3875 atomic64_add(now - prev, &event->count);
3877} 3876}
3878 3877
3879static int cpu_clock_perf_counter_enable(struct perf_counter *counter) 3878static int cpu_clock_perf_event_enable(struct perf_event *event)
3880{ 3879{
3881 struct hw_perf_counter *hwc = &counter->hw; 3880 struct hw_perf_event *hwc = &event->hw;
3882 int cpu = raw_smp_processor_id(); 3881 int cpu = raw_smp_processor_id();
3883 3882
3884 atomic64_set(&hwc->prev_count, cpu_clock(cpu)); 3883 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3885 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3884 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3886 hwc->hrtimer.function = perf_swcounter_hrtimer; 3885 hwc->hrtimer.function = perf_swevent_hrtimer;
3887 if (hwc->sample_period) { 3886 if (hwc->sample_period) {
3888 u64 period = max_t(u64, 10000, hwc->sample_period); 3887 u64 period = max_t(u64, 10000, hwc->sample_period);
3889 __hrtimer_start_range_ns(&hwc->hrtimer, 3888 __hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3894,48 +3893,48 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3894 return 0; 3893 return 0;
3895} 3894}
3896 3895
3897static void cpu_clock_perf_counter_disable(struct perf_counter *counter) 3896static void cpu_clock_perf_event_disable(struct perf_event *event)
3898{ 3897{
3899 if (counter->hw.sample_period) 3898 if (event->hw.sample_period)
3900 hrtimer_cancel(&counter->hw.hrtimer); 3899 hrtimer_cancel(&event->hw.hrtimer);
3901 cpu_clock_perf_counter_update(counter); 3900 cpu_clock_perf_event_update(event);
3902} 3901}
3903 3902
3904static void cpu_clock_perf_counter_read(struct perf_counter *counter) 3903static void cpu_clock_perf_event_read(struct perf_event *event)
3905{ 3904{
3906 cpu_clock_perf_counter_update(counter); 3905 cpu_clock_perf_event_update(event);
3907} 3906}
3908 3907
3909static const struct pmu perf_ops_cpu_clock = { 3908static const struct pmu perf_ops_cpu_clock = {
3910 .enable = cpu_clock_perf_counter_enable, 3909 .enable = cpu_clock_perf_event_enable,
3911 .disable = cpu_clock_perf_counter_disable, 3910 .disable = cpu_clock_perf_event_disable,
3912 .read = cpu_clock_perf_counter_read, 3911 .read = cpu_clock_perf_event_read,
3913}; 3912};
3914 3913
3915/* 3914/*
3916 * Software counter: task time clock 3915 * Software event: task time clock
3917 */ 3916 */
3918 3917
3919static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) 3918static void task_clock_perf_event_update(struct perf_event *event, u64 now)
3920{ 3919{
3921 u64 prev; 3920 u64 prev;
3922 s64 delta; 3921 s64 delta;
3923 3922
3924 prev = atomic64_xchg(&counter->hw.prev_count, now); 3923 prev = atomic64_xchg(&event->hw.prev_count, now);
3925 delta = now - prev; 3924 delta = now - prev;
3926 atomic64_add(delta, &counter->count); 3925 atomic64_add(delta, &event->count);
3927} 3926}
3928 3927
3929static int task_clock_perf_counter_enable(struct perf_counter *counter) 3928static int task_clock_perf_event_enable(struct perf_event *event)
3930{ 3929{
3931 struct hw_perf_counter *hwc = &counter->hw; 3930 struct hw_perf_event *hwc = &event->hw;
3932 u64 now; 3931 u64 now;
3933 3932
3934 now = counter->ctx->time; 3933 now = event->ctx->time;
3935 3934
3936 atomic64_set(&hwc->prev_count, now); 3935 atomic64_set(&hwc->prev_count, now);
3937 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3936 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3938 hwc->hrtimer.function = perf_swcounter_hrtimer; 3937 hwc->hrtimer.function = perf_swevent_hrtimer;
3939 if (hwc->sample_period) { 3938 if (hwc->sample_period) {
3940 u64 period = max_t(u64, 10000, hwc->sample_period); 3939 u64 period = max_t(u64, 10000, hwc->sample_period);
3941 __hrtimer_start_range_ns(&hwc->hrtimer, 3940 __hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3946,38 +3945,38 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
3946 return 0; 3945 return 0;
3947} 3946}
3948 3947
3949static void task_clock_perf_counter_disable(struct perf_counter *counter) 3948static void task_clock_perf_event_disable(struct perf_event *event)
3950{ 3949{
3951 if (counter->hw.sample_period) 3950 if (event->hw.sample_period)
3952 hrtimer_cancel(&counter->hw.hrtimer); 3951 hrtimer_cancel(&event->hw.hrtimer);
3953 task_clock_perf_counter_update(counter, counter->ctx->time); 3952 task_clock_perf_event_update(event, event->ctx->time);
3954 3953
3955} 3954}
3956 3955
3957static void task_clock_perf_counter_read(struct perf_counter *counter) 3956static void task_clock_perf_event_read(struct perf_event *event)
3958{ 3957{
3959 u64 time; 3958 u64 time;
3960 3959
3961 if (!in_nmi()) { 3960 if (!in_nmi()) {
3962 update_context_time(counter->ctx); 3961 update_context_time(event->ctx);
3963 time = counter->ctx->time; 3962 time = event->ctx->time;
3964 } else { 3963 } else {
3965 u64 now = perf_clock(); 3964 u64 now = perf_clock();
3966 u64 delta = now - counter->ctx->timestamp; 3965 u64 delta = now - event->ctx->timestamp;
3967 time = counter->ctx->time + delta; 3966 time = event->ctx->time + delta;
3968 } 3967 }
3969 3968
3970 task_clock_perf_counter_update(counter, time); 3969 task_clock_perf_event_update(event, time);
3971} 3970}
3972 3971
3973static const struct pmu perf_ops_task_clock = { 3972static const struct pmu perf_ops_task_clock = {
3974 .enable = task_clock_perf_counter_enable, 3973 .enable = task_clock_perf_event_enable,
3975 .disable = task_clock_perf_counter_disable, 3974 .disable = task_clock_perf_event_disable,
3976 .read = task_clock_perf_counter_read, 3975 .read = task_clock_perf_event_read,
3977}; 3976};
3978 3977
3979#ifdef CONFIG_EVENT_PROFILE 3978#ifdef CONFIG_EVENT_PROFILE
3980void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, 3979void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
3981 int entry_size) 3980 int entry_size)
3982{ 3981{
3983 struct perf_raw_record raw = { 3982 struct perf_raw_record raw = {
@@ -3995,78 +3994,78 @@ void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
3995 if (!regs) 3994 if (!regs)
3996 regs = task_pt_regs(current); 3995 regs = task_pt_regs(current);
3997 3996
3998 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 3997 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
3999 &data, regs); 3998 &data, regs);
4000} 3999}
4001EXPORT_SYMBOL_GPL(perf_tpcounter_event); 4000EXPORT_SYMBOL_GPL(perf_tp_event);
4002 4001
4003extern int ftrace_profile_enable(int); 4002extern int ftrace_profile_enable(int);
4004extern void ftrace_profile_disable(int); 4003extern void ftrace_profile_disable(int);
4005 4004
4006static void tp_perf_counter_destroy(struct perf_counter *counter) 4005static void tp_perf_event_destroy(struct perf_event *event)
4007{ 4006{
4008 ftrace_profile_disable(counter->attr.config); 4007 ftrace_profile_disable(event->attr.config);
4009} 4008}
4010 4009
4011static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4010static const struct pmu *tp_perf_event_init(struct perf_event *event)
4012{ 4011{
4013 /* 4012 /*
4014 * Raw tracepoint data is a severe data leak, only allow root to 4013 * Raw tracepoint data is a severe data leak, only allow root to
4015 * have these. 4014 * have these.
4016 */ 4015 */
4017 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && 4016 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4018 perf_paranoid_tracepoint_raw() && 4017 perf_paranoid_tracepoint_raw() &&
4019 !capable(CAP_SYS_ADMIN)) 4018 !capable(CAP_SYS_ADMIN))
4020 return ERR_PTR(-EPERM); 4019 return ERR_PTR(-EPERM);
4021 4020
4022 if (ftrace_profile_enable(counter->attr.config)) 4021 if (ftrace_profile_enable(event->attr.config))
4023 return NULL; 4022 return NULL;
4024 4023
4025 counter->destroy = tp_perf_counter_destroy; 4024 event->destroy = tp_perf_event_destroy;
4026 4025
4027 return &perf_ops_generic; 4026 return &perf_ops_generic;
4028} 4027}
4029#else 4028#else
4030static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4029static const struct pmu *tp_perf_event_init(struct perf_event *event)
4031{ 4030{
4032 return NULL; 4031 return NULL;
4033} 4032}
4034#endif 4033#endif
4035 4034
4036atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; 4035atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4037 4036
4038static void sw_perf_counter_destroy(struct perf_counter *counter) 4037static void sw_perf_event_destroy(struct perf_event *event)
4039{ 4038{
4040 u64 event = counter->attr.config; 4039 u64 event_id = event->attr.config;
4041 4040
4042 WARN_ON(counter->parent); 4041 WARN_ON(event->parent);
4043 4042
4044 atomic_dec(&perf_swcounter_enabled[event]); 4043 atomic_dec(&perf_swevent_enabled[event_id]);
4045} 4044}
4046 4045
4047static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) 4046static const struct pmu *sw_perf_event_init(struct perf_event *event)
4048{ 4047{
4049 const struct pmu *pmu = NULL; 4048 const struct pmu *pmu = NULL;
4050 u64 event = counter->attr.config; 4049 u64 event_id = event->attr.config;
4051 4050
4052 /* 4051 /*
4053 * Software counters (currently) can't in general distinguish 4052 * Software events (currently) can't in general distinguish
4054 * between user, kernel and hypervisor events. 4053 * between user, kernel and hypervisor events.
4055 * However, context switches and cpu migrations are considered 4054 * However, context switches and cpu migrations are considered
4056 * to be kernel events, and page faults are never hypervisor 4055 * to be kernel events, and page faults are never hypervisor
4057 * events. 4056 * events.
4058 */ 4057 */
4059 switch (event) { 4058 switch (event_id) {
4060 case PERF_COUNT_SW_CPU_CLOCK: 4059 case PERF_COUNT_SW_CPU_CLOCK:
4061 pmu = &perf_ops_cpu_clock; 4060 pmu = &perf_ops_cpu_clock;
4062 4061
4063 break; 4062 break;
4064 case PERF_COUNT_SW_TASK_CLOCK: 4063 case PERF_COUNT_SW_TASK_CLOCK:
4065 /* 4064 /*
4066 * If the user instantiates this as a per-cpu counter, 4065 * If the user instantiates this as a per-cpu event,
4067 * use the cpu_clock counter instead. 4066 * use the cpu_clock event instead.
4068 */ 4067 */
4069 if (counter->ctx->task) 4068 if (event->ctx->task)
4070 pmu = &perf_ops_task_clock; 4069 pmu = &perf_ops_task_clock;
4071 else 4070 else
4072 pmu = &perf_ops_cpu_clock; 4071 pmu = &perf_ops_cpu_clock;
@@ -4077,9 +4076,9 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
4077 case PERF_COUNT_SW_PAGE_FAULTS_MAJ: 4076 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4078 case PERF_COUNT_SW_CONTEXT_SWITCHES: 4077 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4079 case PERF_COUNT_SW_CPU_MIGRATIONS: 4078 case PERF_COUNT_SW_CPU_MIGRATIONS:
4080 if (!counter->parent) { 4079 if (!event->parent) {
4081 atomic_inc(&perf_swcounter_enabled[event]); 4080 atomic_inc(&perf_swevent_enabled[event_id]);
4082 counter->destroy = sw_perf_counter_destroy; 4081 event->destroy = sw_perf_event_destroy;
4083 } 4082 }
4084 pmu = &perf_ops_generic; 4083 pmu = &perf_ops_generic;
4085 break; 4084 break;
@@ -4089,62 +4088,62 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
4089} 4088}
4090 4089
4091/* 4090/*
4092 * Allocate and initialize a counter structure 4091 * Allocate and initialize an event structure
4093 */ 4092 */
4094static struct perf_counter * 4093static struct perf_event *
4095perf_counter_alloc(struct perf_counter_attr *attr, 4094perf_event_alloc(struct perf_event_attr *attr,
4096 int cpu, 4095 int cpu,
4097 struct perf_counter_context *ctx, 4096 struct perf_event_context *ctx,
4098 struct perf_counter *group_leader, 4097 struct perf_event *group_leader,
4099 struct perf_counter *parent_counter, 4098 struct perf_event *parent_event,
4100 gfp_t gfpflags) 4099 gfp_t gfpflags)
4101{ 4100{
4102 const struct pmu *pmu; 4101 const struct pmu *pmu;
4103 struct perf_counter *counter; 4102 struct perf_event *event;
4104 struct hw_perf_counter *hwc; 4103 struct hw_perf_event *hwc;
4105 long err; 4104 long err;
4106 4105
4107 counter = kzalloc(sizeof(*counter), gfpflags); 4106 event = kzalloc(sizeof(*event), gfpflags);
4108 if (!counter) 4107 if (!event)
4109 return ERR_PTR(-ENOMEM); 4108 return ERR_PTR(-ENOMEM);
4110 4109
4111 /* 4110 /*
4112 * Single counters are their own group leaders, with an 4111 * Single events are their own group leaders, with an
4113 * empty sibling list: 4112 * empty sibling list:
4114 */ 4113 */
4115 if (!group_leader) 4114 if (!group_leader)
4116 group_leader = counter; 4115 group_leader = event;
4117 4116
4118 mutex_init(&counter->child_mutex); 4117 mutex_init(&event->child_mutex);
4119 INIT_LIST_HEAD(&counter->child_list); 4118 INIT_LIST_HEAD(&event->child_list);
4120 4119
4121 INIT_LIST_HEAD(&counter->list_entry); 4120 INIT_LIST_HEAD(&event->group_entry);
4122 INIT_LIST_HEAD(&counter->event_entry); 4121 INIT_LIST_HEAD(&event->event_entry);
4123 INIT_LIST_HEAD(&counter->sibling_list); 4122 INIT_LIST_HEAD(&event->sibling_list);
4124 init_waitqueue_head(&counter->waitq); 4123 init_waitqueue_head(&event->waitq);
4125 4124
4126 mutex_init(&counter->mmap_mutex); 4125 mutex_init(&event->mmap_mutex);
4127 4126
4128 counter->cpu = cpu; 4127 event->cpu = cpu;
4129 counter->attr = *attr; 4128 event->attr = *attr;
4130 counter->group_leader = group_leader; 4129 event->group_leader = group_leader;
4131 counter->pmu = NULL; 4130 event->pmu = NULL;
4132 counter->ctx = ctx; 4131 event->ctx = ctx;
4133 counter->oncpu = -1; 4132 event->oncpu = -1;
4134 4133
4135 counter->parent = parent_counter; 4134 event->parent = parent_event;
4136 4135
4137 counter->ns = get_pid_ns(current->nsproxy->pid_ns); 4136 event->ns = get_pid_ns(current->nsproxy->pid_ns);
4138 counter->id = atomic64_inc_return(&perf_counter_id); 4137 event->id = atomic64_inc_return(&perf_event_id);
4139 4138
4140 counter->state = PERF_COUNTER_STATE_INACTIVE; 4139 event->state = PERF_EVENT_STATE_INACTIVE;
4141 4140
4142 if (attr->disabled) 4141 if (attr->disabled)
4143 counter->state = PERF_COUNTER_STATE_OFF; 4142 event->state = PERF_EVENT_STATE_OFF;
4144 4143
4145 pmu = NULL; 4144 pmu = NULL;
4146 4145
4147 hwc = &counter->hw; 4146 hwc = &event->hw;
4148 hwc->sample_period = attr->sample_period; 4147 hwc->sample_period = attr->sample_period;
4149 if (attr->freq && attr->sample_freq) 4148 if (attr->freq && attr->sample_freq)
4150 hwc->sample_period = 1; 4149 hwc->sample_period = 1;
@@ -4153,7 +4152,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
4153 atomic64_set(&hwc->period_left, hwc->sample_period); 4152 atomic64_set(&hwc->period_left, hwc->sample_period);
4154 4153
4155 /* 4154 /*
4156 * we currently do not support PERF_FORMAT_GROUP on inherited counters 4155 * we currently do not support PERF_FORMAT_GROUP on inherited events
4157 */ 4156 */
4158 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 4157 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4159 goto done; 4158 goto done;
@@ -4162,15 +4161,15 @@ perf_counter_alloc(struct perf_counter_attr *attr,
4162 case PERF_TYPE_RAW: 4161 case PERF_TYPE_RAW:
4163 case PERF_TYPE_HARDWARE: 4162 case PERF_TYPE_HARDWARE:
4164 case PERF_TYPE_HW_CACHE: 4163 case PERF_TYPE_HW_CACHE:
4165 pmu = hw_perf_counter_init(counter); 4164 pmu = hw_perf_event_init(event);
4166 break; 4165 break;
4167 4166
4168 case PERF_TYPE_SOFTWARE: 4167 case PERF_TYPE_SOFTWARE:
4169 pmu = sw_perf_counter_init(counter); 4168 pmu = sw_perf_event_init(event);
4170 break; 4169 break;
4171 4170
4172 case PERF_TYPE_TRACEPOINT: 4171 case PERF_TYPE_TRACEPOINT:
4173 pmu = tp_perf_counter_init(counter); 4172 pmu = tp_perf_event_init(event);
4174 break; 4173 break;
4175 4174
4176 default: 4175 default:
@@ -4184,29 +4183,29 @@ done:
4184 err = PTR_ERR(pmu); 4183 err = PTR_ERR(pmu);
4185 4184
4186 if (err) { 4185 if (err) {
4187 if (counter->ns) 4186 if (event->ns)
4188 put_pid_ns(counter->ns); 4187 put_pid_ns(event->ns);
4189 kfree(counter); 4188 kfree(event);
4190 return ERR_PTR(err); 4189 return ERR_PTR(err);
4191 } 4190 }
4192 4191
4193 counter->pmu = pmu; 4192 event->pmu = pmu;
4194 4193
4195 if (!counter->parent) { 4194 if (!event->parent) {
4196 atomic_inc(&nr_counters); 4195 atomic_inc(&nr_events);
4197 if (counter->attr.mmap) 4196 if (event->attr.mmap)
4198 atomic_inc(&nr_mmap_counters); 4197 atomic_inc(&nr_mmap_events);
4199 if (counter->attr.comm) 4198 if (event->attr.comm)
4200 atomic_inc(&nr_comm_counters); 4199 atomic_inc(&nr_comm_events);
4201 if (counter->attr.task) 4200 if (event->attr.task)
4202 atomic_inc(&nr_task_counters); 4201 atomic_inc(&nr_task_events);
4203 } 4202 }
4204 4203
4205 return counter; 4204 return event;
4206} 4205}
4207 4206
4208static int perf_copy_attr(struct perf_counter_attr __user *uattr, 4207static int perf_copy_attr(struct perf_event_attr __user *uattr,
4209 struct perf_counter_attr *attr) 4208 struct perf_event_attr *attr)
4210{ 4209{
4211 u32 size; 4210 u32 size;
4212 int ret; 4211 int ret;
@@ -4285,11 +4284,11 @@ err_size:
4285 goto out; 4284 goto out;
4286} 4285}
4287 4286
4288int perf_counter_set_output(struct perf_counter *counter, int output_fd) 4287int perf_event_set_output(struct perf_event *event, int output_fd)
4289{ 4288{
4290 struct perf_counter *output_counter = NULL; 4289 struct perf_event *output_event = NULL;
4291 struct file *output_file = NULL; 4290 struct file *output_file = NULL;
4292 struct perf_counter *old_output; 4291 struct perf_event *old_output;
4293 int fput_needed = 0; 4292 int fput_needed = 0;
4294 int ret = -EINVAL; 4293 int ret = -EINVAL;
4295 4294
@@ -4303,28 +4302,28 @@ int perf_counter_set_output(struct perf_counter *counter, int output_fd)
4303 if (output_file->f_op != &perf_fops) 4302 if (output_file->f_op != &perf_fops)
4304 goto out; 4303 goto out;
4305 4304
4306 output_counter = output_file->private_data; 4305 output_event = output_file->private_data;
4307 4306
4308 /* Don't chain output fds */ 4307 /* Don't chain output fds */
4309 if (output_counter->output) 4308 if (output_event->output)
4310 goto out; 4309 goto out;
4311 4310
4312 /* Don't set an output fd when we already have an output channel */ 4311 /* Don't set an output fd when we already have an output channel */
4313 if (counter->data) 4312 if (event->data)
4314 goto out; 4313 goto out;
4315 4314
4316 atomic_long_inc(&output_file->f_count); 4315 atomic_long_inc(&output_file->f_count);
4317 4316
4318set: 4317set:
4319 mutex_lock(&counter->mmap_mutex); 4318 mutex_lock(&event->mmap_mutex);
4320 old_output = counter->output; 4319 old_output = event->output;
4321 rcu_assign_pointer(counter->output, output_counter); 4320 rcu_assign_pointer(event->output, output_event);
4322 mutex_unlock(&counter->mmap_mutex); 4321 mutex_unlock(&event->mmap_mutex);
4323 4322
4324 if (old_output) { 4323 if (old_output) {
4325 /* 4324 /*
4326 * we need to make sure no existing perf_output_*() 4325 * we need to make sure no existing perf_output_*()
4327 * is still referencing this counter. 4326 * is still referencing this event.
4328 */ 4327 */
4329 synchronize_rcu(); 4328 synchronize_rcu();
4330 fput(old_output->filp); 4329 fput(old_output->filp);
@@ -4337,21 +4336,21 @@ out:
4337} 4336}
4338 4337
4339/** 4338/**
4340 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu 4339 * sys_perf_event_open - open a performance event, associate it to a task/cpu
4341 * 4340 *
4342 * @attr_uptr: event type attributes for monitoring/sampling 4341 * @attr_uptr: event_id type attributes for monitoring/sampling
4343 * @pid: target pid 4342 * @pid: target pid
4344 * @cpu: target cpu 4343 * @cpu: target cpu
4345 * @group_fd: group leader counter fd 4344 * @group_fd: group leader event fd
4346 */ 4345 */
4347SYSCALL_DEFINE5(perf_counter_open, 4346SYSCALL_DEFINE5(perf_event_open,
4348 struct perf_counter_attr __user *, attr_uptr, 4347 struct perf_event_attr __user *, attr_uptr,
4349 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 4348 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4350{ 4349{
4351 struct perf_counter *counter, *group_leader; 4350 struct perf_event *event, *group_leader;
4352 struct perf_counter_attr attr; 4351 struct perf_event_attr attr;
4353 struct perf_counter_context *ctx; 4352 struct perf_event_context *ctx;
4354 struct file *counter_file = NULL; 4353 struct file *event_file = NULL;
4355 struct file *group_file = NULL; 4354 struct file *group_file = NULL;
4356 int fput_needed = 0; 4355 int fput_needed = 0;
4357 int fput_needed2 = 0; 4356 int fput_needed2 = 0;
@@ -4371,7 +4370,7 @@ SYSCALL_DEFINE5(perf_counter_open,
4371 } 4370 }
4372 4371
4373 if (attr.freq) { 4372 if (attr.freq) {
4374 if (attr.sample_freq > sysctl_perf_counter_sample_rate) 4373 if (attr.sample_freq > sysctl_perf_event_sample_rate)
4375 return -EINVAL; 4374 return -EINVAL;
4376 } 4375 }
4377 4376
@@ -4383,7 +4382,7 @@ SYSCALL_DEFINE5(perf_counter_open,
4383 return PTR_ERR(ctx); 4382 return PTR_ERR(ctx);
4384 4383
4385 /* 4384 /*
4386 * Look up the group leader (we will attach this counter to it): 4385 * Look up the group leader (we will attach this event to it):
4387 */ 4386 */
4388 group_leader = NULL; 4387 group_leader = NULL;
4389 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { 4388 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
@@ -4414,45 +4413,45 @@ SYSCALL_DEFINE5(perf_counter_open,
4414 goto err_put_context; 4413 goto err_put_context;
4415 } 4414 }
4416 4415
4417 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, 4416 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
4418 NULL, GFP_KERNEL); 4417 NULL, GFP_KERNEL);
4419 err = PTR_ERR(counter); 4418 err = PTR_ERR(event);
4420 if (IS_ERR(counter)) 4419 if (IS_ERR(event))
4421 goto err_put_context; 4420 goto err_put_context;
4422 4421
4423 err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); 4422 err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
4424 if (err < 0) 4423 if (err < 0)
4425 goto err_free_put_context; 4424 goto err_free_put_context;
4426 4425
4427 counter_file = fget_light(err, &fput_needed2); 4426 event_file = fget_light(err, &fput_needed2);
4428 if (!counter_file) 4427 if (!event_file)
4429 goto err_free_put_context; 4428 goto err_free_put_context;
4430 4429
4431 if (flags & PERF_FLAG_FD_OUTPUT) { 4430 if (flags & PERF_FLAG_FD_OUTPUT) {
4432 err = perf_counter_set_output(counter, group_fd); 4431 err = perf_event_set_output(event, group_fd);
4433 if (err) 4432 if (err)
4434 goto err_fput_free_put_context; 4433 goto err_fput_free_put_context;
4435 } 4434 }
4436 4435
4437 counter->filp = counter_file; 4436 event->filp = event_file;
4438 WARN_ON_ONCE(ctx->parent_ctx); 4437 WARN_ON_ONCE(ctx->parent_ctx);
4439 mutex_lock(&ctx->mutex); 4438 mutex_lock(&ctx->mutex);
4440 perf_install_in_context(ctx, counter, cpu); 4439 perf_install_in_context(ctx, event, cpu);
4441 ++ctx->generation; 4440 ++ctx->generation;
4442 mutex_unlock(&ctx->mutex); 4441 mutex_unlock(&ctx->mutex);
4443 4442
4444 counter->owner = current; 4443 event->owner = current;
4445 get_task_struct(current); 4444 get_task_struct(current);
4446 mutex_lock(&current->perf_counter_mutex); 4445 mutex_lock(&current->perf_event_mutex);
4447 list_add_tail(&counter->owner_entry, &current->perf_counter_list); 4446 list_add_tail(&event->owner_entry, &current->perf_event_list);
4448 mutex_unlock(&current->perf_counter_mutex); 4447 mutex_unlock(&current->perf_event_mutex);
4449 4448
4450err_fput_free_put_context: 4449err_fput_free_put_context:
4451 fput_light(counter_file, fput_needed2); 4450 fput_light(event_file, fput_needed2);
4452 4451
4453err_free_put_context: 4452err_free_put_context:
4454 if (err < 0) 4453 if (err < 0)
4455 kfree(counter); 4454 kfree(event);
4456 4455
4457err_put_context: 4456err_put_context:
4458 if (err < 0) 4457 if (err < 0)
@@ -4464,88 +4463,88 @@ err_put_context:
4464} 4463}
4465 4464
4466/* 4465/*
4467 * inherit a counter from parent task to child task: 4466 * inherit an event from parent task to child task:
4468 */ 4467 */
4469static struct perf_counter * 4468static struct perf_event *
4470inherit_counter(struct perf_counter *parent_counter, 4469inherit_event(struct perf_event *parent_event,
4471 struct task_struct *parent, 4470 struct task_struct *parent,
4472 struct perf_counter_context *parent_ctx, 4471 struct perf_event_context *parent_ctx,
4473 struct task_struct *child, 4472 struct task_struct *child,
4474 struct perf_counter *group_leader, 4473 struct perf_event *group_leader,
4475 struct perf_counter_context *child_ctx) 4474 struct perf_event_context *child_ctx)
4476{ 4475{
4477 struct perf_counter *child_counter; 4476 struct perf_event *child_event;
4478 4477
4479 /* 4478 /*
4480 * Instead of creating recursive hierarchies of counters, 4479 * Instead of creating recursive hierarchies of events,
4481 * we link inherited counters back to the original parent, 4480 * we link inherited events back to the original parent,
4482 * which has a filp for sure, which we use as the reference 4481 * which has a filp for sure, which we use as the reference
4483 * count: 4482 * count:
4484 */ 4483 */
4485 if (parent_counter->parent) 4484 if (parent_event->parent)
4486 parent_counter = parent_counter->parent; 4485 parent_event = parent_event->parent;
4487 4486
4488 child_counter = perf_counter_alloc(&parent_counter->attr, 4487 child_event = perf_event_alloc(&parent_event->attr,
4489 parent_counter->cpu, child_ctx, 4488 parent_event->cpu, child_ctx,
4490 group_leader, parent_counter, 4489 group_leader, parent_event,
4491 GFP_KERNEL); 4490 GFP_KERNEL);
4492 if (IS_ERR(child_counter)) 4491 if (IS_ERR(child_event))
4493 return child_counter; 4492 return child_event;
4494 get_ctx(child_ctx); 4493 get_ctx(child_ctx);
4495 4494
4496 /* 4495 /*
4497 * Make the child state follow the state of the parent counter, 4496 * Make the child state follow the state of the parent event,
4498 * not its attr.disabled bit. We hold the parent's mutex, 4497 * not its attr.disabled bit. We hold the parent's mutex,
4499 * so we won't race with perf_counter_{en, dis}able_family. 4498 * so we won't race with perf_event_{en, dis}able_family.
4500 */ 4499 */
4501 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) 4500 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
4502 child_counter->state = PERF_COUNTER_STATE_INACTIVE; 4501 child_event->state = PERF_EVENT_STATE_INACTIVE;
4503 else 4502 else
4504 child_counter->state = PERF_COUNTER_STATE_OFF; 4503 child_event->state = PERF_EVENT_STATE_OFF;
4505 4504
4506 if (parent_counter->attr.freq) 4505 if (parent_event->attr.freq)
4507 child_counter->hw.sample_period = parent_counter->hw.sample_period; 4506 child_event->hw.sample_period = parent_event->hw.sample_period;
4508 4507
4509 /* 4508 /*
4510 * Link it up in the child's context: 4509 * Link it up in the child's context:
4511 */ 4510 */
4512 add_counter_to_ctx(child_counter, child_ctx); 4511 add_event_to_ctx(child_event, child_ctx);
4513 4512
4514 /* 4513 /*
4515 * Get a reference to the parent filp - we will fput it 4514 * Get a reference to the parent filp - we will fput it
4516 * when the child counter exits. This is safe to do because 4515 * when the child event exits. This is safe to do because
4517 * we are in the parent and we know that the filp still 4516 * we are in the parent and we know that the filp still
4518 * exists and has a nonzero count: 4517 * exists and has a nonzero count:
4519 */ 4518 */
4520 atomic_long_inc(&parent_counter->filp->f_count); 4519 atomic_long_inc(&parent_event->filp->f_count);
4521 4520
4522 /* 4521 /*
4523 * Link this into the parent counter's child list 4522 * Link this into the parent event's child list
4524 */ 4523 */
4525 WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4524 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4526 mutex_lock(&parent_counter->child_mutex); 4525 mutex_lock(&parent_event->child_mutex);
4527 list_add_tail(&child_counter->child_list, &parent_counter->child_list); 4526 list_add_tail(&child_event->child_list, &parent_event->child_list);
4528 mutex_unlock(&parent_counter->child_mutex); 4527 mutex_unlock(&parent_event->child_mutex);
4529 4528
4530 return child_counter; 4529 return child_event;
4531} 4530}
4532 4531
4533static int inherit_group(struct perf_counter *parent_counter, 4532static int inherit_group(struct perf_event *parent_event,
4534 struct task_struct *parent, 4533 struct task_struct *parent,
4535 struct perf_counter_context *parent_ctx, 4534 struct perf_event_context *parent_ctx,
4536 struct task_struct *child, 4535 struct task_struct *child,
4537 struct perf_counter_context *child_ctx) 4536 struct perf_event_context *child_ctx)
4538{ 4537{
4539 struct perf_counter *leader; 4538 struct perf_event *leader;
4540 struct perf_counter *sub; 4539 struct perf_event *sub;
4541 struct perf_counter *child_ctr; 4540 struct perf_event *child_ctr;
4542 4541
4543 leader = inherit_counter(parent_counter, parent, parent_ctx, 4542 leader = inherit_event(parent_event, parent, parent_ctx,
4544 child, NULL, child_ctx); 4543 child, NULL, child_ctx);
4545 if (IS_ERR(leader)) 4544 if (IS_ERR(leader))
4546 return PTR_ERR(leader); 4545 return PTR_ERR(leader);
4547 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { 4546 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
4548 child_ctr = inherit_counter(sub, parent, parent_ctx, 4547 child_ctr = inherit_event(sub, parent, parent_ctx,
4549 child, leader, child_ctx); 4548 child, leader, child_ctx);
4550 if (IS_ERR(child_ctr)) 4549 if (IS_ERR(child_ctr))
4551 return PTR_ERR(child_ctr); 4550 return PTR_ERR(child_ctr);
@@ -4553,74 +4552,74 @@ static int inherit_group(struct perf_counter *parent_counter,
4553 return 0; 4552 return 0;
4554} 4553}
4555 4554
4556static void sync_child_counter(struct perf_counter *child_counter, 4555static void sync_child_event(struct perf_event *child_event,
4557 struct task_struct *child) 4556 struct task_struct *child)
4558{ 4557{
4559 struct perf_counter *parent_counter = child_counter->parent; 4558 struct perf_event *parent_event = child_event->parent;
4560 u64 child_val; 4559 u64 child_val;
4561 4560
4562 if (child_counter->attr.inherit_stat) 4561 if (child_event->attr.inherit_stat)
4563 perf_counter_read_event(child_counter, child); 4562 perf_event_read_event(child_event, child);
4564 4563
4565 child_val = atomic64_read(&child_counter->count); 4564 child_val = atomic64_read(&child_event->count);
4566 4565
4567 /* 4566 /*
4568 * Add back the child's count to the parent's count: 4567 * Add back the child's count to the parent's count:
4569 */ 4568 */
4570 atomic64_add(child_val, &parent_counter->count); 4569 atomic64_add(child_val, &parent_event->count);
4571 atomic64_add(child_counter->total_time_enabled, 4570 atomic64_add(child_event->total_time_enabled,
4572 &parent_counter->child_total_time_enabled); 4571 &parent_event->child_total_time_enabled);
4573 atomic64_add(child_counter->total_time_running, 4572 atomic64_add(child_event->total_time_running,
4574 &parent_counter->child_total_time_running); 4573 &parent_event->child_total_time_running);
4575 4574
4576 /* 4575 /*
4577 * Remove this counter from the parent's list 4576 * Remove this event from the parent's list
4578 */ 4577 */
4579 WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4578 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4580 mutex_lock(&parent_counter->child_mutex); 4579 mutex_lock(&parent_event->child_mutex);
4581 list_del_init(&child_counter->child_list); 4580 list_del_init(&child_event->child_list);
4582 mutex_unlock(&parent_counter->child_mutex); 4581 mutex_unlock(&parent_event->child_mutex);
4583 4582
4584 /* 4583 /*
4585 * Release the parent counter, if this was the last 4584 * Release the parent event, if this was the last
4586 * reference to it. 4585 * reference to it.
4587 */ 4586 */
4588 fput(parent_counter->filp); 4587 fput(parent_event->filp);
4589} 4588}
4590 4589
4591static void 4590static void
4592__perf_counter_exit_task(struct perf_counter *child_counter, 4591__perf_event_exit_task(struct perf_event *child_event,
4593 struct perf_counter_context *child_ctx, 4592 struct perf_event_context *child_ctx,
4594 struct task_struct *child) 4593 struct task_struct *child)
4595{ 4594{
4596 struct perf_counter *parent_counter; 4595 struct perf_event *parent_event;
4597 4596
4598 update_counter_times(child_counter); 4597 update_event_times(child_event);
4599 perf_counter_remove_from_context(child_counter); 4598 perf_event_remove_from_context(child_event);
4600 4599
4601 parent_counter = child_counter->parent; 4600 parent_event = child_event->parent;
4602 /* 4601 /*
4603 * It can happen that parent exits first, and has counters 4602 * It can happen that parent exits first, and has events
4604 * that are still around due to the child reference. These 4603 * that are still around due to the child reference. These
4605 * counters need to be zapped - but otherwise linger. 4604 * events need to be zapped - but otherwise linger.
4606 */ 4605 */
4607 if (parent_counter) { 4606 if (parent_event) {
4608 sync_child_counter(child_counter, child); 4607 sync_child_event(child_event, child);
4609 free_counter(child_counter); 4608 free_event(child_event);
4610 } 4609 }
4611} 4610}
4612 4611
4613/* 4612/*
4614 * When a child task exits, feed back counter values to parent counters. 4613 * When a child task exits, feed back event values to parent events.
4615 */ 4614 */
4616void perf_counter_exit_task(struct task_struct *child) 4615void perf_event_exit_task(struct task_struct *child)
4617{ 4616{
4618 struct perf_counter *child_counter, *tmp; 4617 struct perf_event *child_event, *tmp;
4619 struct perf_counter_context *child_ctx; 4618 struct perf_event_context *child_ctx;
4620 unsigned long flags; 4619 unsigned long flags;
4621 4620
4622 if (likely(!child->perf_counter_ctxp)) { 4621 if (likely(!child->perf_event_ctxp)) {
4623 perf_counter_task(child, NULL, 0); 4622 perf_event_task(child, NULL, 0);
4624 return; 4623 return;
4625 } 4624 }
4626 4625
@@ -4631,37 +4630,37 @@ void perf_counter_exit_task(struct task_struct *child)
4631 * scheduled, so we are now safe from rescheduling changing 4630 * scheduled, so we are now safe from rescheduling changing
4632 * our context. 4631 * our context.
4633 */ 4632 */
4634 child_ctx = child->perf_counter_ctxp; 4633 child_ctx = child->perf_event_ctxp;
4635 __perf_counter_task_sched_out(child_ctx); 4634 __perf_event_task_sched_out(child_ctx);
4636 4635
4637 /* 4636 /*
4638 * Take the context lock here so that if find_get_context is 4637 * Take the context lock here so that if find_get_context is
4639 * reading child->perf_counter_ctxp, we wait until it has 4638 * reading child->perf_event_ctxp, we wait until it has
4640 * incremented the context's refcount before we do put_ctx below. 4639 * incremented the context's refcount before we do put_ctx below.
4641 */ 4640 */
4642 spin_lock(&child_ctx->lock); 4641 spin_lock(&child_ctx->lock);
4643 child->perf_counter_ctxp = NULL; 4642 child->perf_event_ctxp = NULL;
4644 /* 4643 /*
4645 * If this context is a clone; unclone it so it can't get 4644 * If this context is a clone; unclone it so it can't get
4646 * swapped to another process while we're removing all 4645 * swapped to another process while we're removing all
4647 * the counters from it. 4646 * the events from it.
4648 */ 4647 */
4649 unclone_ctx(child_ctx); 4648 unclone_ctx(child_ctx);
4650 spin_unlock_irqrestore(&child_ctx->lock, flags); 4649 spin_unlock_irqrestore(&child_ctx->lock, flags);
4651 4650
4652 /* 4651 /*
4653 * Report the task dead after unscheduling the counters so that we 4652 * Report the task dead after unscheduling the events so that we
4654 * won't get any samples after PERF_EVENT_EXIT. We can however still 4653 * won't get any samples after PERF_RECORD_EXIT. We can however still
4655 * get a few PERF_EVENT_READ events. 4654 * get a few PERF_RECORD_READ events.
4656 */ 4655 */
4657 perf_counter_task(child, child_ctx, 0); 4656 perf_event_task(child, child_ctx, 0);
4658 4657
4659 /* 4658 /*
4660 * We can recurse on the same lock type through: 4659 * We can recurse on the same lock type through:
4661 * 4660 *
4662 * __perf_counter_exit_task() 4661 * __perf_event_exit_task()
4663 * sync_child_counter() 4662 * sync_child_event()
4664 * fput(parent_counter->filp) 4663 * fput(parent_event->filp)
4665 * perf_release() 4664 * perf_release()
4666 * mutex_lock(&ctx->mutex) 4665 * mutex_lock(&ctx->mutex)
4667 * 4666 *
@@ -4670,16 +4669,16 @@ void perf_counter_exit_task(struct task_struct *child)
4670 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); 4669 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
4671 4670
4672again: 4671again:
4673 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, 4672 list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list,
4674 list_entry) 4673 group_entry)
4675 __perf_counter_exit_task(child_counter, child_ctx, child); 4674 __perf_event_exit_task(child_event, child_ctx, child);
4676 4675
4677 /* 4676 /*
4678 * If the last counter was a group counter, it will have appended all 4677 * If the last event was a group event, it will have appended all
4679 * its siblings to the list, but we obtained 'tmp' before that which 4678 * its siblings to the list, but we obtained 'tmp' before that which
4680 * will still point to the list head terminating the iteration. 4679 * will still point to the list head terminating the iteration.
4681 */ 4680 */
4682 if (!list_empty(&child_ctx->counter_list)) 4681 if (!list_empty(&child_ctx->group_list))
4683 goto again; 4682 goto again;
4684 4683
4685 mutex_unlock(&child_ctx->mutex); 4684 mutex_unlock(&child_ctx->mutex);
@@ -4691,33 +4690,33 @@ again:
4691 * free an unexposed, unused context as created by inheritance by 4690 * free an unexposed, unused context as created by inheritance by
4692 * init_task below, used by fork() in case of fail. 4691 * init_task below, used by fork() in case of fail.
4693 */ 4692 */
4694void perf_counter_free_task(struct task_struct *task) 4693void perf_event_free_task(struct task_struct *task)
4695{ 4694{
4696 struct perf_counter_context *ctx = task->perf_counter_ctxp; 4695 struct perf_event_context *ctx = task->perf_event_ctxp;
4697 struct perf_counter *counter, *tmp; 4696 struct perf_event *event, *tmp;
4698 4697
4699 if (!ctx) 4698 if (!ctx)
4700 return; 4699 return;
4701 4700
4702 mutex_lock(&ctx->mutex); 4701 mutex_lock(&ctx->mutex);
4703again: 4702again:
4704 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) { 4703 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) {
4705 struct perf_counter *parent = counter->parent; 4704 struct perf_event *parent = event->parent;
4706 4705
4707 if (WARN_ON_ONCE(!parent)) 4706 if (WARN_ON_ONCE(!parent))
4708 continue; 4707 continue;
4709 4708
4710 mutex_lock(&parent->child_mutex); 4709 mutex_lock(&parent->child_mutex);
4711 list_del_init(&counter->child_list); 4710 list_del_init(&event->child_list);
4712 mutex_unlock(&parent->child_mutex); 4711 mutex_unlock(&parent->child_mutex);
4713 4712
4714 fput(parent->filp); 4713 fput(parent->filp);
4715 4714
4716 list_del_counter(counter, ctx); 4715 list_del_event(event, ctx);
4717 free_counter(counter); 4716 free_event(event);
4718 } 4717 }
4719 4718
4720 if (!list_empty(&ctx->counter_list)) 4719 if (!list_empty(&ctx->group_list))
4721 goto again; 4720 goto again;
4722 4721
4723 mutex_unlock(&ctx->mutex); 4722 mutex_unlock(&ctx->mutex);
@@ -4726,37 +4725,37 @@ again:
4726} 4725}
4727 4726
4728/* 4727/*
4729 * Initialize the perf_counter context in task_struct 4728 * Initialize the perf_event context in task_struct
4730 */ 4729 */
4731int perf_counter_init_task(struct task_struct *child) 4730int perf_event_init_task(struct task_struct *child)
4732{ 4731{
4733 struct perf_counter_context *child_ctx, *parent_ctx; 4732 struct perf_event_context *child_ctx, *parent_ctx;
4734 struct perf_counter_context *cloned_ctx; 4733 struct perf_event_context *cloned_ctx;
4735 struct perf_counter *counter; 4734 struct perf_event *event;
4736 struct task_struct *parent = current; 4735 struct task_struct *parent = current;
4737 int inherited_all = 1; 4736 int inherited_all = 1;
4738 int ret = 0; 4737 int ret = 0;
4739 4738
4740 child->perf_counter_ctxp = NULL; 4739 child->perf_event_ctxp = NULL;
4741 4740
4742 mutex_init(&child->perf_counter_mutex); 4741 mutex_init(&child->perf_event_mutex);
4743 INIT_LIST_HEAD(&child->perf_counter_list); 4742 INIT_LIST_HEAD(&child->perf_event_list);
4744 4743
4745 if (likely(!parent->perf_counter_ctxp)) 4744 if (likely(!parent->perf_event_ctxp))
4746 return 0; 4745 return 0;
4747 4746
4748 /* 4747 /*
4749 * This is executed from the parent task context, so inherit 4748 * This is executed from the parent task context, so inherit
4750 * counters that have been marked for cloning. 4749 * events that have been marked for cloning.
4751 * First allocate and initialize a context for the child. 4750 * First allocate and initialize a context for the child.
4752 */ 4751 */
4753 4752
4754 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 4753 child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4755 if (!child_ctx) 4754 if (!child_ctx)
4756 return -ENOMEM; 4755 return -ENOMEM;
4757 4756
4758 __perf_counter_init_context(child_ctx, child); 4757 __perf_event_init_context(child_ctx, child);
4759 child->perf_counter_ctxp = child_ctx; 4758 child->perf_event_ctxp = child_ctx;
4760 get_task_struct(child); 4759 get_task_struct(child);
4761 4760
4762 /* 4761 /*
@@ -4782,16 +4781,16 @@ int perf_counter_init_task(struct task_struct *child)
4782 * We don't have to disable NMIs - we are only looking at 4781 * We don't have to disable NMIs - we are only looking at
4783 * the list, not manipulating it: 4782 * the list, not manipulating it:
4784 */ 4783 */
4785 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { 4784 list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) {
4786 if (counter != counter->group_leader) 4785 if (event != event->group_leader)
4787 continue; 4786 continue;
4788 4787
4789 if (!counter->attr.inherit) { 4788 if (!event->attr.inherit) {
4790 inherited_all = 0; 4789 inherited_all = 0;
4791 continue; 4790 continue;
4792 } 4791 }
4793 4792
4794 ret = inherit_group(counter, parent, parent_ctx, 4793 ret = inherit_group(event, parent, parent_ctx,
4795 child, child_ctx); 4794 child, child_ctx);
4796 if (ret) { 4795 if (ret) {
4797 inherited_all = 0; 4796 inherited_all = 0;
@@ -4805,7 +4804,7 @@ int perf_counter_init_task(struct task_struct *child)
4805 * context, or of whatever the parent is a clone of. 4804 * context, or of whatever the parent is a clone of.
4806 * Note that if the parent is a clone, it could get 4805 * Note that if the parent is a clone, it could get
4807 * uncloned at any point, but that doesn't matter 4806 * uncloned at any point, but that doesn't matter
4808 * because the list of counters and the generation 4807 * because the list of events and the generation
4809 * count can't have changed since we took the mutex. 4808 * count can't have changed since we took the mutex.
4810 */ 4809 */
4811 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); 4810 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
@@ -4826,41 +4825,41 @@ int perf_counter_init_task(struct task_struct *child)
4826 return ret; 4825 return ret;
4827} 4826}
4828 4827
4829static void __cpuinit perf_counter_init_cpu(int cpu) 4828static void __cpuinit perf_event_init_cpu(int cpu)
4830{ 4829{
4831 struct perf_cpu_context *cpuctx; 4830 struct perf_cpu_context *cpuctx;
4832 4831
4833 cpuctx = &per_cpu(perf_cpu_context, cpu); 4832 cpuctx = &per_cpu(perf_cpu_context, cpu);
4834 __perf_counter_init_context(&cpuctx->ctx, NULL); 4833 __perf_event_init_context(&cpuctx->ctx, NULL);
4835 4834
4836 spin_lock(&perf_resource_lock); 4835 spin_lock(&perf_resource_lock);
4837 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; 4836 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
4838 spin_unlock(&perf_resource_lock); 4837 spin_unlock(&perf_resource_lock);
4839 4838
4840 hw_perf_counter_setup(cpu); 4839 hw_perf_event_setup(cpu);
4841} 4840}
4842 4841
4843#ifdef CONFIG_HOTPLUG_CPU 4842#ifdef CONFIG_HOTPLUG_CPU
4844static void __perf_counter_exit_cpu(void *info) 4843static void __perf_event_exit_cpu(void *info)
4845{ 4844{
4846 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 4845 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4847 struct perf_counter_context *ctx = &cpuctx->ctx; 4846 struct perf_event_context *ctx = &cpuctx->ctx;
4848 struct perf_counter *counter, *tmp; 4847 struct perf_event *event, *tmp;
4849 4848
4850 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) 4849 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry)
4851 __perf_counter_remove_from_context(counter); 4850 __perf_event_remove_from_context(event);
4852} 4851}
4853static void perf_counter_exit_cpu(int cpu) 4852static void perf_event_exit_cpu(int cpu)
4854{ 4853{
4855 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 4854 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4856 struct perf_counter_context *ctx = &cpuctx->ctx; 4855 struct perf_event_context *ctx = &cpuctx->ctx;
4857 4856
4858 mutex_lock(&ctx->mutex); 4857 mutex_lock(&ctx->mutex);
4859 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); 4858 smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
4860 mutex_unlock(&ctx->mutex); 4859 mutex_unlock(&ctx->mutex);
4861} 4860}
4862#else 4861#else
4863static inline void perf_counter_exit_cpu(int cpu) { } 4862static inline void perf_event_exit_cpu(int cpu) { }
4864#endif 4863#endif
4865 4864
4866static int __cpuinit 4865static int __cpuinit
@@ -4872,17 +4871,17 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4872 4871
4873 case CPU_UP_PREPARE: 4872 case CPU_UP_PREPARE:
4874 case CPU_UP_PREPARE_FROZEN: 4873 case CPU_UP_PREPARE_FROZEN:
4875 perf_counter_init_cpu(cpu); 4874 perf_event_init_cpu(cpu);
4876 break; 4875 break;
4877 4876
4878 case CPU_ONLINE: 4877 case CPU_ONLINE:
4879 case CPU_ONLINE_FROZEN: 4878 case CPU_ONLINE_FROZEN:
4880 hw_perf_counter_setup_online(cpu); 4879 hw_perf_event_setup_online(cpu);
4881 break; 4880 break;
4882 4881
4883 case CPU_DOWN_PREPARE: 4882 case CPU_DOWN_PREPARE:
4884 case CPU_DOWN_PREPARE_FROZEN: 4883 case CPU_DOWN_PREPARE_FROZEN:
4885 perf_counter_exit_cpu(cpu); 4884 perf_event_exit_cpu(cpu);
4886 break; 4885 break;
4887 4886
4888 default: 4887 default:
@@ -4900,7 +4899,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
4900 .priority = 20, 4899 .priority = 20,
4901}; 4900};
4902 4901
4903void __init perf_counter_init(void) 4902void __init perf_event_init(void)
4904{ 4903{
4905 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, 4904 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4906 (void *)(long)smp_processor_id()); 4905 (void *)(long)smp_processor_id());
@@ -4926,7 +4925,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
4926 err = strict_strtoul(buf, 10, &val); 4925 err = strict_strtoul(buf, 10, &val);
4927 if (err) 4926 if (err)
4928 return err; 4927 return err;
4929 if (val > perf_max_counters) 4928 if (val > perf_max_events)
4930 return -EINVAL; 4929 return -EINVAL;
4931 4930
4932 spin_lock(&perf_resource_lock); 4931 spin_lock(&perf_resource_lock);
@@ -4934,8 +4933,8 @@ perf_set_reserve_percpu(struct sysdev_class *class,
4934 for_each_online_cpu(cpu) { 4933 for_each_online_cpu(cpu) {
4935 cpuctx = &per_cpu(perf_cpu_context, cpu); 4934 cpuctx = &per_cpu(perf_cpu_context, cpu);
4936 spin_lock_irq(&cpuctx->ctx.lock); 4935 spin_lock_irq(&cpuctx->ctx.lock);
4937 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, 4936 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
4938 perf_max_counters - perf_reserved_percpu); 4937 perf_max_events - perf_reserved_percpu);
4939 cpuctx->max_pertask = mpt; 4938 cpuctx->max_pertask = mpt;
4940 spin_unlock_irq(&cpuctx->ctx.lock); 4939 spin_unlock_irq(&cpuctx->ctx.lock);
4941 } 4940 }
@@ -4990,12 +4989,12 @@ static struct attribute *perfclass_attrs[] = {
4990 4989
4991static struct attribute_group perfclass_attr_group = { 4990static struct attribute_group perfclass_attr_group = {
4992 .attrs = perfclass_attrs, 4991 .attrs = perfclass_attrs,
4993 .name = "perf_counters", 4992 .name = "perf_events",
4994}; 4993};
4995 4994
4996static int __init perf_counter_sysfs_init(void) 4995static int __init perf_event_sysfs_init(void)
4997{ 4996{
4998 return sysfs_create_group(&cpu_sysdev_class.kset.kobj, 4997 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4999 &perfclass_attr_group); 4998 &perfclass_attr_group);
5000} 4999}
5001device_initcall(perf_counter_sysfs_init); 5000device_initcall(perf_event_sysfs_init);
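
The hunks above finish renaming the syscall entry point (sys_perf_counter_open to sys_perf_event_open) and the software-event PMUs it dispatches to. As a minimal userspace sketch of the renamed interface (illustrative only, not part of this commit, and assuming the renamed <linux/perf_event.h> header and the per-arch __NR_perf_event_open numbers from the unistd.h changes are installed), opening one of the software events handled by sw_perf_event_init() looks roughly like this:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;		/* routed to sw_perf_event_init() */
	attr.config = PERF_COUNT_SW_TASK_CLOCK;		/* per-task case uses perf_ops_task_clock */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* pid: self */,
		     -1 /* cpu: any */, -1 /* group_fd */, 0 /* flags */);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... workload under measurement ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("task clock: %lld ns\n", count);
	close(fd);
	return 0;
}

Passing pid = 0 and cpu = -1 attaches the event to the calling task on any CPU, which is the per-task case that makes sw_perf_event_init() above pick perf_ops_task_clock rather than perf_ops_cpu_clock.
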
diff --git a/kernel/sched.c b/kernel/sched.c
index 830967e18285..91843ba7f237 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
39#include <linux/completion.h> 39#include <linux/completion.h>
40#include <linux/kernel_stat.h> 40#include <linux/kernel_stat.h>
41#include <linux/debug_locks.h> 41#include <linux/debug_locks.h>
42#include <linux/perf_counter.h> 42#include <linux/perf_event.h>
43#include <linux/security.h> 43#include <linux/security.h>
44#include <linux/notifier.h> 44#include <linux/notifier.h>
45#include <linux/profile.h> 45#include <linux/profile.h>
@@ -2053,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2053 if (task_hot(p, old_rq->clock, NULL)) 2053 if (task_hot(p, old_rq->clock, NULL))
2054 schedstat_inc(p, se.nr_forced2_migrations); 2054 schedstat_inc(p, se.nr_forced2_migrations);
2055#endif 2055#endif
2056 perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, 2056 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
2057 1, 1, NULL, 0); 2057 1, 1, NULL, 0);
2058 } 2058 }
2059 p->se.vruntime -= old_cfsrq->min_vruntime - 2059 p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2718,7 +2718,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2718 */ 2718 */
2719 prev_state = prev->state; 2719 prev_state = prev->state;
2720 finish_arch_switch(prev); 2720 finish_arch_switch(prev);
2721 perf_counter_task_sched_in(current, cpu_of(rq)); 2721 perf_event_task_sched_in(current, cpu_of(rq));
2722 finish_lock_switch(rq, prev); 2722 finish_lock_switch(rq, prev);
2723 2723
2724 fire_sched_in_preempt_notifiers(current); 2724 fire_sched_in_preempt_notifiers(current);
@@ -5193,7 +5193,7 @@ void scheduler_tick(void)
5193 curr->sched_class->task_tick(rq, curr, 0); 5193 curr->sched_class->task_tick(rq, curr, 0);
5194 spin_unlock(&rq->lock); 5194 spin_unlock(&rq->lock);
5195 5195
5196 perf_counter_task_tick(curr, cpu); 5196 perf_event_task_tick(curr, cpu);
5197 5197
5198#ifdef CONFIG_SMP 5198#ifdef CONFIG_SMP
5199 rq->idle_at_tick = idle_cpu(cpu); 5199 rq->idle_at_tick = idle_cpu(cpu);
@@ -5409,7 +5409,7 @@ need_resched_nonpreemptible:
5409 5409
5410 if (likely(prev != next)) { 5410 if (likely(prev != next)) {
5411 sched_info_switch(prev, next); 5411 sched_info_switch(prev, next);
5412 perf_counter_task_sched_out(prev, next, cpu); 5412 perf_event_task_sched_out(prev, next, cpu);
5413 5413
5414 rq->nr_switches++; 5414 rq->nr_switches++;
5415 rq->curr = next; 5415 rq->curr = next;
@@ -7671,7 +7671,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7671/* 7671/*
7672 * Register at high priority so that task migration (migrate_all_tasks) 7672 * Register at high priority so that task migration (migrate_all_tasks)
7673 * happens before everything else. This has to be lower priority than 7673 * happens before everything else. This has to be lower priority than
7674 * the notifier in the perf_counter subsystem, though. 7674 * the notifier in the perf_event subsystem, though.
7675 */ 7675 */
7676static struct notifier_block __cpuinitdata migration_notifier = { 7676static struct notifier_block __cpuinitdata migration_notifier = {
7677 .notifier_call = migration_call, 7677 .notifier_call = migration_call,
@@ -9528,7 +9528,7 @@ void __init sched_init(void)
9528 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 9528 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
9529#endif /* SMP */ 9529#endif /* SMP */
9530 9530
9531 perf_counter_init(); 9531 perf_event_init();
9532 9532
9533 scheduler_running = 1; 9533 scheduler_running = 1;
9534} 9534}
diff --git a/kernel/sys.c b/kernel/sys.c
index b3f1097c76fa..ea5c3bcac881 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -14,7 +14,7 @@
14#include <linux/prctl.h> 14#include <linux/prctl.h>
15#include <linux/highuid.h> 15#include <linux/highuid.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/perf_counter.h> 17#include <linux/perf_event.h>
18#include <linux/resource.h> 18#include <linux/resource.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/kexec.h> 20#include <linux/kexec.h>
@@ -1511,11 +1511,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1511 case PR_SET_TSC: 1511 case PR_SET_TSC:
1512 error = SET_TSC_CTL(arg2); 1512 error = SET_TSC_CTL(arg2);
1513 break; 1513 break;
1514 case PR_TASK_PERF_COUNTERS_DISABLE: 1514 case PR_TASK_PERF_EVENTS_DISABLE:
1515 error = perf_counter_task_disable(); 1515 error = perf_event_task_disable();
1516 break; 1516 break;
1517 case PR_TASK_PERF_COUNTERS_ENABLE: 1517 case PR_TASK_PERF_EVENTS_ENABLE:
1518 error = perf_counter_task_enable(); 1518 error = perf_event_task_enable();
1519 break; 1519 break;
1520 case PR_GET_TIMERSLACK: 1520 case PR_GET_TIMERSLACK:
1521 error = current->timer_slack_ns; 1521 error = current->timer_slack_ns;
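
The renamed prctl cases above keep their old behaviour: they enable or disable every event attached to the calling task. A hedged sketch of bracketing a measured region with the renamed options (assuming the matching PR_TASK_PERF_EVENTS_* rename in include/linux/prctl.h; not code from this commit):

#include <sys/prctl.h>
#include <linux/prctl.h>

/* Run fn() with the calling task's events counting, then stop them again. */
static void run_measured(void (*fn)(void))
{
	prctl(PR_TASK_PERF_EVENTS_ENABLE);	/* -> perf_event_task_enable() */
	fn();
	prctl(PR_TASK_PERF_EVENTS_DISABLE);	/* -> perf_event_task_disable() */
}
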
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 68320f6b07b5..515bc230ac2a 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -177,4 +177,4 @@ cond_syscall(sys_eventfd);
177cond_syscall(sys_eventfd2); 177cond_syscall(sys_eventfd2);
178 178
179/* performance counters: */ 179/* performance counters: */
180cond_syscall(sys_perf_counter_open); 180cond_syscall(sys_perf_event_open);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1a631ba684a4..6ba49c7cb128 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -50,7 +50,7 @@
50#include <linux/reboot.h> 50#include <linux/reboot.h>
51#include <linux/ftrace.h> 51#include <linux/ftrace.h>
52#include <linux/slow-work.h> 52#include <linux/slow-work.h>
53#include <linux/perf_counter.h> 53#include <linux/perf_event.h>
54 54
55#include <asm/uaccess.h> 55#include <asm/uaccess.h>
56#include <asm/processor.h> 56#include <asm/processor.h>
@@ -964,28 +964,28 @@ static struct ctl_table kern_table[] = {
964 .child = slow_work_sysctls, 964 .child = slow_work_sysctls,
965 }, 965 },
966#endif 966#endif
967#ifdef CONFIG_PERF_COUNTERS 967#ifdef CONFIG_PERF_EVENTS
968 { 968 {
969 .ctl_name = CTL_UNNUMBERED, 969 .ctl_name = CTL_UNNUMBERED,
970 .procname = "perf_counter_paranoid", 970 .procname = "perf_event_paranoid",
971 .data = &sysctl_perf_counter_paranoid, 971 .data = &sysctl_perf_event_paranoid,
972 .maxlen = sizeof(sysctl_perf_counter_paranoid), 972 .maxlen = sizeof(sysctl_perf_event_paranoid),
973 .mode = 0644, 973 .mode = 0644,
974 .proc_handler = &proc_dointvec, 974 .proc_handler = &proc_dointvec,
975 }, 975 },
976 { 976 {
977 .ctl_name = CTL_UNNUMBERED, 977 .ctl_name = CTL_UNNUMBERED,
978 .procname = "perf_counter_mlock_kb", 978 .procname = "perf_event_mlock_kb",
979 .data = &sysctl_perf_counter_mlock, 979 .data = &sysctl_perf_event_mlock,
980 .maxlen = sizeof(sysctl_perf_counter_mlock), 980 .maxlen = sizeof(sysctl_perf_event_mlock),
981 .mode = 0644, 981 .mode = 0644,
982 .proc_handler = &proc_dointvec, 982 .proc_handler = &proc_dointvec,
983 }, 983 },
984 { 984 {
985 .ctl_name = CTL_UNNUMBERED, 985 .ctl_name = CTL_UNNUMBERED,
986 .procname = "perf_counter_max_sample_rate", 986 .procname = "perf_event_max_sample_rate",
987 .data = &sysctl_perf_counter_sample_rate, 987 .data = &sysctl_perf_event_sample_rate,
988 .maxlen = sizeof(sysctl_perf_counter_sample_rate), 988 .maxlen = sizeof(sysctl_perf_event_sample_rate),
989 .mode = 0644, 989 .mode = 0644,
990 .proc_handler = &proc_dointvec, 990 .proc_handler = &proc_dointvec,
991 }, 991 },
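The three sysctls only change their procname and backing variables here; they remain plain integers handled by proc_dointvec, so they keep appearing under /proc/sys/kernel/ with the new names. A hedged sketch that dumps whichever of them the running kernel exposes:

/*
 * Print the renamed perf sysctls, if present. The paths follow the
 * .procname fields above; a missing file simply means the kernel was
 * built without CONFIG_PERF_EVENTS (or predates the rename).
 */
#include <stdio.h>

int main(void)
{
	static const char * const knobs[] = {
		"/proc/sys/kernel/perf_event_paranoid",
		"/proc/sys/kernel/perf_event_mlock_kb",
		"/proc/sys/kernel/perf_event_max_sample_rate",
	};
	unsigned int i;

	for (i = 0; i < sizeof(knobs) / sizeof(knobs[0]); i++) {
		FILE *f = fopen(knobs[i], "r");
		int val;

		if (!f)
			continue;
		if (fscanf(f, "%d", &val) == 1)
			printf("%s = %d\n", knobs[i], val);
		fclose(f);
	}
	return 0;
}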
diff --git a/kernel/timer.c b/kernel/timer.c
index bbb51074680e..811e5c391456 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,7 +37,7 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/tick.h> 38#include <linux/tick.h>
39#include <linux/kallsyms.h> 39#include <linux/kallsyms.h>
40#include <linux/perf_counter.h> 40#include <linux/perf_event.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42 42
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -1187,7 +1187,7 @@ static void run_timer_softirq(struct softirq_action *h)
1187{ 1187{
1188 struct tvec_base *base = __get_cpu_var(tvec_bases); 1188 struct tvec_base *base = __get_cpu_var(tvec_bases);
1189 1189
1190 perf_counter_do_pending(); 1190 perf_event_do_pending();
1191 1191
1192 hrtimer_run_pending(); 1192 hrtimer_run_pending();
1193 1193
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7a3550cf2597..9fbce6c9d2e1 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -2,7 +2,7 @@
2#include <trace/events/syscalls.h> 2#include <trace/events/syscalls.h>
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/ftrace.h> 4#include <linux/ftrace.h>
5#include <linux/perf_counter.h> 5#include <linux/perf_event.h>
6#include <asm/syscall.h> 6#include <asm/syscall.h>
7 7
8#include "trace_output.h" 8#include "trace_output.h"
@@ -433,7 +433,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
433 rec->nr = syscall_nr; 433 rec->nr = syscall_nr;
434 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 434 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
435 (unsigned long *)&rec->args); 435 (unsigned long *)&rec->args);
436 perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); 436 perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
437 437
438end: 438end:
439 local_irq_restore(flags); 439 local_irq_restore(flags);
@@ -532,7 +532,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
532 rec->nr = syscall_nr; 532 rec->nr = syscall_nr;
533 rec->ret = syscall_get_return_value(current, regs); 533 rec->ret = syscall_get_return_value(current, regs);
534 534
535 perf_tpcounter_event(sys_data->exit_id, 0, 1, rec, size); 535 perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
536 536
537end: 537end:
538 local_irq_restore(flags); 538 local_irq_restore(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
index 26892e346d8f..376492ed08f4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -28,7 +28,7 @@
28#include <linux/mempolicy.h> 28#include <linux/mempolicy.h>
29#include <linux/rmap.h> 29#include <linux/rmap.h>
30#include <linux/mmu_notifier.h> 30#include <linux/mmu_notifier.h>
31#include <linux/perf_counter.h> 31#include <linux/perf_event.h>
32 32
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34#include <asm/cacheflush.h> 34#include <asm/cacheflush.h>
@@ -1220,7 +1220,7 @@ munmap_back:
1220 if (correct_wcount) 1220 if (correct_wcount)
1221 atomic_inc(&inode->i_writecount); 1221 atomic_inc(&inode->i_writecount);
1222out: 1222out:
1223 perf_counter_mmap(vma); 1223 perf_event_mmap(vma);
1224 1224
1225 mm->total_vm += len >> PAGE_SHIFT; 1225 mm->total_vm += len >> PAGE_SHIFT;
1226 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); 1226 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -2308,7 +2308,7 @@ int install_special_mapping(struct mm_struct *mm,
2308 2308
2309 mm->total_vm += len >> PAGE_SHIFT; 2309 mm->total_vm += len >> PAGE_SHIFT;
2310 2310
2311 perf_counter_mmap(vma); 2311 perf_event_mmap(vma);
2312 2312
2313 return 0; 2313 return 0;
2314} 2314}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index d80311baeb2d..8bc969d8112d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -23,7 +23,7 @@
23#include <linux/swapops.h> 23#include <linux/swapops.h>
24#include <linux/mmu_notifier.h> 24#include <linux/mmu_notifier.h>
25#include <linux/migrate.h> 25#include <linux/migrate.h>
26#include <linux/perf_counter.h> 26#include <linux/perf_event.h>
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
28#include <asm/pgtable.h> 28#include <asm/pgtable.h>
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
@@ -300,7 +300,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
300 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); 300 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
301 if (error) 301 if (error)
302 goto out; 302 goto out;
303 perf_counter_mmap(vma); 303 perf_event_mmap(vma);
304 nstart = tmp; 304 nstart = tmp;
305 305
306 if (nstart < prev->vm_end) 306 if (nstart < prev->vm_end)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 0aba8b6e9c54..b5f1953b6144 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -318,7 +318,7 @@ export PERL_PATH
318 318
319LIB_FILE=libperf.a 319LIB_FILE=libperf.a
320 320
321LIB_H += ../../include/linux/perf_counter.h 321LIB_H += ../../include/linux/perf_event.h
322LIB_H += ../../include/linux/rbtree.h 322LIB_H += ../../include/linux/rbtree.h
323LIB_H += ../../include/linux/list.h 323LIB_H += ../../include/linux/list.h
324LIB_H += util/include/linux/list.h 324LIB_H += util/include/linux/list.h
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 043d85b7e254..1ec741615814 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -505,7 +505,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
505 return -1; 505 return -1;
506 } 506 }
507 507
508 if (event->header.misc & PERF_EVENT_MISC_KERNEL) { 508 if (event->header.misc & PERF_RECORD_MISC_KERNEL) {
509 show = SHOW_KERNEL; 509 show = SHOW_KERNEL;
510 level = 'k'; 510 level = 'k';
511 511
@@ -513,7 +513,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
513 513
514 dump_printf(" ...... dso: %s\n", dso->name); 514 dump_printf(" ...... dso: %s\n", dso->name);
515 515
516 } else if (event->header.misc & PERF_EVENT_MISC_USER) { 516 } else if (event->header.misc & PERF_RECORD_MISC_USER) {
517 517
518 show = SHOW_USER; 518 show = SHOW_USER;
519 level = '.'; 519 level = '.';
@@ -565,7 +565,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
565 565
566 thread = threads__findnew(event->mmap.pid, &threads, &last_match); 566 thread = threads__findnew(event->mmap.pid, &threads, &last_match);
567 567
568 dump_printf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", 568 dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n",
569 (void *)(offset + head), 569 (void *)(offset + head),
570 (void *)(long)(event->header.size), 570 (void *)(long)(event->header.size),
571 event->mmap.pid, 571 event->mmap.pid,
@@ -575,7 +575,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
575 event->mmap.filename); 575 event->mmap.filename);
576 576
577 if (thread == NULL || map == NULL) { 577 if (thread == NULL || map == NULL) {
578 dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n"); 578 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
579 return 0; 579 return 0;
580 } 580 }
581 581
@@ -591,14 +591,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
591 struct thread *thread; 591 struct thread *thread;
592 592
593 thread = threads__findnew(event->comm.pid, &threads, &last_match); 593 thread = threads__findnew(event->comm.pid, &threads, &last_match);
594 dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 594 dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
595 (void *)(offset + head), 595 (void *)(offset + head),
596 (void *)(long)(event->header.size), 596 (void *)(long)(event->header.size),
597 event->comm.comm, event->comm.pid); 597 event->comm.comm, event->comm.pid);
598 598
599 if (thread == NULL || 599 if (thread == NULL ||
600 thread__set_comm(thread, event->comm.comm)) { 600 thread__set_comm(thread, event->comm.comm)) {
601 dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); 601 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
602 return -1; 602 return -1;
603 } 603 }
604 total_comm++; 604 total_comm++;
@@ -614,7 +614,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
614 614
615 thread = threads__findnew(event->fork.pid, &threads, &last_match); 615 thread = threads__findnew(event->fork.pid, &threads, &last_match);
616 parent = threads__findnew(event->fork.ppid, &threads, &last_match); 616 parent = threads__findnew(event->fork.ppid, &threads, &last_match);
617 dump_printf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", 617 dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n",
618 (void *)(offset + head), 618 (void *)(offset + head),
619 (void *)(long)(event->header.size), 619 (void *)(long)(event->header.size),
620 event->fork.pid, event->fork.ppid); 620 event->fork.pid, event->fork.ppid);
@@ -627,7 +627,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
627 return 0; 627 return 0;
628 628
629 if (!thread || !parent || thread__fork(thread, parent)) { 629 if (!thread || !parent || thread__fork(thread, parent)) {
630 dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n"); 630 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
631 return -1; 631 return -1;
632 } 632 }
633 total_fork++; 633 total_fork++;
@@ -639,23 +639,23 @@ static int
639process_event(event_t *event, unsigned long offset, unsigned long head) 639process_event(event_t *event, unsigned long offset, unsigned long head)
640{ 640{
641 switch (event->header.type) { 641 switch (event->header.type) {
642 case PERF_EVENT_SAMPLE: 642 case PERF_RECORD_SAMPLE:
643 return process_sample_event(event, offset, head); 643 return process_sample_event(event, offset, head);
644 644
645 case PERF_EVENT_MMAP: 645 case PERF_RECORD_MMAP:
646 return process_mmap_event(event, offset, head); 646 return process_mmap_event(event, offset, head);
647 647
648 case PERF_EVENT_COMM: 648 case PERF_RECORD_COMM:
649 return process_comm_event(event, offset, head); 649 return process_comm_event(event, offset, head);
650 650
651 case PERF_EVENT_FORK: 651 case PERF_RECORD_FORK:
652 return process_fork_event(event, offset, head); 652 return process_fork_event(event, offset, head);
653 /* 653 /*
654 * We dont process them right now but they are fine: 654 * We dont process them right now but they are fine:
655 */ 655 */
656 656
657 case PERF_EVENT_THROTTLE: 657 case PERF_RECORD_THROTTLE:
658 case PERF_EVENT_UNTHROTTLE: 658 case PERF_RECORD_UNTHROTTLE:
659 return 0; 659 return 0;
660 660
661 default: 661 default:
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 2459e5a22ed8..a5a050af8e7d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -77,7 +77,7 @@ static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
77 77
78static unsigned long mmap_read_head(struct mmap_data *md) 78static unsigned long mmap_read_head(struct mmap_data *md)
79{ 79{
80 struct perf_counter_mmap_page *pc = md->base; 80 struct perf_event_mmap_page *pc = md->base;
81 long head; 81 long head;
82 82
83 head = pc->data_head; 83 head = pc->data_head;
@@ -88,7 +88,7 @@ static unsigned long mmap_read_head(struct mmap_data *md)
88 88
89static void mmap_write_tail(struct mmap_data *md, unsigned long tail) 89static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
90{ 90{
91 struct perf_counter_mmap_page *pc = md->base; 91 struct perf_event_mmap_page *pc = md->base;
92 92
93 /* 93 /*
94 * ensure all reads are done before we write the tail out. 94 * ensure all reads are done before we write the tail out.
@@ -233,7 +233,7 @@ static pid_t pid_synthesize_comm_event(pid_t pid, int full)
233 } 233 }
234 } 234 }
235 235
236 comm_ev.header.type = PERF_EVENT_COMM; 236 comm_ev.header.type = PERF_RECORD_COMM;
237 size = ALIGN(size, sizeof(u64)); 237 size = ALIGN(size, sizeof(u64));
238 comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); 238 comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size);
239 239
@@ -288,7 +288,7 @@ static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid)
288 while (1) { 288 while (1) {
289 char bf[BUFSIZ], *pbf = bf; 289 char bf[BUFSIZ], *pbf = bf;
290 struct mmap_event mmap_ev = { 290 struct mmap_event mmap_ev = {
291 .header = { .type = PERF_EVENT_MMAP }, 291 .header = { .type = PERF_RECORD_MMAP },
292 }; 292 };
293 int n; 293 int n;
294 size_t size; 294 size_t size;
@@ -355,7 +355,7 @@ static void synthesize_all(void)
355 355
356static int group_fd; 356static int group_fd;
357 357
358static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr) 358static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
359{ 359{
360 struct perf_header_attr *h_attr; 360 struct perf_header_attr *h_attr;
361 361
@@ -371,7 +371,7 @@ static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int
371 371
372static void create_counter(int counter, int cpu, pid_t pid) 372static void create_counter(int counter, int cpu, pid_t pid)
373{ 373{
374 struct perf_counter_attr *attr = attrs + counter; 374 struct perf_event_attr *attr = attrs + counter;
375 struct perf_header_attr *h_attr; 375 struct perf_header_attr *h_attr;
376 int track = !counter; /* only the first counter needs these */ 376 int track = !counter; /* only the first counter needs these */
377 struct { 377 struct {
@@ -417,7 +417,7 @@ static void create_counter(int counter, int cpu, pid_t pid)
417 attr->disabled = 1; 417 attr->disabled = 1;
418 418
419try_again: 419try_again:
420 fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); 420 fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0);
421 421
422 if (fd[nr_cpu][counter] < 0) { 422 if (fd[nr_cpu][counter] < 0) {
423 int err = errno; 423 int err = errno;
@@ -444,7 +444,7 @@ try_again:
444 printf("\n"); 444 printf("\n");
445 error("perfcounter syscall returned with %d (%s)\n", 445 error("perfcounter syscall returned with %d (%s)\n",
446 fd[nr_cpu][counter], strerror(err)); 446 fd[nr_cpu][counter], strerror(err));
447 die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n"); 447 die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
448 exit(-1); 448 exit(-1);
449 } 449 }
450 450
@@ -478,7 +478,7 @@ try_again:
478 if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { 478 if (multiplex && fd[nr_cpu][counter] != multiplex_fd) {
479 int ret; 479 int ret;
480 480
481 ret = ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_SET_OUTPUT, multiplex_fd); 481 ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
482 assert(ret != -1); 482 assert(ret != -1);
483 } else { 483 } else {
484 event_array[nr_poll].fd = fd[nr_cpu][counter]; 484 event_array[nr_poll].fd = fd[nr_cpu][counter];
@@ -496,7 +496,7 @@ try_again:
496 } 496 }
497 } 497 }
498 498
499 ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_ENABLE); 499 ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE);
500} 500}
501 501
502static void open_counters(int cpu, pid_t pid) 502static void open_counters(int cpu, pid_t pid)
@@ -642,7 +642,7 @@ static int __cmd_record(int argc, const char **argv)
642 if (done) { 642 if (done) {
643 for (i = 0; i < nr_cpu; i++) { 643 for (i = 0; i < nr_cpu; i++) {
644 for (counter = 0; counter < nr_counters; counter++) 644 for (counter = 0; counter < nr_counters; counter++)
645 ioctl(fd[i][counter], PERF_COUNTER_IOC_DISABLE); 645 ioctl(fd[i][counter], PERF_EVENT_IOC_DISABLE);
646 } 646 }
647 } 647 }
648 } 648 }
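builtin-record's mmap helpers above now operate on struct perf_event_mmap_page, but the protocol is unchanged: the kernel advances data_head as it writes records into the 2^n data pages, and the reader must issue a read barrier after loading the head, as design.txt further down still notes. A reduced sketch of the reader side, with __sync_synchronize() assumed as a stand-in for the per-arch rmb() that tools/perf defines:

/*
 * Sketch: load the published head of the perf mmap ring buffer. The
 * barrier orders the head load before any subsequent reads of record
 * data; tools/perf uses an arch-specific rmb() for this.
 */
#include <linux/perf_event.h>	/* struct perf_event_mmap_page */

#define rmb()	__sync_synchronize()	/* assumption: stand-in barrier */

static unsigned long mmap_data_head(struct perf_event_mmap_page *pc)
{
	unsigned long head = pc->data_head;

	rmb();
	return head;
}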
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index cdf9a8d27bb9..19669c20088e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1121,7 +1121,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1121 more_data += sizeof(u64); 1121 more_data += sizeof(u64);
1122 } 1122 }
1123 1123
1124 dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 1124 dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
1125 (void *)(offset + head), 1125 (void *)(offset + head),
1126 (void *)(long)(event->header.size), 1126 (void *)(long)(event->header.size),
1127 event->header.misc, 1127 event->header.misc,
@@ -1158,9 +1158,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1158 if (comm_list && !strlist__has_entry(comm_list, thread->comm)) 1158 if (comm_list && !strlist__has_entry(comm_list, thread->comm))
1159 return 0; 1159 return 0;
1160 1160
1161 cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; 1161 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1162 1162
1163 if (cpumode == PERF_EVENT_MISC_KERNEL) { 1163 if (cpumode == PERF_RECORD_MISC_KERNEL) {
1164 show = SHOW_KERNEL; 1164 show = SHOW_KERNEL;
1165 level = 'k'; 1165 level = 'k';
1166 1166
@@ -1168,7 +1168,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1168 1168
1169 dump_printf(" ...... dso: %s\n", dso->name); 1169 dump_printf(" ...... dso: %s\n", dso->name);
1170 1170
1171 } else if (cpumode == PERF_EVENT_MISC_USER) { 1171 } else if (cpumode == PERF_RECORD_MISC_USER) {
1172 1172
1173 show = SHOW_USER; 1173 show = SHOW_USER;
1174 level = '.'; 1174 level = '.';
@@ -1210,7 +1210,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
1210 1210
1211 thread = threads__findnew(event->mmap.pid, &threads, &last_match); 1211 thread = threads__findnew(event->mmap.pid, &threads, &last_match);
1212 1212
1213 dump_printf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n", 1213 dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
1214 (void *)(offset + head), 1214 (void *)(offset + head),
1215 (void *)(long)(event->header.size), 1215 (void *)(long)(event->header.size),
1216 event->mmap.pid, 1216 event->mmap.pid,
@@ -1221,7 +1221,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
1221 event->mmap.filename); 1221 event->mmap.filename);
1222 1222
1223 if (thread == NULL || map == NULL) { 1223 if (thread == NULL || map == NULL) {
1224 dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n"); 1224 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1225 return 0; 1225 return 0;
1226 } 1226 }
1227 1227
@@ -1238,14 +1238,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
1238 1238
1239 thread = threads__findnew(event->comm.pid, &threads, &last_match); 1239 thread = threads__findnew(event->comm.pid, &threads, &last_match);
1240 1240
1241 dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 1241 dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
1242 (void *)(offset + head), 1242 (void *)(offset + head),
1243 (void *)(long)(event->header.size), 1243 (void *)(long)(event->header.size),
1244 event->comm.comm, event->comm.pid); 1244 event->comm.comm, event->comm.pid);
1245 1245
1246 if (thread == NULL || 1246 if (thread == NULL ||
1247 thread__set_comm_adjust(thread, event->comm.comm)) { 1247 thread__set_comm_adjust(thread, event->comm.comm)) {
1248 dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); 1248 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
1249 return -1; 1249 return -1;
1250 } 1250 }
1251 total_comm++; 1251 total_comm++;
@@ -1262,10 +1262,10 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
1262 thread = threads__findnew(event->fork.pid, &threads, &last_match); 1262 thread = threads__findnew(event->fork.pid, &threads, &last_match);
1263 parent = threads__findnew(event->fork.ppid, &threads, &last_match); 1263 parent = threads__findnew(event->fork.ppid, &threads, &last_match);
1264 1264
1265 dump_printf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n", 1265 dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n",
1266 (void *)(offset + head), 1266 (void *)(offset + head),
1267 (void *)(long)(event->header.size), 1267 (void *)(long)(event->header.size),
1268 event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT", 1268 event->header.type == PERF_RECORD_FORK ? "FORK" : "EXIT",
1269 event->fork.pid, event->fork.tid, 1269 event->fork.pid, event->fork.tid,
1270 event->fork.ppid, event->fork.ptid); 1270 event->fork.ppid, event->fork.ptid);
1271 1271
@@ -1276,11 +1276,11 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
1276 if (thread == parent) 1276 if (thread == parent)
1277 return 0; 1277 return 0;
1278 1278
1279 if (event->header.type == PERF_EVENT_EXIT) 1279 if (event->header.type == PERF_RECORD_EXIT)
1280 return 0; 1280 return 0;
1281 1281
1282 if (!thread || !parent || thread__fork(thread, parent)) { 1282 if (!thread || !parent || thread__fork(thread, parent)) {
1283 dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n"); 1283 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1284 return -1; 1284 return -1;
1285 } 1285 }
1286 total_fork++; 1286 total_fork++;
@@ -1291,7 +1291,7 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
1291static int 1291static int
1292process_lost_event(event_t *event, unsigned long offset, unsigned long head) 1292process_lost_event(event_t *event, unsigned long offset, unsigned long head)
1293{ 1293{
1294 dump_printf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n", 1294 dump_printf("%p [%p]: PERF_RECORD_LOST: id:%Ld: lost:%Ld\n",
1295 (void *)(offset + head), 1295 (void *)(offset + head),
1296 (void *)(long)(event->header.size), 1296 (void *)(long)(event->header.size),
1297 event->lost.id, 1297 event->lost.id,
@@ -1305,7 +1305,7 @@ process_lost_event(event_t *event, unsigned long offset, unsigned long head)
1305static int 1305static int
1306process_read_event(event_t *event, unsigned long offset, unsigned long head) 1306process_read_event(event_t *event, unsigned long offset, unsigned long head)
1307{ 1307{
1308 struct perf_counter_attr *attr; 1308 struct perf_event_attr *attr;
1309 1309
1310 attr = perf_header__find_attr(event->read.id, header); 1310 attr = perf_header__find_attr(event->read.id, header);
1311 1311
@@ -1319,7 +1319,7 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head)
1319 event->read.value); 1319 event->read.value);
1320 } 1320 }
1321 1321
1322 dump_printf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n", 1322 dump_printf("%p [%p]: PERF_RECORD_READ: %d %d %s %Lu\n",
1323 (void *)(offset + head), 1323 (void *)(offset + head),
1324 (void *)(long)(event->header.size), 1324 (void *)(long)(event->header.size),
1325 event->read.pid, 1325 event->read.pid,
@@ -1337,31 +1337,31 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
1337 trace_event(event); 1337 trace_event(event);
1338 1338
1339 switch (event->header.type) { 1339 switch (event->header.type) {
1340 case PERF_EVENT_SAMPLE: 1340 case PERF_RECORD_SAMPLE:
1341 return process_sample_event(event, offset, head); 1341 return process_sample_event(event, offset, head);
1342 1342
1343 case PERF_EVENT_MMAP: 1343 case PERF_RECORD_MMAP:
1344 return process_mmap_event(event, offset, head); 1344 return process_mmap_event(event, offset, head);
1345 1345
1346 case PERF_EVENT_COMM: 1346 case PERF_RECORD_COMM:
1347 return process_comm_event(event, offset, head); 1347 return process_comm_event(event, offset, head);
1348 1348
1349 case PERF_EVENT_FORK: 1349 case PERF_RECORD_FORK:
1350 case PERF_EVENT_EXIT: 1350 case PERF_RECORD_EXIT:
1351 return process_task_event(event, offset, head); 1351 return process_task_event(event, offset, head);
1352 1352
1353 case PERF_EVENT_LOST: 1353 case PERF_RECORD_LOST:
1354 return process_lost_event(event, offset, head); 1354 return process_lost_event(event, offset, head);
1355 1355
1356 case PERF_EVENT_READ: 1356 case PERF_RECORD_READ:
1357 return process_read_event(event, offset, head); 1357 return process_read_event(event, offset, head);
1358 1358
1359 /* 1359 /*
1360 * We dont process them right now but they are fine: 1360 * We dont process them right now but they are fine:
1361 */ 1361 */
1362 1362
1363 case PERF_EVENT_THROTTLE: 1363 case PERF_RECORD_THROTTLE:
1364 case PERF_EVENT_UNTHROTTLE: 1364 case PERF_RECORD_UNTHROTTLE:
1365 return 0; 1365 return 0;
1366 1366
1367 default: 1367 default:
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 275d79c6627a..ea9c15c0cdfe 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1573,7 +1573,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1573 more_data += sizeof(u64); 1573 more_data += sizeof(u64);
1574 } 1574 }
1575 1575
1576 dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 1576 dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
1577 (void *)(offset + head), 1577 (void *)(offset + head),
1578 (void *)(long)(event->header.size), 1578 (void *)(long)(event->header.size),
1579 event->header.misc, 1579 event->header.misc,
@@ -1589,9 +1589,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1589 return -1; 1589 return -1;
1590 } 1590 }
1591 1591
1592 cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; 1592 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1593 1593
1594 if (cpumode == PERF_EVENT_MISC_KERNEL) { 1594 if (cpumode == PERF_RECORD_MISC_KERNEL) {
1595 show = SHOW_KERNEL; 1595 show = SHOW_KERNEL;
1596 level = 'k'; 1596 level = 'k';
1597 1597
@@ -1599,7 +1599,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1599 1599
1600 dump_printf(" ...... dso: %s\n", dso->name); 1600 dump_printf(" ...... dso: %s\n", dso->name);
1601 1601
1602 } else if (cpumode == PERF_EVENT_MISC_USER) { 1602 } else if (cpumode == PERF_RECORD_MISC_USER) {
1603 1603
1604 show = SHOW_USER; 1604 show = SHOW_USER;
1605 level = '.'; 1605 level = '.';
@@ -1626,23 +1626,23 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
1626 1626
1627 nr_events++; 1627 nr_events++;
1628 switch (event->header.type) { 1628 switch (event->header.type) {
1629 case PERF_EVENT_MMAP: 1629 case PERF_RECORD_MMAP:
1630 return 0; 1630 return 0;
1631 case PERF_EVENT_LOST: 1631 case PERF_RECORD_LOST:
1632 nr_lost_chunks++; 1632 nr_lost_chunks++;
1633 nr_lost_events += event->lost.lost; 1633 nr_lost_events += event->lost.lost;
1634 return 0; 1634 return 0;
1635 1635
1636 case PERF_EVENT_COMM: 1636 case PERF_RECORD_COMM:
1637 return process_comm_event(event, offset, head); 1637 return process_comm_event(event, offset, head);
1638 1638
1639 case PERF_EVENT_EXIT ... PERF_EVENT_READ: 1639 case PERF_RECORD_EXIT ... PERF_RECORD_READ:
1640 return 0; 1640 return 0;
1641 1641
1642 case PERF_EVENT_SAMPLE: 1642 case PERF_RECORD_SAMPLE:
1643 return process_sample_event(event, offset, head); 1643 return process_sample_event(event, offset, head);
1644 1644
1645 case PERF_EVENT_MAX: 1645 case PERF_RECORD_MAX:
1646 default: 1646 default:
1647 return -1; 1647 return -1;
1648 } 1648 }
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 61b828236c11..16af2d82e858 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -48,7 +48,7 @@
48#include <sys/prctl.h> 48#include <sys/prctl.h>
49#include <math.h> 49#include <math.h>
50 50
51static struct perf_counter_attr default_attrs[] = { 51static struct perf_event_attr default_attrs[] = {
52 52
53 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 53 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
54 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, 54 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
@@ -130,11 +130,11 @@ struct stats runtime_cycles_stats;
130 attrs[counter].config == PERF_COUNT_##c) 130 attrs[counter].config == PERF_COUNT_##c)
131 131
132#define ERR_PERF_OPEN \ 132#define ERR_PERF_OPEN \
133"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n" 133"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"
134 134
135static void create_perf_stat_counter(int counter, int pid) 135static void create_perf_stat_counter(int counter, int pid)
136{ 136{
137 struct perf_counter_attr *attr = attrs + counter; 137 struct perf_event_attr *attr = attrs + counter;
138 138
139 if (scale) 139 if (scale)
140 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 140 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -144,7 +144,7 @@ static void create_perf_stat_counter(int counter, int pid)
144 unsigned int cpu; 144 unsigned int cpu;
145 145
146 for (cpu = 0; cpu < nr_cpus; cpu++) { 146 for (cpu = 0; cpu < nr_cpus; cpu++) {
147 fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); 147 fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0);
148 if (fd[cpu][counter] < 0 && verbose) 148 if (fd[cpu][counter] < 0 && verbose)
149 fprintf(stderr, ERR_PERF_OPEN, counter, 149 fprintf(stderr, ERR_PERF_OPEN, counter,
150 fd[cpu][counter], strerror(errno)); 150 fd[cpu][counter], strerror(errno));
@@ -154,7 +154,7 @@ static void create_perf_stat_counter(int counter, int pid)
154 attr->disabled = 1; 154 attr->disabled = 1;
155 attr->enable_on_exec = 1; 155 attr->enable_on_exec = 1;
156 156
157 fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0); 157 fd[0][counter] = sys_perf_event_open(attr, pid, -1, -1, 0);
158 if (fd[0][counter] < 0 && verbose) 158 if (fd[0][counter] < 0 && verbose)
159 fprintf(stderr, ERR_PERF_OPEN, counter, 159 fprintf(stderr, ERR_PERF_OPEN, counter,
160 fd[0][counter], strerror(errno)); 160 fd[0][counter], strerror(errno));
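builtin-stat's default_attrs and create_perf_stat_counter() show the counting-mode side of the renamed API: fill in a struct perf_event_attr, open it with sys_perf_event_open(), and read() back the accumulated value. A minimal self-contained sketch along those lines, using the raw syscall since libc has no wrapper; the task-clock event is the same software counter as in default_attrs above.

/*
 * Sketch: count task clock over a busy loop on the current task.
 * pid == 0 attaches to the caller, cpu == -1 means "any CPU".
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	volatile unsigned long spin;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("sys_perf_event_open");
		return 1;
	}

	for (spin = 0; spin < 10000000; spin++)
		;	/* workload being measured */

	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("task-clock: %llu\n", count);
	close(fd);
	return 0;
}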
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 600406396274..4405681b3134 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -937,21 +937,21 @@ process_event(event_t *event)
937 937
938 switch (event->header.type) { 938 switch (event->header.type) {
939 939
940 case PERF_EVENT_COMM: 940 case PERF_RECORD_COMM:
941 return process_comm_event(event); 941 return process_comm_event(event);
942 case PERF_EVENT_FORK: 942 case PERF_RECORD_FORK:
943 return process_fork_event(event); 943 return process_fork_event(event);
944 case PERF_EVENT_EXIT: 944 case PERF_RECORD_EXIT:
945 return process_exit_event(event); 945 return process_exit_event(event);
946 case PERF_EVENT_SAMPLE: 946 case PERF_RECORD_SAMPLE:
947 return queue_sample_event(event); 947 return queue_sample_event(event);
948 948
949 /* 949 /*
950 * We dont process them right now but they are fine: 950 * We dont process them right now but they are fine:
951 */ 951 */
952 case PERF_EVENT_MMAP: 952 case PERF_RECORD_MMAP:
953 case PERF_EVENT_THROTTLE: 953 case PERF_RECORD_THROTTLE:
954 case PERF_EVENT_UNTHROTTLE: 954 case PERF_RECORD_UNTHROTTLE:
955 return 0; 955 return 0;
956 956
957 default: 957 default:
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 4002ccb36750..1ca88896eee4 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -901,7 +901,7 @@ struct mmap_data {
901 901
902static unsigned int mmap_read_head(struct mmap_data *md) 902static unsigned int mmap_read_head(struct mmap_data *md)
903{ 903{
904 struct perf_counter_mmap_page *pc = md->base; 904 struct perf_event_mmap_page *pc = md->base;
905 int head; 905 int head;
906 906
907 head = pc->data_head; 907 head = pc->data_head;
@@ -977,9 +977,9 @@ static void mmap_read_counter(struct mmap_data *md)
977 977
978 old += size; 978 old += size;
979 979
980 if (event->header.type == PERF_EVENT_SAMPLE) { 980 if (event->header.type == PERF_RECORD_SAMPLE) {
981 int user = 981 int user =
982 (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER; 982 (event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) == PERF_RECORD_MISC_USER;
983 process_event(event->ip.ip, md->counter, user); 983 process_event(event->ip.ip, md->counter, user);
984 } 984 }
985 } 985 }
@@ -1005,7 +1005,7 @@ int group_fd;
1005 1005
1006static void start_counter(int i, int counter) 1006static void start_counter(int i, int counter)
1007{ 1007{
1008 struct perf_counter_attr *attr; 1008 struct perf_event_attr *attr;
1009 int cpu; 1009 int cpu;
1010 1010
1011 cpu = profile_cpu; 1011 cpu = profile_cpu;
@@ -1019,7 +1019,7 @@ static void start_counter(int i, int counter)
1019 attr->inherit = (cpu < 0) && inherit; 1019 attr->inherit = (cpu < 0) && inherit;
1020 1020
1021try_again: 1021try_again:
1022 fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0); 1022 fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);
1023 1023
1024 if (fd[i][counter] < 0) { 1024 if (fd[i][counter] < 0) {
1025 int err = errno; 1025 int err = errno;
@@ -1044,7 +1044,7 @@ try_again:
1044 printf("\n"); 1044 printf("\n");
1045 error("perfcounter syscall returned with %d (%s)\n", 1045 error("perfcounter syscall returned with %d (%s)\n",
1046 fd[i][counter], strerror(err)); 1046 fd[i][counter], strerror(err));
1047 die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n"); 1047 die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
1048 exit(-1); 1048 exit(-1);
1049 } 1049 }
1050 assert(fd[i][counter] >= 0); 1050 assert(fd[i][counter] >= 0);
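The cpumode test just above is repeated across the tools, so here it is in isolation with the renamed identifiers: mask header.misc with PERF_RECORD_MISC_CPUMODE_MASK and compare against the PERF_RECORD_MISC_* values to classify where the sample's IP was taken. The 'k'/'.' markers match what builtin-top and builtin-report print.

/*
 * Sketch: classify a sample by the renamed misc bits.
 */
#include <linux/perf_event.h>	/* struct perf_event_header */

static char sample_level(const struct perf_event_header *header)
{
	unsigned int cpumode = header->misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (cpumode == PERF_RECORD_MISC_KERNEL)
		return 'k';	/* kernel-space sample */
	if (cpumode == PERF_RECORD_MISC_USER)
		return '.';	/* user-space sample */
	return '?';		/* anything else (e.g. hypervisor) */
}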
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 914ab366e369..e9d256e2f47d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -35,14 +35,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
35 35
36 thread = threads__findnew(event->comm.pid, &threads, &last_match); 36 thread = threads__findnew(event->comm.pid, &threads, &last_match);
37 37
38 dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 38 dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
39 (void *)(offset + head), 39 (void *)(offset + head),
40 (void *)(long)(event->header.size), 40 (void *)(long)(event->header.size),
41 event->comm.comm, event->comm.pid); 41 event->comm.comm, event->comm.pid);
42 42
43 if (thread == NULL || 43 if (thread == NULL ||
44 thread__set_comm(thread, event->comm.comm)) { 44 thread__set_comm(thread, event->comm.comm)) {
45 dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); 45 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
46 return -1; 46 return -1;
47 } 47 }
48 total_comm++; 48 total_comm++;
@@ -82,7 +82,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
82 more_data += sizeof(u64); 82 more_data += sizeof(u64);
83 } 83 }
84 84
85 dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 85 dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
86 (void *)(offset + head), 86 (void *)(offset + head),
87 (void *)(long)(event->header.size), 87 (void *)(long)(event->header.size),
88 event->header.misc, 88 event->header.misc,
@@ -98,9 +98,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
98 return -1; 98 return -1;
99 } 99 }
100 100
101 cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; 101 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
102 102
103 if (cpumode == PERF_EVENT_MISC_KERNEL) { 103 if (cpumode == PERF_RECORD_MISC_KERNEL) {
104 show = SHOW_KERNEL; 104 show = SHOW_KERNEL;
105 level = 'k'; 105 level = 'k';
106 106
@@ -108,7 +108,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
108 108
109 dump_printf(" ...... dso: %s\n", dso->name); 109 dump_printf(" ...... dso: %s\n", dso->name);
110 110
111 } else if (cpumode == PERF_EVENT_MISC_USER) { 111 } else if (cpumode == PERF_RECORD_MISC_USER) {
112 112
113 show = SHOW_USER; 113 show = SHOW_USER;
114 level = '.'; 114 level = '.';
@@ -146,19 +146,19 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
146 trace_event(event); 146 trace_event(event);
147 147
148 switch (event->header.type) { 148 switch (event->header.type) {
149 case PERF_EVENT_MMAP ... PERF_EVENT_LOST: 149 case PERF_RECORD_MMAP ... PERF_RECORD_LOST:
150 return 0; 150 return 0;
151 151
152 case PERF_EVENT_COMM: 152 case PERF_RECORD_COMM:
153 return process_comm_event(event, offset, head); 153 return process_comm_event(event, offset, head);
154 154
155 case PERF_EVENT_EXIT ... PERF_EVENT_READ: 155 case PERF_RECORD_EXIT ... PERF_RECORD_READ:
156 return 0; 156 return 0;
157 157
158 case PERF_EVENT_SAMPLE: 158 case PERF_RECORD_SAMPLE:
159 return process_sample_event(event, offset, head); 159 return process_sample_event(event, offset, head);
160 160
161 case PERF_EVENT_MAX: 161 case PERF_RECORD_MAX:
162 default: 162 default:
163 return -1; 163 return -1;
164 } 164 }
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index f71e0d245cba..f1946d107b10 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -18,10 +18,10 @@ underlying hardware counters.
18Performance counters are accessed via special file descriptors. 18Performance counters are accessed via special file descriptors.
19There's one file descriptor per virtual counter used. 19There's one file descriptor per virtual counter used.
20 20
21The special file descriptor is opened via the perf_counter_open() 21The special file descriptor is opened via the perf_event_open()
22system call: 22system call:
23 23
24 int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, 24 int sys_perf_event_open(struct perf_event_hw_event *hw_event_uptr,
25 pid_t pid, int cpu, int group_fd, 25 pid_t pid, int cpu, int group_fd,
26 unsigned long flags); 26 unsigned long flags);
27 27
@@ -32,9 +32,9 @@ can be used to set the blocking mode, etc.
32Multiple counters can be kept open at a time, and the counters 32Multiple counters can be kept open at a time, and the counters
33can be poll()ed. 33can be poll()ed.
34 34
35When creating a new counter fd, 'perf_counter_hw_event' is: 35When creating a new counter fd, 'perf_event_hw_event' is:
36 36
37struct perf_counter_hw_event { 37struct perf_event_hw_event {
38 /* 38 /*
39 * The MSB of the config word signifies if the rest contains cpu 39 * The MSB of the config word signifies if the rest contains cpu
40 * specific (raw) counter configuration data, if unset, the next 40 * specific (raw) counter configuration data, if unset, the next
@@ -93,7 +93,7 @@ specified by 'event_id':
93 93
94/* 94/*
95 * Generalized performance counter event types, used by the hw_event.event_id 95 * Generalized performance counter event types, used by the hw_event.event_id
96 * parameter of the sys_perf_counter_open() syscall: 96 * parameter of the sys_perf_event_open() syscall:
97 */ 97 */
98enum hw_event_ids { 98enum hw_event_ids {
99 /* 99 /*
@@ -159,7 +159,7 @@ in size.
159 * reads on the counter should return the indicated quantities, 159 * reads on the counter should return the indicated quantities,
160 * in increasing order of bit value, after the counter value. 160 * in increasing order of bit value, after the counter value.
161 */ 161 */
162enum perf_counter_read_format { 162enum perf_event_read_format {
163 PERF_FORMAT_TOTAL_TIME_ENABLED = 1, 163 PERF_FORMAT_TOTAL_TIME_ENABLED = 1,
164 PERF_FORMAT_TOTAL_TIME_RUNNING = 2, 164 PERF_FORMAT_TOTAL_TIME_RUNNING = 2,
165}; 165};
@@ -178,7 +178,7 @@ interrupt:
178 * Bits that can be set in hw_event.record_type to request information 178 * Bits that can be set in hw_event.record_type to request information
179 * in the overflow packets. 179 * in the overflow packets.
180 */ 180 */
181enum perf_counter_record_format { 181enum perf_event_record_format {
182 PERF_RECORD_IP = 1U << 0, 182 PERF_RECORD_IP = 1U << 0,
183 PERF_RECORD_TID = 1U << 1, 183 PERF_RECORD_TID = 1U << 1,
184 PERF_RECORD_TIME = 1U << 2, 184 PERF_RECORD_TIME = 1U << 2,
@@ -228,7 +228,7 @@ these events are recorded in the ring-buffer (see below).
228The 'comm' bit allows tracking of process comm data on process creation. 228The 'comm' bit allows tracking of process comm data on process creation.
229This too is recorded in the ring-buffer (see below). 229This too is recorded in the ring-buffer (see below).
230 230
231The 'pid' parameter to the perf_counter_open() system call allows the 231The 'pid' parameter to the perf_event_open() system call allows the
232counter to be specific to a task: 232counter to be specific to a task:
233 233
234 pid == 0: if the pid parameter is zero, the counter is attached to the 234 pid == 0: if the pid parameter is zero, the counter is attached to the
@@ -258,7 +258,7 @@ The 'flags' parameter is currently unused and must be zero.
258 258
259The 'group_fd' parameter allows counter "groups" to be set up. A 259The 'group_fd' parameter allows counter "groups" to be set up. A
260counter group has one counter which is the group "leader". The leader 260counter group has one counter which is the group "leader". The leader
261is created first, with group_fd = -1 in the perf_counter_open call 261is created first, with group_fd = -1 in the perf_event_open call
262that creates it. The rest of the group members are created 262that creates it. The rest of the group members are created
263subsequently, with group_fd giving the fd of the group leader. 263subsequently, with group_fd giving the fd of the group leader.
264(A single counter on its own is created with group_fd = -1 and is 264(A single counter on its own is created with group_fd = -1 and is
@@ -277,13 +277,13 @@ tracking are logged into a ring-buffer. This ring-buffer is created and
277accessed through mmap(). 277accessed through mmap().
278 278
279The mmap size should be 1+2^n pages, where the first page is a meta-data page 279The mmap size should be 1+2^n pages, where the first page is a meta-data page
280(struct perf_counter_mmap_page) that contains various bits of information such 280(struct perf_event_mmap_page) that contains various bits of information such
281as where the ring-buffer head is. 281as where the ring-buffer head is.
282 282
283/* 283/*
284 * Structure of the page that can be mapped via mmap 284 * Structure of the page that can be mapped via mmap
285 */ 285 */
286struct perf_counter_mmap_page { 286struct perf_event_mmap_page {
287 __u32 version; /* version number of this structure */ 287 __u32 version; /* version number of this structure */
288 __u32 compat_version; /* lowest version this is compat with */ 288 __u32 compat_version; /* lowest version this is compat with */
289 289
@@ -317,7 +317,7 @@ struct perf_counter_mmap_page {
317 * Control data for the mmap() data buffer. 317 * Control data for the mmap() data buffer.
318 * 318 *
319 * User-space reading this value should issue an rmb(), on SMP capable 319 * User-space reading this value should issue an rmb(), on SMP capable
320 * platforms, after reading this value -- see perf_counter_wakeup(). 320 * platforms, after reading this value -- see perf_event_wakeup().
321 */ 321 */
322 __u32 data_head; /* head in the data section */ 322 __u32 data_head; /* head in the data section */
323}; 323};
@@ -327,9 +327,9 @@ NOTE: the hw-counter userspace bits are arch specific and are currently only
327 327
328The following 2^n pages are the ring-buffer which contains events of the form: 328The following 2^n pages are the ring-buffer which contains events of the form:
329 329
330#define PERF_EVENT_MISC_KERNEL (1 << 0) 330#define PERF_RECORD_MISC_KERNEL (1 << 0)
331#define PERF_EVENT_MISC_USER (1 << 1) 331#define PERF_RECORD_MISC_USER (1 << 1)
332#define PERF_EVENT_MISC_OVERFLOW (1 << 2) 332#define PERF_RECORD_MISC_OVERFLOW (1 << 2)
333 333
334struct perf_event_header { 334struct perf_event_header {
335 __u32 type; 335 __u32 type;
@@ -353,8 +353,8 @@ enum perf_event_type {
353 * char filename[]; 353 * char filename[];
354 * }; 354 * };
355 */ 355 */
356 PERF_EVENT_MMAP = 1, 356 PERF_RECORD_MMAP = 1,
357 PERF_EVENT_MUNMAP = 2, 357 PERF_RECORD_MUNMAP = 2,
358 358
359 /* 359 /*
360 * struct { 360 * struct {
@@ -364,10 +364,10 @@ enum perf_event_type {
364 * char comm[]; 364 * char comm[];
365 * }; 365 * };
366 */ 366 */
367 PERF_EVENT_COMM = 3, 367 PERF_RECORD_COMM = 3,
368 368
369 /* 369 /*
370 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field 370 * When header.misc & PERF_RECORD_MISC_OVERFLOW the event_type field
371 * will be PERF_RECORD_* 371 * will be PERF_RECORD_*
372 * 372 *
373 * struct { 373 * struct {
@@ -397,7 +397,7 @@ Notification of new events is possible through poll()/select()/epoll() and
397fcntl() managing signals. 397fcntl() managing signals.
398 398
399Normally a notification is generated for every page filled, however one can 399Normally a notification is generated for every page filled, however one can
400additionally set perf_counter_hw_event.wakeup_events to generate one every 400additionally set perf_event_hw_event.wakeup_events to generate one every
401so many counter overflow events. 401so many counter overflow events.
402 402
403Future work will include a splice() interface to the ring-buffer. 403Future work will include a splice() interface to the ring-buffer.
@@ -409,11 +409,11 @@ events but does continue to exist and maintain its count value.
409 409
410An individual counter or counter group can be enabled with 410An individual counter or counter group can be enabled with
411 411
412 ioctl(fd, PERF_COUNTER_IOC_ENABLE); 412 ioctl(fd, PERF_EVENT_IOC_ENABLE);
413 413
414or disabled with 414or disabled with
415 415
416 ioctl(fd, PERF_COUNTER_IOC_DISABLE); 416 ioctl(fd, PERF_EVENT_IOC_DISABLE);
417 417
418Enabling or disabling the leader of a group enables or disables the 418Enabling or disabling the leader of a group enables or disables the
419whole group; that is, while the group leader is disabled, none of the 419whole group; that is, while the group leader is disabled, none of the
@@ -424,16 +424,16 @@ other counter.
424 424
425Additionally, non-inherited overflow counters can use 425Additionally, non-inherited overflow counters can use
426 426
427 ioctl(fd, PERF_COUNTER_IOC_REFRESH, nr); 427 ioctl(fd, PERF_EVENT_IOC_REFRESH, nr);
428 428
429to enable a counter for 'nr' events, after which it gets disabled again. 429to enable a counter for 'nr' events, after which it gets disabled again.
430 430
431A process can enable or disable all the counter groups that are 431A process can enable or disable all the counter groups that are
432attached to it, using prctl: 432attached to it, using prctl:
433 433
434 prctl(PR_TASK_PERF_COUNTERS_ENABLE); 434 prctl(PR_TASK_PERF_EVENTS_ENABLE);
435 435
436 prctl(PR_TASK_PERF_COUNTERS_DISABLE); 436 prctl(PR_TASK_PERF_EVENTS_DISABLE);
437 437
438This applies to all counters on the current process, whether created 438This applies to all counters on the current process, whether created
439by this process or by another, and doesn't affect any counters that 439by this process or by another, and doesn't affect any counters that
@@ -447,11 +447,11 @@ Arch requirements
447If your architecture does not have hardware performance metrics, you can 447If your architecture does not have hardware performance metrics, you can
448still use the generic software counters based on hrtimers for sampling. 448still use the generic software counters based on hrtimers for sampling.
449 449
450So to start with, in order to add HAVE_PERF_COUNTERS to your Kconfig, you 450So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
451will need at least this: 451will need at least this:
452 - asm/perf_counter.h - a basic stub will suffice at first 452 - asm/perf_event.h - a basic stub will suffice at first
453 - support for atomic64 types (and associated helper functions) 453 - support for atomic64 types (and associated helper functions)
454 - set_perf_counter_pending() implemented 454 - set_perf_event_pending() implemented
455 455
456If your architecture does have hardware capabilities, you can override the 456If your architecture does have hardware capabilities, you can override the
457weak stub hw_perf_counter_init() to register hardware counters. 457weak stub hw_perf_event_init() to register hardware counters.
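Pulling the renamed pieces of design.txt together, here is a hedged sketch of the counter-group flow it describes: open the leader with group_fd == -1, add a member by passing the leader's fd, then gate the whole group through the leader with the PERF_EVENT_IOC_ENABLE/DISABLE ioctls. It leans on the sys_perf_event_open() helper from tools/perf/perf.h, shown next in this diff (the helper also fills in attr.size).

/*
 * Sketch: a two-counter group (task clock + context switches) on the
 * current task. Only the leader starts disabled, so nothing in the
 * group counts until the leader is enabled.
 */
#include <string.h>
#include <sys/ioctl.h>
#include "perf.h"	/* sys_perf_event_open(), pulls in linux/perf_event.h */

static int open_sw_counter(unsigned long long config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type     = PERF_TYPE_SOFTWARE;
	attr.config   = config;
	attr.disabled = (group_fd == -1);	/* leader only */

	return sys_perf_event_open(&attr, 0, -1, group_fd, 0);
}

int perf_group_example(void)
{
	int leader = open_sw_counter(PERF_COUNT_SW_TASK_CLOCK, -1);
	int member = open_sw_counter(PERF_COUNT_SW_CONTEXT_SWITCHES, leader);

	if (leader < 0 || member < 0)
		return -1;

	ioctl(leader, PERF_EVENT_IOC_ENABLE);	/* the whole group counts */
	/* ... measured region ... */
	ioctl(leader, PERF_EVENT_IOC_DISABLE);	/* the whole group stops */
	return 0;
}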
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 2abeb20d0bf3..8cc4623afd6f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -52,15 +52,15 @@
52#include <sys/types.h> 52#include <sys/types.h>
53#include <sys/syscall.h> 53#include <sys/syscall.h>
54 54
55#include "../../include/linux/perf_counter.h" 55#include "../../include/linux/perf_event.h"
56#include "util/types.h" 56#include "util/types.h"
57 57
58/* 58/*
59 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all 59 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
60 * counters in the current task. 60 * counters in the current task.
61 */ 61 */
62#define PR_TASK_PERF_COUNTERS_DISABLE 31 62#define PR_TASK_PERF_EVENTS_DISABLE 31
63#define PR_TASK_PERF_COUNTERS_ENABLE 32 63#define PR_TASK_PERF_EVENTS_ENABLE 32
64 64
65#ifndef NSEC_PER_SEC 65#ifndef NSEC_PER_SEC
66# define NSEC_PER_SEC 1000000000ULL 66# define NSEC_PER_SEC 1000000000ULL
@@ -90,12 +90,12 @@ static inline unsigned long long rdclock(void)
90 _min1 < _min2 ? _min1 : _min2; }) 90 _min1 < _min2 ? _min1 : _min2; })
91 91
92static inline int 92static inline int
93sys_perf_counter_open(struct perf_counter_attr *attr, 93sys_perf_event_open(struct perf_event_attr *attr,
94 pid_t pid, int cpu, int group_fd, 94 pid_t pid, int cpu, int group_fd,
95 unsigned long flags) 95 unsigned long flags)
96{ 96{
97 attr->size = sizeof(*attr); 97 attr->size = sizeof(*attr);
98 return syscall(__NR_perf_counter_open, attr, pid, cpu, 98 return syscall(__NR_perf_event_open, attr, pid, cpu,
99 group_fd, flags); 99 group_fd, flags);
100} 100}
101 101
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 018d414a09d1..2c9c26d6ded0 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -1,5 +1,5 @@
1#ifndef __PERF_EVENT_H 1#ifndef __PERF_RECORD_H
2#define __PERF_EVENT_H 2#define __PERF_RECORD_H
3#include "../perf.h" 3#include "../perf.h"
4#include "util.h" 4#include "util.h"
5#include <linux/list.h> 5#include <linux/list.h>
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index bb4fca3efcc3..e306857b2c2b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -9,7 +9,7 @@
9/* 9/*
10 * Create new perf.data header attribute: 10 * Create new perf.data header attribute:
11 */ 11 */
12struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr) 12struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr)
13{ 13{
14 struct perf_header_attr *self = malloc(sizeof(*self)); 14 struct perf_header_attr *self = malloc(sizeof(*self));
15 15
@@ -134,7 +134,7 @@ struct perf_file_section {
134}; 134};
135 135
136struct perf_file_attr { 136struct perf_file_attr {
137 struct perf_counter_attr attr; 137 struct perf_event_attr attr;
138 struct perf_file_section ids; 138 struct perf_file_section ids;
139}; 139};
140 140
@@ -320,7 +320,7 @@ u64 perf_header__sample_type(struct perf_header *header)
320 return type; 320 return type;
321} 321}
322 322
323struct perf_counter_attr * 323struct perf_event_attr *
324perf_header__find_attr(u64 id, struct perf_header *header) 324perf_header__find_attr(u64 id, struct perf_header *header)
325{ 325{
326 int i; 326 int i;
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 7b0e84a87179..a0761bc7863c 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -1,12 +1,12 @@
1#ifndef _PERF_HEADER_H 1#ifndef _PERF_HEADER_H
2#define _PERF_HEADER_H 2#define _PERF_HEADER_H
3 3
4#include "../../../include/linux/perf_counter.h" 4#include "../../../include/linux/perf_event.h"
5#include <sys/types.h> 5#include <sys/types.h>
6#include "types.h" 6#include "types.h"
7 7
8struct perf_header_attr { 8struct perf_header_attr {
9 struct perf_counter_attr attr; 9 struct perf_event_attr attr;
10 int ids, size; 10 int ids, size;
11 u64 *id; 11 u64 *id;
12 off_t id_offset; 12 off_t id_offset;
@@ -34,11 +34,11 @@ char *perf_header__find_event(u64 id);
34 34
35 35
36struct perf_header_attr * 36struct perf_header_attr *
37perf_header_attr__new(struct perf_counter_attr *attr); 37perf_header_attr__new(struct perf_event_attr *attr);
38void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); 38void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
39 39
40u64 perf_header__sample_type(struct perf_header *header); 40u64 perf_header__sample_type(struct perf_header *header);
41struct perf_counter_attr * 41struct perf_event_attr *
42perf_header__find_attr(u64 id, struct perf_header *header); 42perf_header__find_attr(u64 id, struct perf_header *header);
43 43
44 44
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 89172fd0038b..13ab4b842d49 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -10,7 +10,7 @@
10 10
11int nr_counters; 11int nr_counters;
12 12
13struct perf_counter_attr attrs[MAX_COUNTERS]; 13struct perf_event_attr attrs[MAX_COUNTERS];
14 14
15struct event_symbol { 15struct event_symbol {
16 u8 type; 16 u8 type;
@@ -48,13 +48,13 @@ static struct event_symbol event_symbols[] = {
48 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, 48 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
49}; 49};
50 50
51#define __PERF_COUNTER_FIELD(config, name) \ 51#define __PERF_EVENT_FIELD(config, name) \
52 ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) 52 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
53 53
54#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) 54#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
55#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) 55#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
56#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) 56#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
57#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) 57#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
58 58
59static const char *hw_event_names[] = { 59static const char *hw_event_names[] = {
60 "cycles", 60 "cycles",
@@ -352,7 +352,7 @@ static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int
352} 352}
353 353
354static enum event_result 354static enum event_result
355parse_generic_hw_event(const char **str, struct perf_counter_attr *attr) 355parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
356{ 356{
357 const char *s = *str; 357 const char *s = *str;
358 int cache_type = -1, cache_op = -1, cache_result = -1; 358 int cache_type = -1, cache_op = -1, cache_result = -1;
@@ -417,7 +417,7 @@ parse_single_tracepoint_event(char *sys_name,
417 const char *evt_name, 417 const char *evt_name,
418 unsigned int evt_length, 418 unsigned int evt_length,
419 char *flags, 419 char *flags,
420 struct perf_counter_attr *attr, 420 struct perf_event_attr *attr,
421 const char **strp) 421 const char **strp)
422{ 422{
423 char evt_path[MAXPATHLEN]; 423 char evt_path[MAXPATHLEN];
@@ -505,7 +505,7 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags)
505 505
506 506
507static enum event_result parse_tracepoint_event(const char **strp, 507static enum event_result parse_tracepoint_event(const char **strp,
508 struct perf_counter_attr *attr) 508 struct perf_event_attr *attr)
509{ 509{
510 const char *evt_name; 510 const char *evt_name;
511 char *flags; 511 char *flags;
@@ -563,7 +563,7 @@ static int check_events(const char *str, unsigned int i)
563} 563}
564 564
565static enum event_result 565static enum event_result
566parse_symbolic_event(const char **strp, struct perf_counter_attr *attr) 566parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
567{ 567{
568 const char *str = *strp; 568 const char *str = *strp;
569 unsigned int i; 569 unsigned int i;
@@ -582,7 +582,7 @@ parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
 }
 
 static enum event_result
-parse_raw_event(const char **strp, struct perf_counter_attr *attr)
+parse_raw_event(const char **strp, struct perf_event_attr *attr)
 {
 	const char *str = *strp;
 	u64 config;
@@ -601,7 +601,7 @@ parse_raw_event(const char **strp, struct perf_counter_attr *attr)
 }
 
 static enum event_result
-parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
+parse_numeric_event(const char **strp, struct perf_event_attr *attr)
 {
 	const char *str = *strp;
 	char *endp;
@@ -623,7 +623,7 @@ parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
 }
 
 static enum event_result
-parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
+parse_event_modifier(const char **strp, struct perf_event_attr *attr)
 {
 	const char *str = *strp;
 	int eu = 1, ek = 1, eh = 1;
@@ -656,7 +656,7 @@ parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
  * Symbolic names are (almost) exactly matched.
  */
 static enum event_result
-parse_event_symbols(const char **str, struct perf_counter_attr *attr)
+parse_event_symbols(const char **str, struct perf_event_attr *attr)
 {
 	enum event_result ret;
 
@@ -711,7 +711,7 @@ static void store_event_type(const char *orgname)
 
 int parse_events(const struct option *opt __used, const char *str, int unset __used)
 {
-	struct perf_counter_attr attr;
+	struct perf_event_attr attr;
 	enum event_result ret;
 
 	if (strchr(str, ':'))
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 60704c15961f..30c608112845 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -16,7 +16,7 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
 
 extern int nr_counters;
 
-extern struct perf_counter_attr attrs[MAX_COUNTERS];
+extern struct perf_event_attr attrs[MAX_COUNTERS];
 
 extern const char *event_name(int ctr);
 extern const char *__event_name(int type, u64 config);
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 1fd824c1f1c4..af4b0573b37f 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -480,12 +480,12 @@ out:
 }
 
 static struct tracepoint_path *
-get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters)
+get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
 {
 	struct tracepoint_path path, *ppath = &path;
 	int i;
 
-	for (i = 0; i < nb_counters; i++) {
+	for (i = 0; i < nb_events; i++) {
 		if (pattrs[i].type != PERF_TYPE_TRACEPOINT)
 			continue;
 		ppath->next = tracepoint_id_to_path(pattrs[i].config);
@@ -496,7 +496,7 @@ get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters)
 
 	return path.next;
 }
-void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters)
+void read_tracing_data(struct perf_event_attr *pattrs, int nb_events)
 {
 	char buf[BUFSIZ];
 	struct tracepoint_path *tps;
@@ -530,7 +530,7 @@ void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters)
 	page_size = getpagesize();
 	write_or_die(&page_size, 4);
 
-	tps = get_tracepoints_path(pattrs, nb_counters);
+	tps = get_tracepoints_path(pattrs, nb_events);
 
 	read_header_files();
 	read_ftrace_files(tps);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index d35ebf1e29ff..693f815c9429 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -240,6 +240,6 @@ unsigned long long
 raw_field_value(struct event *event, const char *name, void *data);
 void *raw_field_ptr(struct event *event, const char *name, void *data);
 
-void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters);
+void read_tracing_data(struct perf_event_attr *pattrs, int nb_events);
 
 #endif /* _TRACE_EVENTS_H */