author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-11-20 14:36:02 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-12-21 05:01:07 -0500
commit		35edc2a5095efb189e60dc32bbb9d2663aec6d24 (patch)
tree		3296a0dc54c4eb9d9ae5e0715d7521ecbb6d6f7e
parent		9a0f05cb36888550d1509d60aa55788615abea44 (diff)
perf, arch: Rework perf_event_index()
Put the logic to compute the event index into a per pmu method. This
is required because the x86 rules are weird and wonderful and don't
match the capabilities of the current scheme.
AFAIK only powerpc actually has a usable userspace read of the PMCs,
but I'm not at all sure anybody actually uses it.
ARM is restored to the default since it currently does not support
userspace access at all. And all software events are provided with a
method that reports their index as 0 (disabled).
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Arun Sharma <asharma@fb.com>
Link: http://lkml.kernel.org/n/tip-dfydxodki16lylkt3gl2j7cw@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
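[Editor's note, not part of the patch: a hedged sketch of the userspace side that perf_event_mmap_page::index feeds, i.e. the classic mmap self-monitoring read loop this per-pmu ->event_idx() value is exported for. Per the message above, only powerpc had a usable userspace PMC read at this point; the rdpmc flavour shown here is the x86 case this rework prepares for. The helper names (rdpmc, mmap_read_self) are illustrative; the lock/index/offset fields are from the perf_event_mmap_page ABI. An index of 0 — what the software-event and breakpoint PMUs now report — means the counter cannot be read from userspace.]

/*
 * Illustrative only: self-monitoring read using the mmap'ed counter page.
 */
#include <stdint.h>
#include <linux/perf_event.h>

static uint64_t rdpmc(uint32_t counter)
{
	uint32_t low, high;

	/* x86 only: read hardware performance-monitoring counter 'counter' */
	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
	return low | ((uint64_t)high << 32);
}

static uint64_t mmap_read_self(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	do {
		seq = pc->lock;
		asm volatile("" ::: "memory");	/* read barrier */

		idx   = pc->index;		/* pmu->event_idx() result */
		count = pc->offset;
		if (idx)			/* 0 => no userspace read */
			count += rdpmc(idx - 1);

		asm volatile("" ::: "memory");	/* read barrier */
	} while (pc->lock != seq);		/* retry if the kernel updated the page */

	return count;
}

[The seqcount-style retry on pc->lock guards against the kernel rescheduling the event, and thus changing index/offset, in the middle of the read.]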
 arch/arm/include/asm/perf_event.h            |  4 ----
 arch/frv/include/asm/perf_event.h            |  2 --
 arch/hexagon/include/asm/perf_event.h        |  2 --
 arch/powerpc/include/asm/perf_event_server.h |  2 --
 arch/powerpc/kernel/perf_event.c             |  6 ++++++
 arch/s390/include/asm/perf_event.h           |  1 -
 arch/x86/include/asm/perf_event.h            |  2 --
 include/linux/perf_event.h                   |  6 ++++++
 kernel/events/core.c                         | 27 ++++++++++++++++++++++++-----
 kernel/events/hw_breakpoint.c                |  7 +++++++
 10 files changed, 41 insertions(+), 18 deletions(-)
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 0f8e3827a89b..08f94d8fc04c 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,10 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/* ARM performance counters start from 1 (in the cp15 accesses) so use the
- * same indexes here for consistency. */
-#define PERF_EVENT_INDEX_OFFSET 1
-
 /* ARM perf PMU IDs for use by internal perf clients. */
 enum arm_perf_pmu_ids {
 	ARM_PERF_PMU_ID_XSCALE1 = 0,
diff --git a/arch/frv/include/asm/perf_event.h b/arch/frv/include/asm/perf_event.h
index a69e0155d146..c52ea5546b5b 100644
--- a/arch/frv/include/asm/perf_event.h
+++ b/arch/frv/include/asm/perf_event.h
@@ -12,6 +12,4 @@
 #ifndef _ASM_PERF_EVENT_H
 #define _ASM_PERF_EVENT_H
 
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #endif /* _ASM_PERF_EVENT_H */
diff --git a/arch/hexagon/include/asm/perf_event.h b/arch/hexagon/include/asm/perf_event.h
index 6c2910f91180..8b8526b491c7 100644
--- a/arch/hexagon/include/asm/perf_event.h
+++ b/arch/hexagon/include/asm/perf_event.h
@@ -19,6 +19,4 @@
 #ifndef _ASM_PERF_EVENT_H
 #define _ASM_PERF_EVENT_H
 
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #endif /* _ASM_PERF_EVENT_H */
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 8f1df1208d23..1a8093fa8f71 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -61,8 +61,6 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
-#define PERF_EVENT_INDEX_OFFSET 1
-
 /*
  * Only override the default definitions in include/linux/perf_event.h
  * if we have hardware PMU support.
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 10a140f82cb8..d614ab57ccca 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1187,6 +1187,11 @@ static int power_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
+static int power_pmu_event_idx(struct perf_event *event)
+{
+	return event->hw.idx;
+}
+
 struct pmu power_pmu = {
 	.pmu_enable = power_pmu_enable,
 	.pmu_disable = power_pmu_disable,
@@ -1199,6 +1204,7 @@ struct pmu power_pmu = {
 	.start_txn = power_pmu_start_txn,
 	.cancel_txn = power_pmu_cancel_txn,
 	.commit_txn = power_pmu_commit_txn,
+	.event_idx = power_pmu_event_idx,
 };
 
 /*
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index a75f168d2718..4eb444edbe49 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -6,4 +6,3 @@
 
 /* Empty, just to avoid compiling error */
 
-#define PERF_EVENT_INDEX_OFFSET 0
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 096c975e099f..9b922c136254 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -188,8 +188,6 @@ extern u32 get_ibs_caps(void);
 #ifdef CONFIG_PERF_EVENTS
 extern void perf_events_lapic_init(void);
 
-#define PERF_EVENT_INDEX_OFFSET 0
-
 /*
  * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
  * This flag is otherwise unused and ABI specified to be 0, so nobody should
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 08855613ceb3..02545e6df95b 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -680,6 +680,12 @@ struct pmu {
 	 * for each successful ->add() during the transaction.
 	 */
 	void (*cancel_txn) (struct pmu *pmu); /* optional */
+
+	/*
+	 * Will return the value for perf_event_mmap_page::index for this event,
+	 * if no implementation is provided it will default to: event->hw.idx + 1.
+	 */
+	int (*event_idx) (struct perf_event *event); /*optional */
 };
 
 /**
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0ca1f648ac08..3894309c41a2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3208,10 +3208,6 @@ int perf_event_task_disable(void)
 	return 0;
 }
 
-#ifndef PERF_EVENT_INDEX_OFFSET
-# define PERF_EVENT_INDEX_OFFSET 0
-#endif
-
 static int perf_event_index(struct perf_event *event)
 {
 	if (event->hw.state & PERF_HES_STOPPED)
@@ -3220,7 +3216,7 @@ static int perf_event_index(struct perf_event *event)
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return 0;
 
-	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
+	return event->pmu->event_idx(event);
 }
 
 static void calc_timer_values(struct perf_event *event,
@@ -4992,6 +4988,11 @@ static int perf_swevent_init(struct perf_event *event)
 	return 0;
 }
 
+static int perf_swevent_event_idx(struct perf_event *event)
+{
+	return 0;
+}
+
 static struct pmu perf_swevent = {
 	.task_ctx_nr = perf_sw_context,
 
@@ -5001,6 +5002,8 @@ static struct pmu perf_swevent = {
 	.start = perf_swevent_start,
 	.stop = perf_swevent_stop,
 	.read = perf_swevent_read,
+
+	.event_idx = perf_swevent_event_idx,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -5087,6 +5090,8 @@ static struct pmu perf_tracepoint = {
 	.start = perf_swevent_start,
 	.stop = perf_swevent_stop,
 	.read = perf_swevent_read,
+
+	.event_idx = perf_swevent_event_idx,
 };
 
 static inline void perf_tp_register(void)
@@ -5306,6 +5311,8 @@ static struct pmu perf_cpu_clock = {
 	.start = cpu_clock_event_start,
 	.stop = cpu_clock_event_stop,
 	.read = cpu_clock_event_read,
+
+	.event_idx = perf_swevent_event_idx,
 };
 
 /*
@@ -5378,6 +5385,8 @@ static struct pmu perf_task_clock = {
 	.start = task_clock_event_start,
 	.stop = task_clock_event_stop,
 	.read = task_clock_event_read,
+
+	.event_idx = perf_swevent_event_idx,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -5405,6 +5414,11 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
 	perf_pmu_enable(pmu);
 }
 
+static int perf_event_idx_default(struct perf_event *event)
+{
+	return event->hw.idx + 1;
+}
+
 /*
  * Ensures all contexts with the same task_ctx_nr have the same
  * pmu_cpu_context too.
@@ -5594,6 +5608,9 @@ got_cpu_context:
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->event_idx)
+		pmu->event_idx = perf_event_idx_default;
+
 	list_add_rcu(&pmu->entry, &pmus);
 	ret = 0;
 unlock:
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b7971d6f38bf..b0309f76d777 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -613,6 +613,11 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags)
 	bp->hw.state = PERF_HES_STOPPED;
 }
 
+static int hw_breakpoint_event_idx(struct perf_event *bp)
+{
+	return 0;
+}
+
 static struct pmu perf_breakpoint = {
 	.task_ctx_nr = perf_sw_context, /* could eventually get its own */
 
@@ -622,6 +627,8 @@ static struct pmu perf_breakpoint = {
 	.start = hw_breakpoint_start,
 	.stop = hw_breakpoint_stop,
 	.read = hw_breakpoint_pmu_read,
+
+	.event_idx = hw_breakpoint_event_idx,
 };
 
 int __init init_hw_breakpoint(void)