author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-11-20 14:36:02 -0500
committer Ingo Molnar <mingo@elte.hu>    2011-12-21 05:01:07 -0500
commit    35edc2a5095efb189e60dc32bbb9d2663aec6d24 (patch)
tree      3296a0dc54c4eb9d9ae5e0715d7521ecbb6d6f7e /kernel/events/core.c
parent    9a0f05cb36888550d1509d60aa55788615abea44 (diff)
perf, arch: Rework perf_event_index()
Put the logic to compute the event index into a per-pmu method. This is
required because the x86 rules are weird and wonderful and don't match the
capabilities of the current scheme.

AFAIK only powerpc actually has a usable userspace read of the PMCs, but
I'm not at all sure anybody actually used that. ARM is restored to the
default since it currently does not support userspace access at all. And
all software events are provided with a method that reports their index
as 0 (disabled).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Arun Sharma <asharma@fb.com>
Link: http://lkml.kernel.org/n/tip-dfydxodki16lylkt3gl2j7cw@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
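[Context note, not part of the original commit message: the index computed
here is exported to userspace via the index field of the mmap'd
perf_event_mmap_page, with 0 now uniformly meaning "no direct counter
access". A minimal sketch of the self-monitoring read loop that consumes
it, following the protocol documented above perf_event_mmap_page in
include/linux/perf_event.h. barrier() and pmc_read() are placeholders for
a compiler barrier and the architecture's direct counter-read instruction
(mfspr on powerpc, rdpmc on x86); they are not real userspace APIs.

    #include <stdint.h>
    #include <linux/perf_event.h>

    #define barrier() __asm__ __volatile__("" ::: "memory")

    /* Placeholder: arch-specific direct read of hardware counter n. */
    extern uint64_t pmc_read(unsigned int counter);

    static int self_read(volatile struct perf_event_mmap_page *pc,
                         int64_t *count)
    {
            uint32_t seq, idx;

            do {
                    seq = pc->lock;         /* seqlock generation */
                    barrier();

                    idx = pc->index;        /* 0 => no direct access */
                    if (!idx)
                            return -1;      /* fall back to read(2) on the fd */
                    *count = pc->offset + pmc_read(idx - 1);

                    barrier();
            } while (pc->lock != seq);      /* retry if the kernel moved it */

            return 0;
    }

An idx of 0 forces the read(2) fallback, which is exactly what the
software-event method introduced below guarantees.]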
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--    kernel/events/core.c    27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0ca1f648ac08..3894309c41a2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3208,10 +3208,6 @@ int perf_event_task_disable(void)
 	return 0;
 }
 
-#ifndef PERF_EVENT_INDEX_OFFSET
-# define PERF_EVENT_INDEX_OFFSET 0
-#endif
-
 static int perf_event_index(struct perf_event *event)
 {
 	if (event->hw.state & PERF_HES_STOPPED)
@@ -3220,7 +3216,7 @@ static int perf_event_index(struct perf_event *event)
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return 0;
 
-	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
+	return event->pmu->event_idx(event);
 }
 
 static void calc_timer_values(struct perf_event *event,
@@ -4992,6 +4988,11 @@ static int perf_swevent_init(struct perf_event *event)
 	return 0;
 }
 
+static int perf_swevent_event_idx(struct perf_event *event)
+{
+	return 0;
+}
+
 static struct pmu perf_swevent = {
 	.task_ctx_nr	= perf_sw_context,
 
@@ -5001,6 +5002,8 @@ static struct pmu perf_swevent = {
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -5087,6 +5090,8 @@ static struct pmu perf_tracepoint = {
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 static inline void perf_tp_register(void)
@@ -5306,6 +5311,8 @@ static struct pmu perf_cpu_clock = {
 	.start		= cpu_clock_event_start,
 	.stop		= cpu_clock_event_stop,
 	.read		= cpu_clock_event_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 /*
@@ -5378,6 +5385,8 @@ static struct pmu perf_task_clock = {
 	.start		= task_clock_event_start,
 	.stop		= task_clock_event_stop,
 	.read		= task_clock_event_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -5405,6 +5414,11 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
 	perf_pmu_enable(pmu);
 }
 
+static int perf_event_idx_default(struct perf_event *event)
+{
+	return event->hw.idx + 1;
+}
+
 /*
  * Ensures all contexts with the same task_ctx_nr have the same
  * pmu_cpu_context too.
@@ -5594,6 +5608,9 @@ got_cpu_context:
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->event_idx)
+		pmu->event_idx = perf_event_idx_default;
+
 	list_add_rcu(&pmu->entry, &pmus);
 	ret = 0;
 unlock:
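[Context note: PMUs that leave .event_idx unset keep the old hw.idx + 1
convention via perf_event_idx_default(). The reason the method had to
become per-pmu is visible in the x86 override from the companion arch
patch in this series: rdpmc selects fixed-function counters by setting
bit 30 of its counter selector, an encoding the generic default cannot
express. A hedged sketch of that shape; the names (x86_pmu,
X86_PMC_IDX_FIXED) are the era's arch/x86 internals, reproduced from
memory and illustrative rather than authoritative:

    /* Sketch of an arch override; not part of kernel/events/core.c. */
    static int x86_pmu_event_idx(struct perf_event *event)
    {
            int idx = event->hw.idx;

            if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
                    idx -= X86_PMC_IDX_FIXED;   /* 0-based fixed counter */
                    idx |= 1 << 30;             /* rdpmc fixed-counter flag */
            }

            return idx + 1;                     /* 0 still means disabled */
    }]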