Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--  kernel/events/core.c | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0ca1f648ac08..3894309c41a2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3208,10 +3208,6 @@ int perf_event_task_disable(void)
 	return 0;
 }
 
-#ifndef PERF_EVENT_INDEX_OFFSET
-# define PERF_EVENT_INDEX_OFFSET 0
-#endif
-
 static int perf_event_index(struct perf_event *event)
 {
 	if (event->hw.state & PERF_HES_STOPPED)
@@ -3220,7 +3216,7 @@ static int perf_event_index(struct perf_event *event)
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return 0;
 
-	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
+	return event->pmu->event_idx(event);
 }
 
 static void calc_timer_values(struct perf_event *event,
@@ -4992,6 +4988,11 @@ static int perf_swevent_init(struct perf_event *event)
 	return 0;
 }
 
+static int perf_swevent_event_idx(struct perf_event *event)
+{
+	return 0;
+}
+
 static struct pmu perf_swevent = {
 	.task_ctx_nr	= perf_sw_context,
 
@@ -5001,6 +5002,8 @@ static struct pmu perf_swevent = {
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -5087,6 +5090,8 @@ static struct pmu perf_tracepoint = {
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 static inline void perf_tp_register(void)
@@ -5306,6 +5311,8 @@ static struct pmu perf_cpu_clock = {
 	.start		= cpu_clock_event_start,
 	.stop		= cpu_clock_event_stop,
 	.read		= cpu_clock_event_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 /*
@@ -5378,6 +5385,8 @@ static struct pmu perf_task_clock = {
 	.start		= task_clock_event_start,
 	.stop		= task_clock_event_stop,
 	.read		= task_clock_event_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -5405,6 +5414,11 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
 	perf_pmu_enable(pmu);
 }
 
+static int perf_event_idx_default(struct perf_event *event)
+{
+	return event->hw.idx + 1;
+}
+
 /*
  * Ensures all contexts with the same task_ctx_nr have the same
  * pmu_cpu_context too.
@@ -5594,6 +5608,9 @@ got_cpu_context:
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->event_idx)
+		pmu->event_idx = perf_event_idx_default;
+
 	list_add_rcu(&pmu->entry, &pmus);
 	ret = 0;
 unlock: