| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2010-06-03 05:21:20 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-06-03 11:03:08 -0400 |
| commit | c6df8d5ab87a246942d138321e1721edbb69f6e1 | |
| tree | 4fa2965d148c3e7ea456ab889d278d5c16d25d17 /kernel | |
| parent | da3fd1a0010ccc9fe6fd5ae2b9e85e1aacc03e4d | |
perf: Fix crash in swevents
Frederic reported that because swevents handling doesn't disable IRQs
anymore, we can get a recursion of perf_adjust_period(), once from
overflow handling and once from the tick.
If both call ->disable, we get a double hlist_del_rcu() and trigger
a LIST_POISON2 dereference.
Since we don't actually need to stop/start a swevent to re-program
the hardware (lack of hardware to program), simply nop out these
callbacks for the swevent pmu.
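
For reference, the LIST_POISON2 dereference described above comes from the rculist helpers: the first hlist_del_rcu() unlinks the node and poisons its ->pprev pointer, so a second delete on the same node writes through the poison address. Below is a minimal, self-contained sketch of that mechanism (the struct layout, poison constant, and helper name are paraphrased for illustration, not quoted verbatim from the kernel headers):

```c
#include <stddef.h>

/* Kernel-style poison value; the real LIST_POISON2 also adds a
 * config-dependent POISON_POINTER_DELTA offset. */
#define LIST_POISON2 ((struct hlist_node **)0x00200200)

struct hlist_node {
	struct hlist_node *next, **pprev;
};

/* Sketch of hlist_del_rcu(): unlink the node, then poison ->pprev so a
 * stale second deletion is caught instead of silently corrupting the list. */
static void hlist_del_rcu_sketch(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;

	*pprev = next;          /* on a double delete, pprev == LIST_POISON2: fault */
	if (next)
		next->pprev = pprev;
	n->pprev = LIST_POISON2;
}
```

Calling this twice on the same node, as the recursive perf_adjust_period() path ends up doing via ->disable, makes the second `*pprev = next` store land on the poison address, which is the crash being fixed here.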
Reported-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1275557609.27810.35218.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/perf_event.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
```diff
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 858f56fa2432..31d6afe92594 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4055,13 +4055,6 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
 	}
 }
 
-static void perf_swevent_unthrottle(struct perf_event *event)
-{
-	/*
-	 * Nothing to do, we already reset hwc->interrupts.
-	 */
-}
-
 static void perf_swevent_add(struct perf_event *event, u64 nr,
 			     int nmi, struct perf_sample_data *data,
 			     struct pt_regs *regs)
@@ -4276,11 +4269,22 @@ static void perf_swevent_disable(struct perf_event *event)
 	hlist_del_rcu(&event->hlist_entry);
 }
 
+static void perf_swevent_void(struct perf_event *event)
+{
+}
+
+static int perf_swevent_int(struct perf_event *event)
+{
+	return 0;
+}
+
 static const struct pmu perf_ops_generic = {
 	.enable		= perf_swevent_enable,
 	.disable	= perf_swevent_disable,
+	.start		= perf_swevent_int,
+	.stop		= perf_swevent_void,
 	.read		= perf_swevent_read,
-	.unthrottle	= perf_swevent_unthrottle,
+	.unthrottle	= perf_swevent_void, /* hwc->interrupts already reset */
 };
 
 /*
@@ -4561,8 +4565,10 @@ static int swevent_hlist_get(struct perf_event *event)
 static const struct pmu perf_ops_tracepoint = {
 	.enable		= perf_trace_enable,
 	.disable	= perf_trace_disable,
+	.start		= perf_swevent_int,
+	.stop		= perf_swevent_void,
 	.read		= perf_swevent_read,
-	.unthrottle	= perf_swevent_unthrottle,
+	.unthrottle	= perf_swevent_void,
 };
 
 static int perf_tp_filter_match(struct perf_event *event,
```