author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-10-14 11:43:23 -0400
committer	Ingo Molnar <mingo@elte.hu>			2010-10-18 13:58:55 -0400
commit		d580ff8699e8811a9af37e9de4dea375401bdeec
tree		f2789d8a8628b856ad902c0bfbb1899e6cc67697
parent		c6be5a5cb62592d9d661899a2aa78236eb00ffa5
perf, hw_breakpoint: Fix crash in hw_breakpoint creation
hw_breakpoint creation needs to account breakpoint slots per task,
because ptrace can create breakpoints at any time and there must always
be sufficient hardware resources to back them.
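
(For reference, a minimal sketch of that accounting, modeled on
__reserve_bp_slot() in kernel/hw_breakpoint.c -- simplified; the real
code also handles flexible events and per-type slot tables:)

	static int reserve_bp_slot_sketch(struct perf_event *bp, enum bp_type_idx type)
	{
		struct bp_busy_slots slots = { 0 };

		/* Count breakpoints already pinned to this task (or cpu). */
		fetch_bp_busy_slots(&slots, bp, type);

		/* Refuse creation if it would oversubscribe the debug registers. */
		if (slots.pinned + (slots.flexible ? 1 : 0) > nr_slots[type])
			return -ENOSPC;

		/* Otherwise charge the slot to the task's count. */
		toggle_bp_slot(bp, true, type, hw_breakpoint_weight(bp));
		return 0;
	}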
With the perf per-pmu context changes, event initialization no longer
has access to the event context, for the simple reason that we need to
first find the pmu (a result of initialization) before we can find the
context.
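
(Roughly, the resulting order in perf_event_open() -- a simplified
sketch, error handling omitted:)

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
	/* only now do we know which pmu handles the event... */
	pmu = event->pmu;
	/* ...and only with the pmu in hand can the context be found: */
	ctx = find_get_context(pmu, task, cpu);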
This makes hw_breakpoint unhappy, because it can no longer do its
per-task accounting; cure this by frobbing a task pointer into the
event::hw bits for now... (see the usage sketch after the diff).
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20101014203625.391543667@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/perf_event.h |  7
-rw-r--r--	kernel/hw_breakpoint.c     |  8
-rw-r--r--	kernel/perf_event.c        | 23
3 files changed, 29 insertions(+), 9 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2ebfc9ae4755..97965fac55fe 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -536,6 +536,12 @@ struct hw_perf_event {
 		struct { /* breakpoint */
 			struct arch_hw_breakpoint	info;
 			struct list_head		bp_list;
+			/*
+			 * Crufty hack to avoid the chicken and egg
+			 * problem hw_breakpoint has with context
+			 * creation and event initialization.
+			 */
+			struct task_struct		*bp_target;
 		};
 #endif
 	};
@@ -693,6 +699,7 @@ struct swevent_hlist {
 
 #define PERF_ATTACH_CONTEXT	0x01
 #define PERF_ATTACH_GROUP	0x02
+#define PERF_ATTACH_TASK	0x04
 
 /**
  * struct perf_event - performance event kernel representation:
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 3b714e839c10..2c9120f0afca 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -113,12 +113,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  */
 static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
 {
-	struct perf_event_context *ctx = bp->ctx;
+	struct task_struct *tsk = bp->hw.bp_target;
 	struct perf_event *iter;
 	int count = 0;
 
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->ctx == ctx && find_slot_idx(iter) == type)
+		if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
 			count += hw_breakpoint_weight(iter);
 	}
 
@@ -134,7 +134,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		    enum bp_type_idx type)
 {
 	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->ctx->task;
+	struct task_struct *tsk = bp->hw.bp_target;
 
 	if (cpu >= 0) {
 		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -213,7 +213,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	       int weight)
 {
 	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->ctx->task;
+	struct task_struct *tsk = bp->hw.bp_target;
 
 	/* Pinned counter cpu profiling */
 	if (!tsk) {
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index b21d06aaef60..856e20baf13f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -5255,9 +5255,10 @@ unlock:
  */
 static struct perf_event *
 perf_event_alloc(struct perf_event_attr *attr, int cpu,
+		 struct task_struct *task,
 		 struct perf_event *group_leader,
 		 struct perf_event *parent_event,
 		 perf_overflow_handler_t overflow_handler)
 {
 	struct pmu *pmu;
 	struct perf_event *event;
@@ -5299,6 +5300,17 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	event->state		= PERF_EVENT_STATE_INACTIVE;
 
+	if (task) {
+		event->attach_state = PERF_ATTACH_TASK;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+		/*
+		 * hw_breakpoint is a bit difficult here..
+		 */
+		if (attr->type == PERF_TYPE_BREAKPOINT)
+			event->hw.bp_target = task;
+#endif
+	}
+
 	if (!overflow_handler && parent_event)
 		overflow_handler = parent_event->overflow_handler;
 
@@ -5559,7 +5571,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		}
 	}
 
-	event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL);
+	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
 	if (IS_ERR(event)) {
 		err = PTR_ERR(event);
 		goto err_task;
@@ -5728,7 +5740,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	 * Get the target context (task or percpu):
 	 */
 
-	event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler);
+	event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
 	if (IS_ERR(event)) {
 		err = PTR_ERR(event);
 		goto err;
@@ -5996,6 +6008,7 @@ inherit_event(struct perf_event *parent_event,
 
 	child_event = perf_event_alloc(&parent_event->attr,
 					   parent_event->cpu,
+					   child,
 					   group_leader, parent_event,
 					   NULL);
 	if (IS_ERR(child_event))
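
(For context, a hedged end-to-end sketch of how a task-bound breakpoint
now reaches the accounting code. register_user_hw_breakpoint() and
hw_breakpoint_init() are the existing helpers declared in
include/linux/hw_breakpoint.h; address, triggered and tsk are
placeholders supplied by the caller:)

	struct perf_event_attr attr;
	struct perf_event *bp;

	hw_breakpoint_init(&attr);	/* sets attr.type = PERF_TYPE_BREAKPOINT */
	attr.bp_addr = address;
	attr.bp_len  = HW_BREAKPOINT_LEN_1;
	attr.bp_type = HW_BREAKPOINT_W;

	/*
	 * The target task now travels with the event:
	 *   register_user_hw_breakpoint(&attr, triggered, tsk)
	 *     -> perf_event_create_kernel_counter(&attr, -1, tsk, triggered)
	 *       -> perf_event_alloc(..., tsk, ...)  which stashes hw.bp_target,
	 * after which task_bp_pinned() keys its accounting on bp->hw.bp_target.
	 */
	bp = register_user_hw_breakpoint(&attr, triggered, tsk);
	if (IS_ERR(bp))
		return PTR_ERR(bp);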