author     Linus Torvalds <torvalds@linux-foundation.org>  2010-04-04 15:13:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-04-04 15:13:10 -0400
commit     8ce42c8b7fdf4fc008a6fc7349beb8f4dd5cb774
tree       bc05326ed8ade9137e3ce5fb5b1d439dcdce266f /kernel
parent     0121b0c771f929bb5298554b70843ab46280c298
parent     6e03bb5ad363fdbe4e1e227cfb78f7978c662e18
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf: Always build the powerpc perf_arch_fetch_caller_regs version
perf: Always build the stub perf_arch_fetch_caller_regs version
perf, probe-finder: Build fix on Debian
perf/scripts: Tuple was set from long in both branches in python_process_event()
perf: Fix 'perf sched record' deadlock
perf, x86: Fix callgraphs of 32-bit processes on 64-bit kernels
perf, x86: Fix AMD hotplug & constraint initialization
x86: Move notify_cpu_starting() callback to a later stage
x86,kgdb: Always initialize the hw breakpoint attribute
perf: Use hot regs with software sched switch/migrate events
perf: Correctly align perf event tracing buffer
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c              | 22
-rw-r--r--  kernel/trace/trace_event_perf.c  | 11
2 files changed, 23 insertions, 10 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 574ee58a3046..681af806d76b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1164,11 +1164,9 @@ void perf_event_task_sched_out(struct task_struct *task,
         struct perf_event_context *ctx = task->perf_event_ctxp;
         struct perf_event_context *next_ctx;
         struct perf_event_context *parent;
-        struct pt_regs *regs;
         int do_switch = 1;
 
-        regs = task_pt_regs(task);
-        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
         if (likely(!ctx || !cpuctx->task_ctx))
                 return;
@@ -2786,12 +2784,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
         return NULL;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 __weak
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 }
-#endif
+
 
 /*
  * Output
@@ -3378,15 +3375,23 @@ static void perf_event_task_output(struct perf_event *event,
                                    struct perf_task_event *task_event)
 {
         struct perf_output_handle handle;
-        int size;
         struct task_struct *task = task_event->task;
-        int ret;
+        unsigned long flags;
+        int size, ret;
+
+        /*
+         * If this CPU attempts to acquire an rq lock held by a CPU spinning
+         * in perf_output_lock() from interrupt context, it's game over.
+         */
+        local_irq_save(flags);
 
         size = task_event->event_id.header.size;
         ret = perf_output_begin(&handle, event, size, 0, 0);
 
-        if (ret)
+        if (ret) {
+                local_irq_restore(flags);
                 return;
+        }
 
         task_event->event_id.pid = perf_event_pid(event, task);
         task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3397,6 +3402,7 @@ static void perf_event_task_output(struct perf_event *event,
         perf_output_put(&handle, task_event->event_id);
 
         perf_output_end(&handle);
+        local_irq_restore(flags);
 }
 
 static int perf_event_task_match(struct perf_event *event)
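Note on the perf_event_task_output() hunks above (apparently the "perf: Fix 'perf sched record' deadlock" entry in the merge): local interrupts are now disabled before perf_output_begin() and restored on every exit path, including the early return when perf_output_begin() fails, so this CPU cannot take an interrupt and go after a run-queue lock while it owns the output buffer. The following is a minimal userspace sketch of that control flow only; local_irq_save()/local_irq_restore() and the perf_output_*() helpers are stubbed stand-ins with simplified signatures, not the kernel implementations.

/*
 * Sketch of the post-fix control flow in perf_event_task_output():
 * "interrupts" are switched off before claiming the output buffer and
 * switched back on at every return, early-error path included.
 */
#include <stdio.h>

/* Stubs standing in for the kernel primitives; signatures are simplified. */
static void local_irq_save(unsigned long *flags)   { *flags = 1; puts("irqs off"); }
static void local_irq_restore(unsigned long flags) { (void)flags; puts("irqs on"); }
static int  perf_output_begin(void)                { return 0; /* 0 == success */ }
static void perf_output_put(const char *record)    { printf("emit %s\n", record); }
static void perf_output_end(void)                  { puts("output done"); }

static void task_output_sketch(void)
{
        unsigned long flags;

        local_irq_save(&flags);                 /* close the irq/rq-lock window */

        if (perf_output_begin()) {
                local_irq_restore(flags);       /* the early exit must restore too */
                return;
        }

        perf_output_put("task event");
        perf_output_end();
        local_irq_restore(flags);
}

int main(void)
{
        task_output_sketch();
        return 0;
}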
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691eb3a30..0565bb42566f 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned accesses
+ * suprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+        perf_trace_t;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
         char *trace_buf, *raw_data;
         int pc, cpu;
 
+        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
         pc = preempt_count();
 
         /* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
         raw_data = per_cpu_ptr(trace_buf, cpu);
 
         /* zero the dead bytes from align to not leak stack to user */
-        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
         entry = (struct trace_entry *)raw_data;
         tracing_generic_entry_update(entry, *irq_flags, pc);
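Note on the trace_event_perf.c hunks above ("perf: Correctly align perf event tracing buffer"): declaring perf_trace_t as an array of unsigned long rather than char gives the per-cpu buffer natural word alignment, the BUILD_BUG_ON rejects at compile time a PERF_MAX_TRACE_SIZE that is not a multiple of sizeof(unsigned long), and the direct u64 store is replaced with a memset of the same bytes. A small userspace sketch of the first two ideas follows; MAX_TRACE_SIZE and the compile-time-check macro are hypothetical stand-ins, not the kernel's.

#include <stdalign.h>
#include <stdio.h>

#define MAX_TRACE_SIZE 2048     /* hypothetical stand-in for PERF_MAX_TRACE_SIZE */

/*
 * Rough equivalent of the kernel's BUILD_BUG_ON(): if cond is non-zero the
 * typedef declares a negative-sized array and compilation fails.
 */
#define BUILD_BUG_ON_SKETCH(cond, name) typedef char name[(cond) ? -1 : 1]

BUILD_BUG_ON_SKETCH(MAX_TRACE_SIZE % sizeof(unsigned long), trace_size_check);

/* A char-array buffer only guarantees byte alignment... */
typedef char          trace_buf_char_t[MAX_TRACE_SIZE];
/* ...while an unsigned long array is naturally word aligned. */
typedef unsigned long trace_buf_word_t[MAX_TRACE_SIZE / sizeof(unsigned long)];

int main(void)
{
        printf("alignof(char buffer)          = %zu\n", alignof(trace_buf_char_t));
        printf("alignof(unsigned long buffer) = %zu\n", alignof(trace_buf_word_t));
        return 0;
}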