author     Ingo Molnar <mingo@elte.hu>   2010-03-12 04:20:57 -0500
committer  Ingo Molnar <mingo@elte.hu>   2010-03-12 04:20:59 -0500
commit     937779db13fb6cb621e28d9ae0a6cf1d05b57d05 (patch)
tree       6c27402677c347c4dc01980de78c270630588847 /kernel/perf_event.c
parent     6230f2c7ef01a69e2ba9370326572c287209d32a (diff)
parent     9f591fd76afdc0e5192e9ed00a36f8efc0b4dfe6 (diff)
Merge branch 'perf/urgent' into perf/core
Merge reason: We want to queue up a dependent patch.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 30 +++++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3853d49c7d56..8bf61273c58b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2790,6 +2790,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return NULL;
 }
 
+__weak
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+}
+
 /*
  * Output
  */
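The __weak stub added above is a default no-op: an architecture that wants tracepoint samples to carry a usable register snapshot supplies its own perf_arch_fetch_caller_regs(). A minimal sketch of what such an override could look like, assuming x86-style pt_regs field names (purely illustrative, not the actual arch code):

/*
 * Illustrative arch override: record just enough caller state for perf
 * to attribute the sample.  The skip argument (stack frames to skip)
 * is ignored in this sketch.
 */
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
	memset(regs, 0, sizeof(*regs));
	regs->ip = ip;	/* instruction pointer at the probed call site */
	regs->bp = (unsigned long)__builtin_frame_address(0);	/* current frame */
}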
@@ -4317,9 +4322,8 @@ static const struct pmu perf_ops_task_clock = {
 #ifdef CONFIG_EVENT_TRACING
 
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-		   int entry_size)
+		   int entry_size, struct pt_regs *regs)
 {
-	struct pt_regs *regs = get_irq_regs();
 	struct perf_sample_data data;
 	struct perf_raw_record raw = {
 		.size = entry_size,
@@ -4329,12 +4333,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	perf_sample_data_init(&data, addr);
 	data.raw = &raw;
 
-	if (!regs)
-		regs = task_pt_regs(current);
-
 	/* Trace events already protected against recursion */
 	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 			   &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
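With the signature change above, perf_tp_event() no longer guesses at register state via get_irq_regs() with a task_pt_regs() fallback; the tracing glue captures the caller's registers at the tracepoint and passes them in. A hypothetical call site (sketch; the handler name and event id 42 are placeholders):

static void probe_my_event(void *raw_record, int entry_size)
{
	struct pt_regs regs;

	/* Snapshot the registers of whoever fired the tracepoint. */
	perf_arch_fetch_caller_regs(&regs, _THIS_IP_, 0);

	/* addr = 0 and count = 1, as for a plain counting event. */
	perf_tp_event(42, 0, 1, raw_record, entry_size, &regs);
}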
@@ -4350,7 +4351,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-	ftrace_profile_disable(event->attr.config);
+	perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4364,7 +4365,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 	    !capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (ftrace_profile_enable(event->attr.config))
+	if (perf_trace_enable(event->attr.config))
 		return NULL;
 
 	event->destroy = tp_perf_event_destroy;
@@ -5371,12 +5372,22 @@ int perf_event_init_task(struct task_struct *child)
 	return ret;
 }
 
+static void __init perf_event_init_all_cpus(void)
+{
+	int cpu;
+	struct perf_cpu_context *cpuctx;
+
+	for_each_possible_cpu(cpu) {
+		cpuctx = &per_cpu(perf_cpu_context, cpu);
+		__perf_event_init_context(&cpuctx->ctx, NULL);
+	}
+}
+
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
 	struct perf_cpu_context *cpuctx;
 
 	cpuctx = &per_cpu(perf_cpu_context, cpu);
-	__perf_event_init_context(&cpuctx->ctx, NULL);
 
 	spin_lock(&perf_resource_lock);
 	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
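Moving the context setup out of the hotplug callback means every possible CPU gets an initialized perf_cpu_context at boot, so code that reaches for a CPU's context before that CPU comes online never touches uninitialized state. The general pattern, with hypothetical names:

/* Hypothetical per-CPU state, for illustration only. */
struct my_state { bool ready; };
static DEFINE_PER_CPU(struct my_state, my_state);

static void __init my_init_all_cpus(void)
{
	int cpu;

	/*
	 * Cover every *possible* CPU, not just the ones online now,
	 * so later lookups are safe regardless of hotplug timing.
	 */
	for_each_possible_cpu(cpu)
		per_cpu(my_state, cpu).ready = true;
}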
@@ -5442,6 +5453,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 
 void __init perf_event_init(void)
 {
+	perf_event_init_all_cpus();
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,