Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 574ee58a3046..2f3fbf84215a 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -15,6 +15,7 @@
 #include <linux/smp.h>
 #include <linux/file.h>
 #include <linux/poll.h>
+#include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/dcache.h>
 #include <linux/percpu.h>
@@ -1164,11 +1165,9 @@ void perf_event_task_sched_out(struct task_struct *task,
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
-	struct pt_regs *regs;
 	int do_switch = 1;
 
-	regs = task_pt_regs(task);
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
 	if (likely(!ctx || !cpuctx->task_ctx))
 		return;
@@ -2786,12 +2785,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return NULL;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 __weak
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 }
-#endif
+
 
 /*
  * Output
@@ -3378,15 +3376,23 @@ static void perf_event_task_output(struct perf_event *event,
 				  struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
-	int size;
 	struct task_struct *task = task_event->task;
-	int ret;
+	unsigned long flags;
+	int size, ret;
+
+	/*
+	 * If this CPU attempts to acquire an rq lock held by a CPU spinning
+	 * in perf_output_lock() from interrupt context, it's game over.
+	 */
+	local_irq_save(flags);
 
 	size = task_event->event_id.header.size;
 	ret = perf_output_begin(&handle, event, size, 0, 0);
 
-	if (ret)
+	if (ret) {
+		local_irq_restore(flags);
 		return;
+	}
 
 	task_event->event_id.pid = perf_event_pid(event, task);
 	task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3397,6 +3403,7 @@ static void perf_event_task_output(struct perf_event *event,
 	perf_output_put(&handle, task_event->event_id);
 
 	perf_output_end(&handle);
+	local_irq_restore(flags);
 }
 
 static int perf_event_task_match(struct perf_event *event)
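
Note on the perf_event_task_output() hunks above: the whole output path now runs with local interrupts disabled, and the early return taken when perf_output_begin() fails has to restore them as well, which is why the error branch gains its own local_irq_restore(flags). Below is a minimal userspace sketch of that save/restore-on-every-exit shape; irq_save(), irq_restore() and try_begin() are hypothetical stand-ins for local_irq_save(), local_irq_restore() and perf_output_begin(), not kernel API.

#include <stdio.h>

static unsigned long fake_irq_state = 1;       /* 1 == "interrupts enabled" */

/* stand-in for local_irq_save(): remember the current state, then disable */
static void irq_save(unsigned long *flags)
{
	*flags = fake_irq_state;
	fake_irq_state = 0;
}

/* stand-in for local_irq_restore(): put the saved state back */
static void irq_restore(unsigned long flags)
{
	fake_irq_state = flags;
}

/* stand-in for perf_output_begin(): may fail, e.g. when the buffer is full */
static int try_begin(int fail)
{
	return fail ? -1 : 0;
}

static void output_event(int fail)
{
	unsigned long flags;

	irq_save(&flags);               /* cover the whole output path */

	if (try_begin(fail)) {
		irq_restore(flags);     /* the early-return path must restore too */
		return;
	}

	printf("event written\n");
	irq_restore(flags);             /* normal exit */
}

int main(void)
{
	output_event(0);                /* success path */
	output_event(1);                /* early-return path */
	printf("interrupts enabled again: %lu\n", fake_irq_state);
	return 0;
}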