Diffstat (limited to 'kernel')
 kernel/perf_counter.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 534e20d14d63..36f65e2b8b57 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1503,10 +1503,21 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
  */
 static void __perf_counter_read(void *info)
 {
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	/*
+	 * If this is a task context, we need to check whether it is
+	 * the current task context of this cpu. If not it has been
+	 * scheduled out before the smp call arrived. In that case
+	 * counter->count would have been updated to a recent sample
+	 * when the counter was scheduled out.
+	 */
+	if (ctx->task && cpuctx->task_ctx != ctx)
+		return;
+
 	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
@@ -2008,6 +2019,10 @@ int perf_counter_task_disable(void)
 	return 0;
 }
 
+#ifndef PERF_COUNTER_INDEX_OFFSET
+# define PERF_COUNTER_INDEX_OFFSET	0
+#endif
+
static int perf_counter_index(struct perf_counter *counter)
 {
 	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
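
Note: the check added in the first hunk guards __perf_counter_read() against the cross-CPU call landing after the target task has been scheduled out. For reference, a minimal caller-side sketch, assuming the perf_counter_read() shape used elsewhere in kernel/perf_counter.c at this time; the function below is illustrative and not part of this diff:

/*
 * Caller-side sketch (assumed, not from this diff): reading an
 * active counter sends an IPI to the CPU the counter last ran on,
 * where __perf_counter_read() refreshes counter->count. If the
 * task was scheduled out before the IPI arrived, the new
 * ctx->task/cpuctx->task_ctx check makes the handler return early
 * and the caller uses the value saved at sched-out time instead.
 */
static u64 perf_counter_read(struct perf_counter *counter)
{
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		smp_call_function_single(counter->oncpu,
					 __perf_counter_read, counter, 1);

	return atomic64_read(&counter->count);
}

With wait=1, smp_call_function_single() returns only after the handler has run on the remote CPU, so counter->count is as current as the race allows by the time it is read back.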