Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c  17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 534e20d14d63..f274e1959885 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1503,10 +1503,21 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
  */
 static void __perf_counter_read(void *info)
 {
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	/*
+	 * If this is a task context, we need to check whether it is
+	 * the current task context of this cpu. If not it has been
+	 * scheduled out before the smp call arrived. In that case
+	 * counter->count would have been updated to a recent sample
+	 * when the counter was scheduled out.
+	 */
+	if (ctx->task && cpuctx->task_ctx != ctx)
+		return;
+
 	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
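
The hunk above closes a race in the cross-CPU read path: __perf_counter_read() runs as an SMP-call handler on the counter's CPU, and if the owning task has already been scheduled out by the time the call arrives, that context is no longer current and counter->count already holds a recent value, so the handler can simply bail out. For orientation, a minimal sketch of the caller side follows; the function name perf_counter_read() and its exact body are assumptions, since the diff does not show the caller:

	/* Sketch (assumed caller): read the current value of a counter.
	 * If the counter is live on some CPU, ask that CPU to sync it via
	 * an SMP function call; __perf_counter_read() above then returns
	 * early when the task context has already been switched away.
	 */
	static u64 perf_counter_read(struct perf_counter *counter)
	{
		if (counter->state == PERF_COUNTER_STATE_ACTIVE)
			smp_call_function_single(counter->oncpu,
						 __perf_counter_read, counter, 1);

		return atomic64_read(&counter->count);
	}
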
@@ -1780,7 +1791,7 @@ static int perf_counter_read_group(struct perf_counter *counter,
 		size += err;
 
 	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-		err = perf_counter_read_entry(counter, read_format,
+		err = perf_counter_read_entry(sub, read_format,
 				buf + size);
 		if (err < 0)
 			return err;
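
The one-word change above fixes the group-read path: with PERF_FORMAT_GROUP, a single read() returns one entry per group member, so each loop iteration must emit the sibling sub rather than re-reading the leader counter. A rough sketch of the buffer layout this produces is below; the struct names are hypothetical and the optional fields depend on the read_format bits actually requested:

	/* Sketch (assumption): shape of a PERF_FORMAT_GROUP read buffer,
	 * one header count followed by one entry per group member, which
	 * is why the loop must pass 'sub' and not 'counter'.
	 */
	struct read_group_entry {
		u64	value;		/* counter value for this member */
		u64	id;		/* present only with PERF_FORMAT_ID */
	};

	struct read_group_buf {
		u64				nr;		/* number of entries that follow */
		struct read_group_entry		values[];	/* one per member, leader first */
	};
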
@@ -2008,6 +2019,10 @@ int perf_counter_task_disable(void)
 	return 0;
 }
 
+#ifndef PERF_COUNTER_INDEX_OFFSET
+# define PERF_COUNTER_INDEX_OFFSET 0
+#endif
+
 static int perf_counter_index(struct perf_counter *counter)
 {
 	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
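
The new fallback define gives architectures that do not provide their own PERF_COUNTER_INDEX_OFFSET a default of 0; it feeds the function whose first lines close the hunk. A sketch of how the complete function plausibly uses the offset follows; the body beyond the lines shown in the diff is an assumption:

	/* Sketch (assumed continuation): report the hardware counter index,
	 * 1-based so that 0 can mean "not currently on a counter".
	 * PERF_COUNTER_INDEX_OFFSET lets an architecture whose hardware
	 * indices are already 1-based cancel out the "+ 1".
	 */
	static int perf_counter_index(struct perf_counter *counter)
	{
		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
			return 0;

		return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
	}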