Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c  3
-rw-r--r--  kernel/sched.c         3
2 files changed, 4 insertions, 2 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index c10055416dea..2f410ea2cb39 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2559,7 +2559,8 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 }
 
 /*
- *
+ * Log irq_period changes so that analyzing tools can re-normalize the
+ * event flow.
  */
 
 static void perf_log_period(struct perf_counter *counter, u64 period)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4c0d58bce6b2..ad079f07c9c8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4875,9 +4875,10 @@ void scheduler_tick(void)
 	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
-	perf_counter_task_tick(curr, cpu);
 	spin_unlock(&rq->lock);
 
+	perf_counter_task_tick(curr, cpu);
+
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
 	trigger_load_balance(rq, cpu);
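
Note on the sched.c hunk: perf_counter_task_tick() is moved out from under rq->lock, so the per-tick perf work no longer runs inside the runqueue critical section. The sketch below is a minimal, self-contained userspace analogue of that lock-scope pattern; the struct, function names and pthread locking are assumptions for illustration only, not kernel code.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical analogue of the scheduler tick path: keep the lock-protected
 * bookkeeping inside the critical section, and invoke the (potentially
 * slower) instrumentation hook only after the lock is dropped. */
struct runqueue {
	pthread_mutex_t lock;
	unsigned long nr_ticks;
};

static void perf_hook(unsigned long ticks)
{
	/* Instrumentation work that does not need the queue lock. */
	printf("tick %lu logged\n", ticks);
}

static void tick(struct runqueue *rq)
{
	unsigned long ticks;

	pthread_mutex_lock(&rq->lock);
	rq->nr_ticks++;          /* queue state is updated under the lock */
	ticks = rq->nr_ticks;
	pthread_mutex_unlock(&rq->lock);

	perf_hook(ticks);        /* hook runs with the lock released */
}

int main(void)
{
	struct runqueue rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_ticks = 0 };

	tick(&rq);
	tick(&rq);
	return 0;
}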