Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index faf4d463bbff..291c8d213d13 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -2059,7 +2059,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-		perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
 				1, 1, NULL, 0);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2724,7 +2724,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_counter_task_sched_in(current, cpu_of(rq));
+	perf_event_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -5199,7 +5199,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	spin_unlock(&rq->lock);
 
-	perf_counter_task_tick(curr, cpu);
+	perf_event_task_tick(curr, cpu);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5415,7 +5415,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_counter_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
@@ -7692,7 +7692,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 /*
  * Register at high priority so that task migration (migrate_all_tasks)
  * happens before everything else. This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
@@ -9549,7 +9549,7 @@ void __init sched_init(void)
 	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
-	perf_counter_init();
+	perf_event_init();
 
 	scheduler_running = 1;
 }
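
For reference, every hunk above is a mechanical rename from the old perf_counter naming to the new perf_event naming; no call sites change behavior. The sketch below summarizes the mapping using only identifiers that appear in this diff. The example call is copied from the set_task_cpu() hunk; the argument meanings noted in the comment (event id, count, NMI flag, regs, address) reflect the perf_sw_event() signature of this kernel era and are not shown in the diff itself.

	#include <linux/perf_event.h>	/* replaces <linux/perf_counter.h> */

	/*
	 * Rename map, as applied in this file:
	 *   perf_swcounter_event()        -> perf_sw_event()
	 *   perf_counter_task_sched_in()  -> perf_event_task_sched_in()
	 *   perf_counter_task_sched_out() -> perf_event_task_sched_out()
	 *   perf_counter_task_tick()      -> perf_event_task_tick()
	 *   perf_counter_init()           -> perf_event_init()
	 */

	/* Example call site from set_task_cpu(): record one CPU-migration
	 * software event (id, count = 1, nmi = 1, regs = NULL, addr = 0).
	 */
	perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);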