author	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-21 12:15:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-21 12:15:07 -0400
commit	43c1266ce4dc06bfd236cec31e11e9ecd69c0bef (patch)
tree	40a86739ca4c36200f447f655b01c57cfe646e26 /kernel/sched.c
parent	b8c7f1dc5ca4e0d10709182233cdab932cef593d (diff)
parent	57c0c15b5244320065374ad2c54f4fbec77a6428 (diff)
Merge branch 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Tidy up after the big rename
  perf: Do the big rename: Performance Counters -> Performance Events
  perf_counter: Rename 'event' to event_id/hw_event
  perf_counter: Rename list_entry -> group_entry, counter_list -> group_list

Manually resolved some fairly trivial conflicts with the tracing tree in
include/trace/ftrace.h and kernel/trace/trace_syscalls.c.
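As a quick orientation before the diff, here is a minimal sketch of what the rename means at a call site, distilled from the kernel/sched.c hunks below (the old/new pairing is illustrative; only the new form exists after this merge):

	/* Before the rename: Performance Counters API */
	#include <linux/perf_counter.h>
	perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
	perf_counter_task_sched_in(current, cpu_of(rq));

	/* After the rename: Performance Events API (same arguments, new names) */
	#include <linux/perf_event.h>
	perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
	perf_event_task_sched_in(current, cpu_of(rq));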
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 830967e18285..91843ba7f237 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -2053,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-		perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
 				     1, 1, NULL, 0);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2718,7 +2718,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_counter_task_sched_in(current, cpu_of(rq));
+	perf_event_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -5193,7 +5193,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	spin_unlock(&rq->lock);
 
-	perf_counter_task_tick(curr, cpu);
+	perf_event_task_tick(curr, cpu);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5409,7 +5409,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_counter_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
@@ -7671,7 +7671,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 /*
  * Register at high priority so that task migration (migrate_all_tasks)
  * happens before everything else. This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
@@ -9528,7 +9528,7 @@ void __init sched_init(void)
 	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
-	perf_counter_init();
+	perf_event_init();
 
 	scheduler_running = 1;
 }