Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 57
1 file changed, 51 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 14c447ae5d53..5b3f6ec1b0b3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,6 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
+#include <linux/perf_counter.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -580,6 +581,7 @@ struct rq {
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
+	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -692,7 +694,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -1969,12 +1971,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+#endif
 	if (old_cpu != new_cpu) {
-		schedstat_inc(p, se.nr_migrations);
+		p->se.nr_migrations++;
+		new_rq->nr_migrations_in++;
+#ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
-	}
 #endif
+		perf_counter_task_migration(p, new_cpu);
+	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
 
@@ -2369,6 +2375,27 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+/**
+ * task_oncpu_function_call - call a function on the cpu on which a task runs
+ * @p: the task to evaluate
+ * @func: the function to be called
+ * @info: the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, which just calls the function directly
+ */
+void task_oncpu_function_call(struct task_struct *p,
+			      void (*func) (void *info), void *info)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if (task_curr(p))
+		smp_call_function_single(cpu, func, info, 1);
+	preempt_enable();
+}
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
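For orientation, a minimal sketch of how a caller could use the task_oncpu_function_call() helper added above. The callback name and body are hypothetical and are not part of this patch; the point is only that the callback runs on the CPU where the task is currently executing.

/* Illustrative caller only; __example_remote_work is a made-up name. */
static void __example_remote_work(void *info)
{
	struct task_struct *task = info;

	/* Runs on the CPU where 'task' is currently executing. */
	pr_info("poking task %d from cpu %d\n", task->pid, smp_processor_id());
}

static void example_poke_task(struct task_struct *p)
{
	/* If p is not currently on a CPU, the helper simply does nothing. */
	task_oncpu_function_call(p, __example_remote_work, p);
}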
@@ -2536,6 +2563,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
@@ -2766,6 +2794,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+	perf_counter_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
 	if (post_schedule)
@@ -2981,6 +3010,15 @@ static void calc_load_account_active(struct rq *this_rq)
 }
 
 /*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_migrations(int cpu)
+{
+	return cpu_rq(cpu)->nr_migrations_in;
+}
+
+/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
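A rough sketch of how the new cpu_nr_migrations() statistic might be consumed; the aggregation helper below is invented for illustration and does not appear in this patch.

/* Hypothetical consumer: sum migrations-in across all possible CPUs. */
static u64 total_nr_migrations(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += cpu_nr_migrations(cpu);

	return sum;
}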
@@ -5078,6 +5116,8 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	spin_unlock(&rq->lock);
 
+	perf_counter_task_tick(curr, cpu);
+
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
 	trigger_load_balance(rq, cpu);
@@ -5293,6 +5333,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
+		perf_counter_task_sched_out(prev, next, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
@@ -7536,8 +7577,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-/* Register at highest priority so that task migration (migrate_all_tasks)
- * happens before everything else.
+/*
+ * Register at high priority so that task migration (migrate_all_tasks)
+ * happens before everything else. This has to be lower priority than
+ * the notifier in the perf_counter subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
@@ -9218,7 +9261,7 @@ void __init sched_init(void)
 	 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 	 * then A0's share of the cpu resource is:
 	 *
-	 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
+	 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 	 *
 	 * We achieve this by letting init_task_group's tasks sit
 	 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
@@ -9323,6 +9366,8 @@ void __init sched_init(void)
9323 alloc_bootmem_cpumask_var(&cpu_isolated_map); 9366 alloc_bootmem_cpumask_var(&cpu_isolated_map);
9324#endif /* SMP */ 9367#endif /* SMP */
9325 9368
9369 perf_counter_init();
9370
9326 scheduler_running = 1; 9371 scheduler_running = 1;
9327} 9372}
9328 9373
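All of the perf_counter_*() hooks added in this diff come from <linux/perf_counter.h>. When CONFIG_PERF_COUNTERS is not set, they are expected to compile away; the stubs below are an assumption sketched from the call sites above, not copied from the real header.

#ifndef CONFIG_PERF_COUNTERS
/* Assumed no-op stubs, inferred from the call sites in this diff. */
static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
static inline void perf_counter_task_sched_out(struct task_struct *task,
					       struct task_struct *next, int cpu) { }
static inline void perf_counter_task_tick(struct task_struct *task, int cpu) { }
static inline void perf_counter_task_migration(struct task_struct *task, int cpu) { }
static inline void perf_counter_init(void) { }
#endif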