Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    44
1 file changed, 41 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b902e587a3a0..2f600e30dcf0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -584,6 +584,7 @@ struct rq {
         struct load_weight load;
         unsigned long nr_load_updates;
         u64 nr_switches;
+        u64 nr_migrations_in;
 
         struct cfs_rq cfs;
         struct rt_rq rt;
@@ -692,7 +693,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)              cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
         rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -1967,12 +1968,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                 p->se.sleep_start -= clock_offset;
         if (p->se.block_start)
                 p->se.block_start -= clock_offset;
+#endif
         if (old_cpu != new_cpu) {
-                schedstat_inc(p, se.nr_migrations);
+                p->se.nr_migrations++;
+                new_rq->nr_migrations_in++;
+#ifdef CONFIG_SCHEDSTATS
                 if (task_hot(p, old_rq->clock, NULL))
                         schedstat_inc(p, se.nr_forced2_migrations);
-        }
 #endif
+        }
         p->se.vruntime -= old_cfsrq->min_vruntime -
                                          new_cfsrq->min_vruntime;
 
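The hunk above moves the per-task migration count out from under CONFIG_SCHEDSTATS: p->se.nr_migrations is now incremented on every cross-CPU move (and mirrored into the destination runqueue's nr_migrations_in), while the nr_forced2_migrations schedstat stays conditional. A minimal consumer sketch, assuming only the fields touched above (the helper name is illustrative, not part of the patch):

/* Illustrative helper, not from this patch: after this change the
 * per-task migration count is maintained even when CONFIG_SCHEDSTATS
 * is disabled, so it can be read unconditionally. */
static inline u64 task_nr_migrations(struct task_struct *p)
{
        return p->se.nr_migrations;
}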
@@ -2324,6 +2328,27 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+/**
+ * task_oncpu_function_call - call a function on the cpu on which a task runs
+ * @p: the task to evaluate
+ * @func: the function to be called
+ * @info: the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, which just calls the function directly
+ */
+void task_oncpu_function_call(struct task_struct *p,
+                              void (*func) (void *info), void *info)
+{
+        int cpu;
+
+        preempt_disable();
+        cpu = task_cpu(p);
+        if (task_curr(p))
+                smp_call_function_single(cpu, func, info, 1);
+        preempt_enable();
+}
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
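task_oncpu_function_call() invokes @func only if @p is currently executing somewhere; smp_call_function_single() already handles the case where the target CPU is the local one by calling the function directly, and preemption is disabled around the task_cpu()/task_curr() lookup and the cross-call. A usage sketch under assumed names (struct my_info, my_remote_func, and poke_running_task are illustrative, not part of this patch):

struct my_info {
        int val;
};

/* Illustrative callback: executes on the CPU where the target task is
 * currently running (or inline on this CPU if the task runs here). */
static void my_remote_func(void *info)
{
        struct my_info *mi = info;
        mi->val++;      /* e.g. poke per-cpu state tied to the running task */
}

static void poke_running_task(struct task_struct *p, struct my_info *mi)
{
        /* Silently does nothing if p is not on any CPU right now. */
        task_oncpu_function_call(p, my_remote_func, mi);
}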
@@ -2480,6 +2505,7 @@ static void __sched_fork(struct task_struct *p)
         p->se.exec_start                = 0;
         p->se.sum_exec_runtime          = 0;
         p->se.prev_sum_exec_runtime     = 0;
+        p->se.nr_migrations             = 0;
         p->se.last_wakeup               = 0;
         p->se.avg_overlap               = 0;
         p->se.start_runtime             = 0;
@@ -2710,6 +2736,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
          */
         prev_state = prev->state;
         finish_arch_switch(prev);
+        perf_counter_task_sched_in(current, cpu_of(rq));
         finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
         if (post_schedule)
@@ -2872,6 +2899,15 @@ unsigned long nr_active(void)
 }
 
 /*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_migrations(int cpu)
+{
+        return cpu_rq(cpu)->nr_migrations_in;
+}
+
+/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
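cpu_nr_migrations() exposes the new rq->nr_migrations_in counter without leaking struct rq outside the scheduler. A sketch of a hypothetical caller (the function name is illustrative):

/* Illustrative: sum of task migrations into every online CPU, via the
 * accessor introduced above. */
static u64 total_migrations_in(void)
{
        u64 sum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                sum += cpu_nr_migrations(cpu);
        return sum;
}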
@@ -4838,6 +4874,7 @@ void scheduler_tick(void)
         update_rq_clock(rq);
         update_cpu_load(rq);
         curr->sched_class->task_tick(rq, curr, 0);
+        perf_counter_task_tick(curr, cpu);
         spin_unlock(&rq->lock);
 
 #ifdef CONFIG_SMP
@@ -5053,6 +5090,7 @@ need_resched_nonpreemptible:
 
         if (likely(prev != next)) {
                 sched_info_switch(prev, next);
+                perf_counter_task_sched_out(prev, cpu);
 
                 rq->nr_switches++;
                 rq->curr = next;
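Taken together, the patch gives the performance-counter core three scheduler hooks: sched-in after the architecture-level switch completes in finish_task_switch(), sched-out just before the runqueue flips to the next task in schedule(), and a per-tick callback from scheduler_tick(). Their prototypes, inferred from the call sites in this diff (the actual declarations live in the perf counter headers elsewhere in the series):

/* Inferred from the call sites above; not declarations from this file. */
extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);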