Diffstat (limited to 'kernel/sched.c')

-rw-r--r--   kernel/sched.c   100
1 file changed, 91 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8e2558c2ba67..78f4424b7c43 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -558,6 +558,7 @@ struct rq {
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
+	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -668,7 +669,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -979,6 +980,26 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void curr_rq_lock_irq_save(unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	local_irq_save(*flags);
+	rq = cpu_rq(smp_processor_id());
+	spin_lock(&rq->lock);
+}
+
+void curr_rq_unlock_irq_restore(unsigned long *flags)
+	__releases(rq->lock)
+{
+	struct rq *rq;
+
+	rq = cpu_rq(smp_processor_id());
+	spin_unlock(&rq->lock);
+	local_irq_restore(*flags);
+}
+
 void task_rq_unlock_wait(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
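The two helpers added above give callers a way to lock the current CPU's runqueue with interrupts disabled, without knowing the rq internals. A minimal usage sketch; the caller name is hypothetical and not part of this patch:

/* Hypothetical caller: inspect per-runqueue state of this CPU while
 * holding rq->lock with interrupts disabled, then undo both in order.
 */
static void sample_current_rq(void)
{
	unsigned long flags;

	curr_rq_lock_irq_save(&flags);		/* IRQs off, this CPU's rq->lock taken */
	/* ... read or update fields of the current runqueue here ... */
	curr_rq_unlock_irq_restore(&flags);	/* rq->lock dropped, IRQs restored */
}

Disabling interrupts before the smp_processor_id() lookup is what keeps the caller on one CPU, so the unlock path resolves to the same runqueue that was locked.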
@@ -1885,12 +1906,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+#endif
 	if (old_cpu != new_cpu) {
-		schedstat_inc(p, se.nr_migrations);
+		p->se.nr_migrations++;
+		new_rq->nr_migrations_in++;
+#ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
-	}
 #endif
+	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 			   new_cfsrq->min_vruntime;
 
@@ -2242,6 +2266,27 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+/**
+ * task_oncpu_function_call - call a function on the cpu on which a task runs
+ * @p:		the task to evaluate
+ * @func:	the function to be called
+ * @info:	the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, which just calls the function directly
+ */
+void task_oncpu_function_call(struct task_struct *p,
+			      void (*func) (void *info), void *info)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if (task_curr(p))
+		smp_call_function_single(cpu, func, info, 1);
+	preempt_enable();
+}
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
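task_oncpu_function_call() delivers @func via an IPI to the CPU where @p is currently executing and does nothing if @p is not running at that moment. A hedged sketch of a caller; the names here are illustrative only:

/* Illustrative callback: invoked in IPI (hardirq) context on the CPU
 * that is currently running the task, so it must not sleep.
 */
static void poke_running_task(void *info)
{
	struct task_struct *p = info;

	pr_debug("poking pid %d on its cpu\n", task_pid_nr(p));
}

static void poke_if_running(struct task_struct *p)
{
	task_oncpu_function_call(p, poke_running_task, p);
}

preempt_disable() only pins the caller; the target task can still be switched out between the task_curr() check and IPI delivery, so a callback that needs a hard guarantee would presumably re-check the task's state when it runs.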
@@ -2384,6 +2429,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
 
@@ -2604,6 +2650,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+	perf_counter_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
 	if (current->sched_class->post_schedule)
@@ -2766,6 +2813,21 @@ unsigned long nr_active(void)
 }
 
 /*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_switches(cpu) - number of context switches on that cpu
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_switches(int cpu)
+{
+	return cpu_rq(cpu)->nr_switches;
+}
+
+u64 cpu_nr_migrations(int cpu)
+{
+	return cpu_rq(cpu)->nr_migrations_in;
+}
+
+/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
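cpu_nr_switches() and cpu_nr_migrations() expose the raw per-runqueue counters (the latter backed by the new nr_migrations_in field) to code outside sched.c. A sketch of a hypothetical consumer that sums them over all online CPUs:

/* Hypothetical consumer: total context switches and incoming migrations
 * across all online CPUs, read through the new accessors.
 */
static void report_sched_counters(void)
{
	u64 switches = 0, migrations = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		switches   += cpu_nr_switches(cpu);
		migrations += cpu_nr_migrations(cpu);
	}
	printk(KERN_INFO "switches=%llu migrations=%llu\n",
	       (unsigned long long)switches,
	       (unsigned long long)migrations);
}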
@@ -4137,6 +4199,29 @@ EXPORT_PER_CPU_SYMBOL(kstat);
  * Return any ns on the sched_clock that have not yet been banked in
  * @p in case that task is currently running.
  */
+unsigned long long __task_delta_exec(struct task_struct *p, int update)
+{
+	s64 delta_exec;
+	struct rq *rq;
+
+	rq = task_rq(p);
+	WARN_ON_ONCE(!runqueue_is_locked());
+	WARN_ON_ONCE(!task_current(rq, p));
+
+	if (update)
+		update_rq_clock(rq);
+
+	delta_exec = rq->clock - p->se.exec_start;
+
+	WARN_ON_ONCE(delta_exec < 0);
+
+	return delta_exec;
+}
+
+/*
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
+ */
 unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
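__task_delta_exec() returns only the not-yet-banked nanoseconds of the current slice and expects the caller to already hold the runqueue lock (hence the WARN_ON_ONCE checks), whereas task_delta_exec() takes the lock itself. A hedged sketch of how a locked caller might combine it with the banked total; the helper name is hypothetical:

/* Hypothetical helper: precise runtime of a task that is currently
 * running, callable only with its runqueue already locked (otherwise
 * __task_delta_exec() trips its WARN_ON_ONCE checks).
 */
static u64 precise_exec_runtime(struct task_struct *p)
{
	/* banked time plus whatever the current slice has accrued */
	return p->se.sum_exec_runtime + __task_delta_exec(p, 1);
}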
@@ -4396,6 +4481,7 @@ void scheduler_tick(void)
 	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
+	perf_counter_task_tick(curr, cpu);
 	spin_unlock(&rq->lock);
 
 #ifdef CONFIG_SMP
@@ -4591,6 +4677,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
+		perf_counter_task_sched_out(prev, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
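With this hunk all three perf-counter hooks are in place: sched-in from finish_task_switch(), per-tick from scheduler_tick(), and sched-out here in schedule(). For kernels built without the counter support these presumably compile away to empty stubs; a sketch, with the signatures inferred only from the call sites in this patch:

/* Assumed no-op fallbacks when the perf-counter code is not configured
 * in; the prototypes are inferred from how sched.c calls them above.
 */
#ifndef CONFIG_PERF_COUNTERS
static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu)	{ }
static inline void perf_counter_task_sched_out(struct task_struct *task, int cpu)	{ }
static inline void perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
#endif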
@@ -5944,12 +6031,7 @@ void sched_show_task(struct task_struct *p)
 	printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
-	{
-		unsigned long *n = end_of_stack(p);
-		while (!*n)
-			n++;
-		free = (unsigned long)n - (unsigned long)end_of_stack(p);
-	}
+	free = stack_not_used(p);
 #endif
 	printk(KERN_CONT "%5lu %5d %6d\n", free,
 		task_pid_nr(p), task_pid_nr(p->real_parent));
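The open-coded stack scan in sched_show_task() is replaced by a call to stack_not_used(); the removed lines show essentially what that helper computes. A sketch of the equivalent logic, written here as a local helper rather than the actual header implementation:

/* Equivalent of the removed scan: task stacks are zero-filled at
 * allocation, so the first non-zero word above end_of_stack() bounds
 * the deepest stack usage; everything below it was never touched.
 */
static unsigned long stack_bytes_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	while (!*n)	/* skip words that are still zero */
		n++;

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}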