Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 73 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 52bbf1c842a8..40d70d9c0af3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -668,7 +668,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -979,6 +979,26 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void curr_rq_lock_irq_save(unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	local_irq_save(*flags);
+	rq = cpu_rq(smp_processor_id());
+	spin_lock(&rq->lock);
+}
+
+void curr_rq_unlock_irq_restore(unsigned long *flags)
+	__releases(rq->lock)
+{
+	struct rq *rq;
+
+	rq = cpu_rq(smp_processor_id());
+	spin_unlock(&rq->lock);
+	local_irq_restore(*flags);
+}
+
 void task_rq_unlock_wait(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
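The pair added here locks the runqueue of whichever CPU the caller is running on: interrupts go off before the CPU is sampled, so the caller cannot migrate between cpu_rq(smp_processor_id()) and spin_lock(), and the unlock side resolves to the same rq for the same reason. A minimal usage sketch (the surrounding function is hypothetical; only the lock/unlock pair comes from this patch):

	/* Hypothetical caller: do something on the local runqueue. */
	static void inspect_local_rq(void)
	{
		unsigned long flags;

		curr_rq_lock_irq_save(&flags);
		/* this CPU's rq->lock is held, IRQs are off */
		curr_rq_unlock_irq_restore(&flags);
	}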
@@ -1885,12 +1905,14 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+#endif
 	if (old_cpu != new_cpu) {
-		schedstat_inc(p, se.nr_migrations);
+		p->se.nr_migrations++;
+#ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
-	}
 #endif
+	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
 
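Read straight through, the #ifdef shuffle above is easy to misparse; the net effect is that the migration count moves out from under CONFIG_SCHEDSTATS (the new se.nr_migrations field is bumped unconditionally) while the task_hot() bookkeeping stays conditional. The resulting code reads:

	if (old_cpu != new_cpu) {
		p->se.nr_migrations++;
#ifdef CONFIG_SCHEDSTATS
		if (task_hot(p, old_rq->clock, NULL))
			schedstat_inc(p, se.nr_forced2_migrations);
#endif
	}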
@@ -2242,6 +2264,27 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+/**
+ * task_oncpu_function_call - call a function on the cpu on which a task runs
+ * @p:		the task to evaluate
+ * @func:	the function to be called
+ * @info:	the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, which just calls the function directly.
+ */
+void task_oncpu_function_call(struct task_struct *p,
+			      void (*func) (void *info), void *info)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if (task_curr(p))
+		smp_call_function_single(cpu, func, info, 1);
+	preempt_enable();
+}
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
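task_oncpu_function_call() lets the perf-counter code run a callback on whatever CPU a task is currently executing on. preempt_disable() pins the caller so task_cpu(p) stays meaningful across the cross-call, and the final argument 1 to smp_call_function_single() makes the call synchronous; if @p is not running at that instant, nothing is called at all. A sketch of a caller (the callback and its use are invented for illustration):

	/* Hypothetical IPI callback: runs on @p's CPU with @info as argument. */
	static void flush_task_counters(void *info)
	{
		/* ... touch per-CPU counter state for the task ... */
	}

	task_oncpu_function_call(p, flush_task_counters, NULL);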
@@ -2384,6 +2427,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
 
@@ -2604,6 +2648,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+	perf_counter_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
 	if (current->sched_class->post_schedule)
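perf_counter_task_sched_in() is the first of three perf-counter hooks this patch drops into the scheduler; the other two appear in scheduler_tick() and schedule() further down. Their prototypes, as inferred from the call sites in this diff (the actual declarations live in a header outside this file), would be:

	extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
	extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
	extern void perf_counter_task_tick(struct task_struct *task, int cpu);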
@@ -4132,6 +4177,29 @@ EXPORT_PER_CPU_SYMBOL(kstat);
  * Return any ns on the sched_clock that have not yet been banked in
  * @p in case that task is currently running.
  */
+unsigned long long __task_delta_exec(struct task_struct *p, int update)
+{
+	s64 delta_exec;
+	struct rq *rq;
+
+	rq = task_rq(p);
+	WARN_ON_ONCE(!runqueue_is_locked());
+	WARN_ON_ONCE(!task_current(rq, p));
+
+	if (update)
+		update_rq_clock(rq);
+
+	delta_exec = rq->clock - p->se.exec_start;
+
+	WARN_ON_ONCE(delta_exec < 0);
+
+	return delta_exec;
+}
+
+/*
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
+ */
 unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
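Unlike task_delta_exec() below it, which takes the task's runqueue lock itself, the double-underscore variant takes no locks: the WARN_ON_ONCE() calls instead assert that the caller already holds the runqueue lock and that @p is current on that runqueue, and the update flag says whether rq->clock should be refreshed first. A sketch of such a caller, assuming the curr_rq_lock_irq_save() helpers added earlier (hypothetical usage, not from this patch):

	unsigned long flags;
	unsigned long long ns;

	curr_rq_lock_irq_save(&flags);
	ns = __task_delta_exec(current, 1);	/* 1 == refresh rq->clock first */
	curr_rq_unlock_irq_restore(&flags);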
@@ -4391,6 +4459,7 @@ void scheduler_tick(void)
 	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
+	perf_counter_task_tick(curr, cpu);
 	spin_unlock(&rq->lock);
 
 #ifdef CONFIG_SMP
@@ -4586,6 +4655,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
+		perf_counter_task_sched_out(prev, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
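Taken together, the three hooks bracket a task's time on a CPU: perf_counter_task_sched_out() fires in schedule() just before the runqueue switches to the next task, perf_counter_task_sched_in() fires in finish_task_switch() on the incoming task, and perf_counter_task_tick() samples in between on every scheduler tick. At each call site rq->lock is still held, which is what lets the counter code rely on helpers like __task_delta_exec() that merely assert the lock.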