Diffstat (limited to 'kernel/sched.c')

 kernel/sched.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 73 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 748ff924a290..3dfbff5fb1ac 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -658,7 +658,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -969,6 +969,26 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void curr_rq_lock_irq_save(unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	local_irq_save(*flags);
+	rq = cpu_rq(smp_processor_id());
+	spin_lock(&rq->lock);
+}
+
+void curr_rq_unlock_irq_restore(unsigned long *flags)
+	__releases(rq->lock)
+{
+	struct rq *rq;
+
+	rq = cpu_rq(smp_processor_id());
+	spin_unlock(&rq->lock);
+	local_irq_restore(*flags);
+}
+
 void task_rq_unlock_wait(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
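
Review note: the two helpers added above always operate on the runqueue of the CPU they run on; interrupts are disabled before smp_processor_id() is read, so the CPU (and thus the runqueue) cannot change until the matching unlock. A minimal usage sketch, with a hypothetical caller that is not part of this patch:

	/*
	 * Hypothetical caller: briefly pin the current CPU's runqueue.
	 * IRQs stay off between the two calls, so smp_processor_id()
	 * resolves to the same runqueue in both helpers.
	 */
	static void example_current_rq_section(void)
	{
		unsigned long flags;

		curr_rq_lock_irq_save(&flags);
		/* ... inspect or update per-CPU runqueue state ... */
		curr_rq_unlock_irq_restore(&flags);
	}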
@@ -1876,12 +1896,14 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+#endif
 	if (old_cpu != new_cpu) {
-		schedstat_inc(p, se.nr_migrations);
+		p->se.nr_migrations++;
+#ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
-	}
 #endif
+	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
 
@@ -2236,6 +2258,27 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+/**
+ * task_oncpu_function_call - call a function on the cpu on which a task runs
+ * @p: the task to evaluate
+ * @func: the function to be called
+ * @info: the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, which just calls the function directly
+ */
+void task_oncpu_function_call(struct task_struct *p,
+			      void (*func) (void *info), void *info)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if (task_curr(p))
+		smp_call_function_single(cpu, func, info, 1);
+	preempt_enable();
+}
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
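
Review note: for the case where @p runs on the local CPU, this relies on smp_call_function_single() invoking @func directly, as the kernel-doc above notes; preempt_disable() keeps the task_cpu()/task_curr() snapshot from racing with a local context switch. A hedged usage sketch (the callback and its purpose are hypothetical, not from this patch):

	/* Hypothetical callback: runs on whichever CPU @p executes on. */
	static void sync_remote_task_state(void *info)
	{
		struct task_struct *p = info;

		/* e.g. flush per-CPU accounting for p before reading it */
	}

	/* caller side, with a reference on p held: */
	task_oncpu_function_call(p, sync_remote_task_state, p);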
@@ -2378,6 +2421,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
 
@@ -2598,6 +2642,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+	perf_counter_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
 	if (current->sched_class->post_schedule)
@@ -4056,6 +4101,29 @@ EXPORT_PER_CPU_SYMBOL(kstat);
  * Return any ns on the sched_clock that have not yet been banked in
  * @p in case that task is currently running.
  */
+unsigned long long __task_delta_exec(struct task_struct *p, int update)
+{
+	s64 delta_exec;
+	struct rq *rq;
+
+	rq = task_rq(p);
+	WARN_ON_ONCE(!runqueue_is_locked());
+	WARN_ON_ONCE(!task_current(rq, p));
+
+	if (update)
+		update_rq_clock(rq);
+
+	delta_exec = rq->clock - p->se.exec_start;
+
+	WARN_ON_ONCE(delta_exec < 0);
+
+	return delta_exec;
+}
+
+/*
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
+ */
 unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
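
Review note: __task_delta_exec() returns the runtime of the current slice that has not yet been folded into sum_exec_runtime, and expects the caller to already hold the runqueue lock (hence the WARN_ON_ONCE checks); the locked task_delta_exec() kept below is for callers that do not. A worked example of the arithmetic, with made-up numbers:

	/*
	 * Illustration only: if p started its current slice at
	 * p->se.exec_start = 1,000,000 ns and rq->clock has since
	 * advanced to 1,250,000 ns, then
	 *
	 *	delta_exec = rq->clock - p->se.exec_start
	 *	           = 1250000 - 1000000 = 250000 ns
	 *
	 * i.e. 250 us of runtime not yet banked in sum_exec_runtime.
	 */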
@@ -4279,6 +4347,7 @@ void scheduler_tick(void)
 	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
+	perf_counter_task_tick(curr, cpu);
 	spin_unlock(&rq->lock);
 
 #ifdef CONFIG_SMP
@@ -4474,6 +4543,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
+		perf_counter_task_sched_out(prev, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
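
Review note: taken together, the three hook sites above define the scheduler side of the perf counter protocol: the outgoing task's counters are switched out in schedule() before the context switch, the incoming task's counters are switched in from finish_task_switch(), and perf_counter_task_tick() gives the subsystem a periodic poll point. The prototypes below are inferred from these call sites; the authoritative declarations live in the perf counter header introduced elsewhere in this series (assumption: exact parameter names may differ):

	extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
	extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
	extern void perf_counter_task_tick(struct task_struct *task, int cpu);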