Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 29 ++++-------------------------
 1 file changed, 4 insertions(+), 25 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index fbe86cb04b61..e142e92f38da 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2265,27 +2265,6 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);
 #endif /* CONFIG_SMP */
 
-/**
- * task_oncpu_function_call - call a function on the cpu on which a task runs
- * @p: the task to evaluate
- * @func: the function to be called
- * @info: the function call argument
- *
- * Calls the function @func when the task is currently running. This might
- * be on the current CPU, which just calls the function directly
- */
-void task_oncpu_function_call(struct task_struct *p,
-			      void (*func) (void *info), void *info)
-{
-	int cpu;
-
-	preempt_disable();
-	cpu = task_cpu(p);
-	if (task_curr(p))
-		smp_call_function_single(cpu, func, info, 1);
-	preempt_enable();
-}
-
 #ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
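For reference, a minimal sketch (not part of this commit) of how a caller could have used the helper removed above: per its kernel-doc, task_oncpu_function_call() ran @func on the CPU currently executing @p. The names remote_poke() and poke_task() below are hypothetical, made up for the illustration.

/* Hypothetical caller of the removed helper. remote_poke() runs in IPI
 * context on the CPU where p is currently running, or is called
 * directly when that CPU is the local one (see the kernel-doc above).
 */
static void remote_poke(void *info)
{
	struct task_struct *p = info;

	pr_info("poking %s on this cpu\n", p->comm);
}

static void poke_task(struct task_struct *p)
{
	task_oncpu_function_call(p, remote_poke, p);
}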
@@ -2776,9 +2755,12 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
 {
+	sched_info_switch(prev, next);
+	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
+	trace_sched_switch(prev, next);
 }
 
 /**
@@ -2911,7 +2893,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_sched_switch(prev, next);
+
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -3989,9 +3971,6 @@ need_resched_nonpreemptible:
 	rq->skip_clock_update = 0;
 
 	if (likely(prev != next)) {
-		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next);
-
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
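Taken together, the hunks above consolidate the pre-switch hooks into one place. A sketch of the resulting prepare_task_switch(), reconstructed from the hunks (the trailing comments are editorial, not in the file): it is called from context_switch() just before the mm and register state are switched, so all three hooks now fire at the same point.

static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	sched_info_switch(prev, next);		/* moved here from schedule() */
	perf_event_task_sched_out(prev, next);	/* moved here from schedule() */
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
	trace_sched_switch(prev, next);		/* moved here from context_switch() */
}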