Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	47
1 file changed, 18 insertions(+), 29 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c92670f8e097..33c903573132 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1811,6 +1811,20 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 
 static void calc_load_account_active(struct rq *this_rq);
 
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfuly executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
+	task_thread_info(p)->cpu = cpu;
+#endif
+}
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -1967,20 +1981,6 @@ inline int task_curr(const struct task_struct *p)
 	return cpu_curr(task_cpu(p)) == p;
 }
 
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfuly executed on another CPU. We must ensure that updates of
-	 * per-task data have been completed by this moment.
-	 */
-	smp_wmb();
-	task_thread_info(p)->cpu = cpu;
-#endif
-}
-
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
 				       int oldprio, int running)
@@ -2552,7 +2552,6 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
-	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2586,13 +2585,13 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
+	if (p->sched_class->task_fork)
+		p->sched_class->task_fork(p);
+
 #ifdef CONFIG_SMP
 	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
-	local_irq_save(flags);
-	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
-	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2625,17 +2624,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	update_rq_clock(rq);
-
-	if (!p->sched_class->task_new || !current->se.on_rq) {
-		activate_task(rq, p, 0);
-	} else {
-		/*
-		 * Let the scheduling class do new task startup
-		 * management (if any):
-		 */
-		p->sched_class->task_new(rq, p);
-		inc_nr_running(rq);
-	}
+	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
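
Note: the sketch below is not part of the patch. It is a minimal, self-contained userspace mock (it compiles with any C compiler) of the control flow this change gives sched_fork(): the scheduling class's optional ->task_fork() hook now runs before the child is placed with set_task_cpu(), and the NULL check makes the hook optional per class. All *_mock names, the fair_task_fork body, and the printf output are illustrative stand-ins, not kernel code.

/*
 * Userspace sketch of the reordered sched_fork() tail from this patch.
 * Mock types stand in for the kernel's task_struct and sched_class.
 */
#include <stdio.h>

struct task_struct;

struct sched_class {
	/* optional per-class fork hook, as introduced by this patch */
	void (*task_fork)(struct task_struct *p);
};

struct task_struct {
	const struct sched_class *sched_class;
	unsigned int cpu;
};

static void fair_task_fork(struct task_struct *p)
{
	/* placeholder for the class-specific setup a real class would do */
	printf("task_fork: class-specific setup for the new task\n");
}

static const struct sched_class fair_sched_class_mock = {
	.task_fork = fair_task_fork,
};

static void set_task_cpu_mock(struct task_struct *p, unsigned int cpu)
{
	p->cpu = cpu;
	printf("set_task_cpu: child placed on CPU %u\n", cpu);
}

/* mirrors the tail of sched_fork() after this patch */
static void sched_fork_mock(struct task_struct *p)
{
	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);

	set_task_cpu_mock(p, 0 /* the kernel gets this from select_task_rq() */);
}

int main(void)
{
	struct task_struct child = { .sched_class = &fair_sched_class_mock };

	sched_fork_mock(&child);
	return 0;
}

The real patch keeps the same shape: the hook is checked for NULL before being called, and set_task_cpu() in sched_fork() no longer needs the local_irq_save()/update_rq_clock() dance that the removed lines show.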