-rw-r--r--  include/linux/sched.h    3
-rw-r--r--  kernel/sched.c          20
-rw-r--r--  kernel/sched_fair.c      2
-rw-r--r--  kernel/sched_idletask.c  2
-rw-r--r--  kernel/sched_rt.c       38
-rw-r--r--  kernel/sched_stoptask.c  3
6 files changed, 41 insertions(+), 27 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b33a700652dc..ff4e2f9c24a7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1067,8 +1067,7 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct rq *rq, struct task_struct *p,
-			      int sd_flag, int flags);
+	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
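After this hunk, ->select_task_rq() takes only the task and the flag arguments; the runqueue is no longer passed in (the comment in the wake_up_new_task() hunk further down notes that select_task_rq() can drop rq->lock). A minimal sketch of how a scheduling class wires up the new prototype; the dummy class and function names below are illustrative, not part of this patch:

#ifdef CONFIG_SMP
/* Post-patch prototype: no struct rq argument. */
static int select_task_rq_dummy(struct task_struct *p, int sd_flag, int flags)
{
	/* Keep the task on its current CPU; real classes consult sched domains. */
	return task_cpu(p);
}
#endif

static const struct sched_class dummy_sched_class = {
	/* ... other callbacks elided ... */
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dummy,
#endif
};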
diff --git a/kernel/sched.c b/kernel/sched.c
index d398f2f0a3c9..d4b815d345b3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2195,13 +2195,15 @@ static int migration_cpu_stop(void *data);
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static bool migrate_task(struct task_struct *p, struct rq *rq)
+static bool need_migrate_task(struct task_struct *p)
 {
 	/*
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
 	 */
-	return p->on_rq || task_running(rq, p);
+	bool running = p->on_rq || p->on_cpu;
+	smp_rmb(); /* finish_lock_switch() */
+	return running;
 }
 
 /*
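For readability, the helper as it reads after this hunk, with short editorial comments added here (they are not part of the patch):

static bool need_migrate_task(struct task_struct *p)
{
	/*
	 * If the task is not on a runqueue (and not running), then
	 * the next wake-up will properly place the task.
	 */
	bool running = p->on_rq || p->on_cpu;	/* queued, or still running on a CPU */
	smp_rmb();				/* finish_lock_switch() */
	return running;
}

Note that the rq argument is gone: the p->on_cpu read replaces task_running(rq, p), so callers such as sched_exec() and set_cpus_allowed_ptr() below no longer need to pass their runqueue.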
@@ -2376,9 +2378,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 {
-	int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
+	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
@@ -2533,7 +2535,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 		en_flags |= ENQUEUE_WAKING;
 	}
 
-	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
 	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
 	__task_rq_unlock(rq);
@@ -2744,7 +2746,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
 	 * without people poking at ->cpus_allowed.
 	 */
-	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 	set_task_cpu(p, cpu);
 
 	p->state = TASK_RUNNING;
@@ -3474,7 +3476,7 @@ void sched_exec(void)
 	int dest_cpu;
 
 	rq = task_rq_lock(p, &flags);
-	dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == smp_processor_id())
 		goto unlock;
 
@@ -3482,7 +3484,7 @@ void sched_exec(void)
 	 * select_task_rq() can race against ->cpus_allowed
 	 */
 	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-	    likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
+	    likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 
 		task_rq_unlock(rq, &flags);
@@ -5911,7 +5913,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (migrate_task(p, rq)) {
+	if (need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		__task_rq_unlock(rq);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4ee50f0af8d1..96b2c95ac356 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1657,7 +1657,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
  * preempt must be disabled.
  */
 static int
-select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index a776a6396427..0a51882534ea 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -7,7 +7,7 @@
 
 #ifdef CONFIG_SMP
 static int
-select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9ca4f5f879c4..19ecb3127379 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -977,13 +977,23 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+	struct task_struct *curr;
+	struct rq *rq;
+	int cpu;
+
 	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
 
+	cpu = task_cpu(p);
+	rq = cpu_rq(cpu);
+
+	rcu_read_lock();
+	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
 	/*
-	 * If the current task is an RT task, then
+	 * If the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -997,21 +1007,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 	 * lock?
 	 *
 	 * For equal prio tasks, we just let the scheduler sort it out.
+	 *
+	 * Otherwise, just let it ride on the affined RQ and the
+	 * post-schedule router will push the preempted task away
+	 *
+	 * This test is optimistic, if we get it wrong the load-balancer
+	 * will have to sort it out.
 	 */
-	if (unlikely(rt_task(rq->curr)) &&
-	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio) &&
+	if (curr && unlikely(rt_task(curr)) &&
+	    (curr->rt.nr_cpus_allowed < 2 ||
+	     curr->prio < p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
-		int cpu = find_lowest_rq(p);
+		int target = find_lowest_rq(p);
 
-		return (cpu == -1) ? task_cpu(p) : cpu;
+		if (target != -1)
+			cpu = target;
 	}
+	rcu_read_unlock();
 
-	/*
-	 * Otherwise, just let it ride on the affined RQ and the
-	 * post-schedule router will push the preempted task away
-	 */
-	return task_cpu(p);
+	return cpu;
 }
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
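Stitched together from the two hunks above, select_task_rq_rt() as it reads after this patch (the long comment block is elided here for brevity):

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	int cpu;

	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

	cpu = task_cpu(p);
	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/* (comment block as in the hunks above) */
	if (curr && unlikely(rt_task(curr)) &&
	    (curr->rt.nr_cpus_allowed < 2 ||
	     curr->prio < p->prio) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int target = find_lowest_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

	return cpu;
}

Since rq->curr is read without rq->lock, its value is only a hint; as the added comment says, the test is optimistic and the load-balancer sorts out any misses.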
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index f607de42e6fc..6f437632afab 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -9,8 +9,7 @@
 
 #ifdef CONFIG_SMP
 static int
-select_task_rq_stop(struct rq *rq, struct task_struct *p,
-		    int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* stop tasks as never migrate */
 }