author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-05 11:23:46 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-14 02:52:36 -0400
commit	7608dec2ce2004c234339bef8c8074e5e601d0e9 (patch)
tree	a855754a4fa3de6fe0d287c9d94d58d7bd6e8978 /kernel/sched_rt.c
parent	013fdb8086acaae5f8eb96f9ad48fcd98882ac46 (diff)
sched: Drop the rq argument to sched_class::select_task_rq()
In preparation for calling select_task_rq() without rq->lock held, drop
the dependency on the rq argument.
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152729.031077745@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
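
To make the changelog concrete, here is a rough sketch (editorial, not part of this patch; the real wrapper lives in kernel/sched.c and is not shown by this diff) of how the core code is expected to call the hook once the rq argument is gone. The wrapper name select_task_rq() is taken from the changelog above; its body here is an assumption.

	/*
	 * Illustrative only: no rq->lock is held around the call, so every
	 * sched_class implementation must tolerate reading stale data.
	 */
	static inline
	int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
	{
		int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

		/*
		 * The returned CPU is only a hint; the caller is still expected
		 * to validate it against p->cpus_allowed and the online mask
		 * before actually queueing the task there.
		 */
		return cpu;
	}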
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	38
1 files changed, 26 insertions, 12 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9ca4f5f879c4..19ecb3127379 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -977,13 +977,23 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+	struct task_struct *curr;
+	struct rq *rq;
+	int cpu;
+
 	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
 
+	cpu = task_cpu(p);
+	rq = cpu_rq(cpu);
+
+	rcu_read_lock();
+	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
 	/*
-	 * If the current task is an RT task, then
+	 * If the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -997,21 +1007,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 	 * lock?
 	 *
 	 * For equal prio tasks, we just let the scheduler sort it out.
+	 *
+	 * Otherwise, just let it ride on the affined RQ and the
+	 * post-schedule router will push the preempted task away
+	 *
+	 * This test is optimistic, if we get it wrong the load-balancer
+	 * will have to sort it out.
 	 */
-	if (unlikely(rt_task(rq->curr)) &&
-	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio) &&
+	if (curr && unlikely(rt_task(curr)) &&
+	    (curr->rt.nr_cpus_allowed < 2 ||
+	     curr->prio < p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
-		int cpu = find_lowest_rq(p);
+		int target = find_lowest_rq(p);
 
-		return (cpu == -1) ? task_cpu(p) : cpu;
+		if (target != -1)
+			cpu = target;
 	}
+	rcu_read_unlock();
 
-	/*
-	 * Otherwise, just let it ride on the affined RQ and the
-	 * post-schedule router will push the preempted task away
-	 */
-	return task_cpu(p);
+	return cpu;
 }
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
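
A note on the unlocked access above (editorial, not part of the patch): with rq->lock no longer held, rq->curr can be switched out or even exit while we look at it. Task structs are freed via RCU, so the pattern the new code relies on is roughly:

	rcu_read_lock();			/* pins the task_struct against freeing */
	curr = ACCESS_ONCE(rq->curr);		/* one read; may be stale immediately */
	if (curr && rt_task(curr)) {
		/* use curr->prio etc. purely as a placement hint */
	}
	rcu_read_unlock();

The peeked pointer can therefore be stale but never dangling, and a wrong guess only costs a later fixup by the RT push/pull logic -- which is exactly what the new "This test is optimistic" comment documents.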