commit 65cc8e4859ff29a9ddc989c88557d6059834c2a2
tree 7f12c2532a92d607b5c52fa71c1799631a7a91ff /kernel/sched.c
parent 0017d735092844118bef006696a750a0e4ef6ebd
author Peter Zijlstra <a.p.zijlstra@chello.nl> 2010-03-25 16:05:16 -0400
committer Ingo Molnar <mingo@elte.hu> 2010-04-02 14:12:04 -0400
sched: Optimize task_rq_lock()
Now that we hold the rq->lock over set_task_cpu() again, we can do
away with most of the TASK_WAKING checks and reduce them again to
set_cpus_allowed_ptr().
Removes some conditionals from scheduling hot-paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Oleg Nesterov <oleg@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)
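The change reduces __task_rq_lock()/task_rq_lock() to the classic double-checked queue-lock loop: read the task's current runqueue, lock it, and retry if the task migrated in between. A minimal userspace analogue of that loop, with made-up names and a pthread mutex standing in for the raw spinlock (a sketch, not the kernel implementation):

#include <pthread.h>
#include <stdatomic.h>

struct rq {
        pthread_mutex_t lock;
};

struct task {
        _Atomic(struct rq *) rq;        /* runqueue the task currently sits on */
};

static struct rq *task_rq_lock_demo(struct task *p)
{
        struct rq *rq;

        for (;;) {
                rq = atomic_load(&p->rq);
                pthread_mutex_lock(&rq->lock);
                if (rq == atomic_load(&p->rq))
                        return rq;      /* lock held: p is pinned to rq */
                /* p migrated between the load and the lock: retry */
                pthread_mutex_unlock(&rq->lock);
        }
}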
diff --git a/kernel/sched.c b/kernel/sched.c
index dcd17736dae1..51d336e08a92 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -914,8 +914,8 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
- * Check whether the task is waking, we use this to synchronize against
- * ttwu() so that task_cpu() reports a stable number.
+ * Check whether the task is waking, we use this to synchronize ->cpus_allowed
+ * against ttwu().
  */
 static inline int task_is_waking(struct task_struct *p)
 {
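The hunk context stops at the helper's opening brace. In the sched.c of this period the elided body is a one-line state test; it is reproduced here for reference, from recollection of the contemporaneous source, so verify against the tree at this commit:

static inline int task_is_waking(struct task_struct *p)
{
        return unlikely(p->state == TASK_WAKING);
}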
@@ -932,11 +932,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 	struct rq *rq;
 
 	for (;;) {
-		while (task_is_waking(p))
-			cpu_relax();
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_is_waking(p)))
+		if (likely(rq == task_rq(p)))
 			return rq;
 		raw_spin_unlock(&rq->lock);
 	}
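Dropping the waking checks from this hot path is safe because, per the changelog, set_task_cpu() now runs with the runqueue lock held again, so the plain rq == task_rq(p) recheck observes any concurrent migration. A sketch of the interleaving the recheck handles (illustrative, not kernel code):

        CPU0: __task_rq_lock(p)                 CPU1: migrating p
        rq = task_rq(p);                        raw_spin_lock(&rq->lock);
        raw_spin_lock(&rq->lock);               set_task_cpu(p, new_cpu);
            /* spins until CPU1 releases */     raw_spin_unlock(&rq->lock);
        rq == task_rq(p) is now false: CPU0 unlocks and retries against the new runqueue.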
@@ -953,12 +951,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	struct rq *rq;
 
 	for (;;) {
-		while (task_is_waking(p))
-			cpu_relax();
 		local_irq_save(*flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_is_waking(p)))
+		if (likely(rq == task_rq(p)))
 			return rq;
 		raw_spin_unlock_irqrestore(&rq->lock, *flags);
 	}
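task_rq_lock() is the irq-safe variant of the same loop; its calling convention is visible in the set_cpus_allowed_ptr() hunk below: the caller supplies a flags word, and the matching unlock restores the interrupt state. Typical usage:

        unsigned long flags;
        struct rq *rq;

        rq = task_rq_lock(p, &flags);   /* irqs off, p pinned to rq */
        /* ... operate on p and rq ... */
        task_rq_unlock(rq, &flags);     /* drops the lock, restores irqs */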
@@ -5262,7 +5258,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	struct rq *rq;
 	int ret = 0;
 
+	/*
+	 * Serialize against TASK_WAKING so that ttwu() and wunt() can
+	 * drop the rq->lock and still rely on ->cpus_allowed.
+	 */
+again:
+	while (task_is_waking(p))
+		cpu_relax();
 	rq = task_rq_lock(p, &flags);
+	if (task_is_waking(p)) {
+		task_rq_unlock(rq, &flags);
+		goto again;
+	}
 
 	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
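With the hot paths stripped, the TASK_WAKING serialization survives only in this slow path: spin until the flag clears, take the lock, then recheck under the lock, since a wakeup can set the flag between the unlocked spin and the lock acquisition. The same wait-then-revalidate shape as a self-contained userspace sketch (hypothetical names; sched_yield() stands in for cpu_relax()):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int waking;              /* models p->state == TASK_WAKING */

static void lock_stable(void)
{
        for (;;) {
                while (atomic_load(&waking))    /* unlocked spin, cf. 'again:' */
                        sched_yield();
                pthread_mutex_lock(&rq_lock);
                if (!atomic_load(&waking))
                        return;                 /* stable: caller holds rq_lock */
                pthread_mutex_unlock(&rq_lock); /* raced with a wakeup: retry */
        }
}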