author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2011-04-05 11:23:45 -0400
committer  Ingo Molnar <mingo@elte.hu>               2011-04-14 02:52:35 -0400
commit     013fdb8086acaae5f8eb96f9ad48fcd98882ac46 (patch)
tree       dfc5cf5dcdfaa593f836b08642609cc32b0d5936 /kernel/sched.c
parent     fd2f4419b4cbe8fe90796df9617c355762afd6a4 (diff)
sched: Serialize p->cpus_allowed and ttwu() using p->pi_lock
Currently p->pi_lock already serializes p->sched_class; also put
p->cpus_allowed and try_to_wake_up() under it. This prepares the way
to do the first part of ttwu() without holding rq->lock.
By having p->sched_class and p->cpus_allowed serialized by p->pi_lock,
we prepare the way to call select_task_rq() without holding rq->lock.
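In outline, the wakeup path after this patch nests rq->lock inside
p->pi_lock. A minimal sketch of the ordering, in kernel-context C using
the identifiers from kernel/sched.c (condensed, not the full function):

        unsigned long flags;
        struct rq *rq;

        raw_spin_lock_irqsave(&p->pi_lock, flags); /* outer lock: stabilizes
                                                    * ->sched_class and
                                                    * ->cpus_allowed, and
                                                    * serializes wakeups */
        rq = __task_rq_lock(p);                    /* inner lock: the rq itself */

        /* ... wakeup work that still needs rq->lock ... */

        __task_rq_unlock(rq);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);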
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152728.990364093@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 37
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dece28e505c9..d398f2f0a3c9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2340,7 +2340,7 @@ EXPORT_SYMBOL_GPL(kick_process);
 
 #ifdef CONFIG_SMP
 /*
- * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
+ * ->cpus_allowed is protected by both rq->lock and p->pi_lock
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
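The rewritten comment encodes the classic dual-lock rule: writers of
->cpus_allowed must hold both locks, so a reader holding either one alone
sees a stable mask. A sketch of the implied invariant in comment form (an
editorial reading of the hunks below, not verbatim kernel text):

        /*
         * Rule implied by "protected by both rq->lock and p->pi_lock":
         *
         *   to WRITE p->cpus_allowed: hold p->pi_lock AND rq->lock
         *   to READ  p->cpus_allowed: hold p->pi_lock OR  rq->lock
         *
         * Either reader excludes every writer, since a writer needs both.
         */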
@@ -2373,7 +2373,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 
 /*
- * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
+ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
 int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
@@ -2499,7 +2499,8 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = task_rq_lock(p, &flags);
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	rq = __task_rq_lock(p);
 	if (!(p->state & state))
 		goto out;
 
@@ -2557,7 +2558,8 @@ out_running:
 	ttwu_stat(rq, p, cpu, wake_flags);
 	success = 1;
 out:
-	task_rq_unlock(rq, &flags);
+	__task_rq_unlock(rq);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 	put_cpu();
 
 	return success;
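Note the teardown mirrors the acquisition order: the inner rq->lock drops
first, and the IRQ flags saved by the pi_lock irqsave travel with the
outer lock. __task_rq_lock()/__task_rq_unlock() do not touch IRQ state
themselves, which is why &flags disappears from the rq calls above. A
condensed sketch:

        __task_rq_unlock(rq);                           /* inner lock first; IRQs stay off */
        raw_spin_unlock_irqrestore(&p->pi_lock, flags); /* outer lock; restores IRQ state */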
@@ -4694,6 +4696,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
+	lockdep_assert_held(&p->pi_lock);
+
 	rq = task_rq_lock(p, &flags);
 
 	trace_sched_pi_setprio(p, prio);
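The new lockdep_assert_held() documents (and, under CONFIG_LOCKDEP,
enforces) that rt_mutex_setprio() callers already hold p->pi_lock, which
is what serializes the ->sched_class change. A sketch of the calling
pattern; boost_prio() is a hypothetical illustration, not a kernel
function (the real caller is the rt_mutex PI-boosting code):

        /* Hypothetical caller, for illustration only. */
        static void boost_prio(struct task_struct *p, int prio)
        {
                unsigned long flags;

                raw_spin_lock_irqsave(&p->pi_lock, flags);
                rt_mutex_setprio(p, prio);      /* assertion above holds here */
                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
        }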
@@ -5317,7 +5321,6 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
 	struct task_struct *p;
 	unsigned long flags;
-	struct rq *rq;
 	int retval;
 
 	get_online_cpus();
@@ -5332,9 +5335,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	if (retval)
 		goto out_unlock;
 
-	rq = task_rq_lock(p, &flags);
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
-	task_rq_unlock(rq, &flags);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
 	rcu_read_unlock();
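With the dual-lock rule, a pure reader such as sched_getaffinity() no
longer needs the runqueue lock at all; p->pi_lock alone pins
->cpus_allowed, which is why the struct rq *rq local could be deleted
above. A minimal sketch of the reader pattern (get_affinity_snapshot()
is a hypothetical name, not kernel API):

        static void get_affinity_snapshot(struct task_struct *p, struct cpumask *mask)
        {
                unsigned long flags;

                raw_spin_lock_irqsave(&p->pi_lock, flags); /* excludes writers,
                                                            * who need pi_lock too */
                cpumask_copy(mask, &p->cpus_allowed);
                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
        }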
@@ -5882,18 +5885,8 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	unsigned int dest_cpu;
 	int ret = 0;
 
-	/*
-	 * Serialize against TASK_WAKING so that ttwu() and wunt() can
-	 * drop the rq->lock and still rely on ->cpus_allowed.
-	 */
-again:
-	while (task_is_waking(p))
-		cpu_relax();
-	rq = task_rq_lock(p, &flags);
-	if (task_is_waking(p)) {
-		task_rq_unlock(rq, &flags);
-		goto again;
-	}
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	rq = __task_rq_lock(p);
 
 	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
@@ -5921,13 +5914,15 @@ again:
 	if (migrate_task(p, rq)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
-		task_rq_unlock(rq, &flags);
+		__task_rq_unlock(rq);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
 	}
 out:
-	task_rq_unlock(rq, &flags);
+	__task_rq_unlock(rq);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 	return ret;
 }
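On the write side, taking p->pi_lock before rq->lock replaces the old
TASK_WAKING spin-and-retry loop outright: a wakeup now holds p->pi_lock
for its whole duration, so set_cpus_allowed_ptr() simply blocks on
pi_lock instead of polling task_is_waking(). A condensed, hypothetical
sketch of the writer pattern (not the full function above):

        static void update_affinity_locked(struct task_struct *p,
                                           const struct cpumask *new_mask)
        {
                unsigned long flags;
                struct rq *rq;

                raw_spin_lock_irqsave(&p->pi_lock, flags);  /* excludes ttwu() */
                rq = __task_rq_lock(p);                     /* excludes the scheduler */

                cpumask_copy(&p->cpus_allowed, new_mask);   /* both locks held: safe */

                __task_rq_unlock(rq);
                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
        }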