diff options
author | Oleg Nesterov <oleg@tv-sign.ru> | 2007-10-17 02:30:56 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-17 11:43:03 -0400 |
commit | f7b4cddcc5aca03e80e357360c9424dfba5056c2 (patch) | |
tree | af2eae61dcd0311a4c51dc7398e9d0f049f37367 /kernel/sched.c | |
parent | cf7a44168d743ca97863b7f34ed42a45fd7b5ab0 (diff) |
do CPU_DEAD migrating under read_lock(tasklist) instead of write_lock_irq(tasklist)
Currently move_task_off_dead_cpu() is called under
write_lock_irq(tasklist). This means it can't use task_lock() which is
needed to improve migrating to take task's ->cpuset into account.
Change the code to call move_task_off_dead_cpu() with irqs enabled, and
change migrate_live_tasks() to use read_lock(tasklist).
This is all preparation for the further changes proposed by Cliff Wickman, see
http://marc.info/?t=117327786100003
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Cliff Wickman <cpw@sgi.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 22 |
1 files changed, 16 insertions, 6 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 0da2b2635c54..c747bc9f3c24 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -5060,6 +5060,17 @@ wait_to_die: | |||
5060 | } | 5060 | } |
5061 | 5061 | ||
5062 | #ifdef CONFIG_HOTPLUG_CPU | 5062 | #ifdef CONFIG_HOTPLUG_CPU |
5063 | |||
5064 | static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | ||
5065 | { | ||
5066 | int ret; | ||
5067 | |||
5068 | local_irq_disable(); | ||
5069 | ret = __migrate_task(p, src_cpu, dest_cpu); | ||
5070 | local_irq_enable(); | ||
5071 | return ret; | ||
5072 | } | ||
5073 | |||
5063 | /* | 5074 | /* |
5064 | * Figure out where task on dead CPU should go, use force if neccessary. | 5075 | * Figure out where task on dead CPU should go, use force if neccessary. |
5065 | * NOTE: interrupts should be disabled by the caller | 5076 | * NOTE: interrupts should be disabled by the caller |
@@ -5098,7 +5109,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
5098 | "longer affine to cpu%d\n", | 5109 | "longer affine to cpu%d\n", |
5099 | p->pid, p->comm, dead_cpu); | 5110 | p->pid, p->comm, dead_cpu); |
5100 | } | 5111 | } |
5101 | } while (!__migrate_task(p, dead_cpu, dest_cpu)); | 5112 | } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); |
5102 | } | 5113 | } |
5103 | 5114 | ||
5104 | /* | 5115 | /* |
@@ -5126,7 +5137,7 @@ static void migrate_live_tasks(int src_cpu) | |||
5126 | { | 5137 | { |
5127 | struct task_struct *p, *t; | 5138 | struct task_struct *p, *t; |
5128 | 5139 | ||
5129 | write_lock_irq(&tasklist_lock); | 5140 | read_lock(&tasklist_lock); |
5130 | 5141 | ||
5131 | do_each_thread(t, p) { | 5142 | do_each_thread(t, p) { |
5132 | if (p == current) | 5143 | if (p == current) |
@@ -5136,7 +5147,7 @@ static void migrate_live_tasks(int src_cpu) | |||
5136 | move_task_off_dead_cpu(src_cpu, p); | 5147 | move_task_off_dead_cpu(src_cpu, p); |
5137 | } while_each_thread(t, p); | 5148 | } while_each_thread(t, p); |
5138 | 5149 | ||
5139 | write_unlock_irq(&tasklist_lock); | 5150 | read_unlock(&tasklist_lock); |
5140 | } | 5151 | } |
5141 | 5152 | ||
5142 | /* | 5153 | /* |
@@ -5214,11 +5225,10 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) | |||
5214 | * Drop lock around migration; if someone else moves it, | 5225 | * Drop lock around migration; if someone else moves it, |
5215 | * that's OK. No task can be added to this CPU, so iteration is | 5226 | * that's OK. No task can be added to this CPU, so iteration is |
5216 | * fine. | 5227 | * fine. |
5217 | * NOTE: interrupts should be left disabled --dev@ | ||
5218 | */ | 5228 | */ |
5219 | spin_unlock(&rq->lock); | 5229 | spin_unlock_irq(&rq->lock); |
5220 | move_task_off_dead_cpu(dead_cpu, p); | 5230 | move_task_off_dead_cpu(dead_cpu, p); |
5221 | spin_lock(&rq->lock); | 5231 | spin_lock_irq(&rq->lock); |
5222 | 5232 | ||
5223 | put_task_struct(p); | 5233 | put_task_struct(p); |
5224 | } | 5234 | } |