author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-11-13 13:32:29 -0500
committer Ingo Molnar <mingo@elte.hu>                2010-11-18 07:27:46 -0500
commit    48c5ccae88dcd989d9de507e8510313c6cbd352b
tree      06fe8ce2ac28e9f5844de8bc32ecbef97e40d68b   /kernel/cpu.c
parent    92fd4d4d67b945c0766416284d4ab236b31542c4
sched: Simplify cpu-hot-unplug task migration
While discussing the need for sched_idle_next(), Oleg remarked that since try_to_wake_up() ensures sleeping tasks will end up running on a sane cpu, we can do away with migrate_live_tasks().

If we then extend the existing hack of migrating current from CPU_DYING to migrating the full rq worth of tasks from CPU_DYING, the need for the sched_idle_next() abomination disappears as well, since idle will be the only possible thread left after the migration thread stops.

This greatly simplifies the hot-unplug task migration path, as can be seen from the resulting code reduction (and about half the new lines are comments).

Suggested-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1289851597.2109.547.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
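The gist of the new scheme can be sketched as follows. This is only a conceptual illustration of "migrate the full rq worth of tasks from CPU_DYING", not the code the patch adds elsewhere in the scheduler; pick_remaining_task(), pick_destination_cpu() and move_task_to() are hypothetical stand-ins, not real kernel APIs:

/*
 * Conceptual sketch (hypothetical helpers, not the real kernel API):
 * from the CPU_DYING notifier, running on the dying cpu under
 * stop_machine, push every remaining runnable task to an online cpu.
 * Once this returns, only the idle task can be left on the dying cpu,
 * so the caller may assert idle_cpu() instead of spinning in yield().
 */
static void migrate_rq_off_dying_cpu(int dying_cpu)
{
	struct task_struct *p;
	int dest_cpu;

	for (;;) {
		p = pick_remaining_task(dying_cpu);	/* hypothetical */
		if (!p)					/* only idle is left */
			break;
		dest_cpu = pick_destination_cpu(p);	/* hypothetical */
		move_task_to(p, dest_cpu);		/* hypothetical */
	}
}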
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--  kernel/cpu.c | 16
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f6e726f18491..8615aa65d927 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -189,7 +189,6 @@ static inline void check_for_tasks(int cpu)
 }
 
 struct take_cpu_down_param {
-	struct task_struct *caller;
 	unsigned long mod;
 	void *hcpu;
 };
@@ -208,11 +207,6 @@ static int __ref take_cpu_down(void *_param)
 
 	cpu_notify(CPU_DYING | param->mod, param->hcpu);
 
-	if (task_cpu(param->caller) == cpu)
-		move_task_off_dead_cpu(cpu, param->caller);
-	/* Force idle task to run as soon as we yield: it should
-	   immediately notice cpu is offline and die quickly. */
-	sched_idle_next();
 	return 0;
 }
 
@@ -223,7 +217,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 	struct take_cpu_down_param tcd_param = {
-		.caller = current,
 		.mod = mod,
 		.hcpu = hcpu,
 	};
@@ -253,9 +246,12 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	}
 	BUG_ON(cpu_online(cpu));
 
-	/* Wait for it to sleep (leaving idle task). */
-	while (!idle_cpu(cpu))
-		yield();
+	/*
+	 * The migration_call() CPU_DYING callback will have removed all
+	 * runnable tasks from the cpu, there's only the idle task left now
+	 * that the migration thread is done doing the stop_machine thing.
+	 */
+	BUG_ON(!idle_cpu(cpu));
 
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
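For reference, the ordering the new BUG_ON() relies on, condensed from the hunks above and the commit message (a sketch of the surrounding flow, not the literal source):

/*
 * Condensed ordering (sketch):
 *
 *   _cpu_down()
 *     take_cpu_down() runs on the dying cpu via stop_machine
 *       cpu_notify(CPU_DYING | mod, hcpu)   // migration_call() empties the rq
 *     BUG_ON(cpu_online(cpu));
 *     BUG_ON(!idle_cpu(cpu));               // only the idle task can remain
 *     __cpu_die(cpu);                       // this actually kills the CPU
 */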