diff options
author | Gregory Haskins <ghaskins@novell.com> | 2008-03-08 00:10:15 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-03-09 13:05:14 -0400 |
commit | 393d94d98b19089ec172566e23557997931b137e (patch) | |
tree | 62154269fd93690ffc07ad885cf371542b04ecc5 /kernel | |
parent | 6efcae460186c0c1c94afff58a92784e1fc0d10b (diff) |
cpu hotplug: adjust root-domain->online span in response to hotplug event
We currently set the root-domain online span automatically when the
domain is added to the cpu if the cpu is already a member of
cpu_online_map.
This was done as a hack/bug-fix for s2ram, but it also causes a problem
with hotplug CPU_DOWN transitioning. The right way to fix the original
problem is to actually respond to CPU_UP events, instead of CPU_ONLINE,
which is already too late.
This solves the hung reboot regression reported by Andrew Morton and
others.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 18 |
1 file changed, 7 insertions, 11 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 52b98675acb2..b02e4fc25645 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -5813,6 +5813,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
5813 | /* Must be high prio: stop_machine expects to yield to it. */ | 5813 | /* Must be high prio: stop_machine expects to yield to it. */ |
5814 | rq = task_rq_lock(p, &flags); | 5814 | rq = task_rq_lock(p, &flags); |
5815 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); | 5815 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); |
5816 | |||
5817 | /* Update our root-domain */ | ||
5818 | if (rq->rd) { | ||
5819 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | ||
5820 | cpu_set(cpu, rq->rd->online); | ||
5821 | } | ||
5822 | |||
5816 | task_rq_unlock(rq, &flags); | 5823 | task_rq_unlock(rq, &flags); |
5817 | cpu_rq(cpu)->migration_thread = p; | 5824 | cpu_rq(cpu)->migration_thread = p; |
5818 | break; | 5825 | break; |
@@ -5821,15 +5828,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
5821 | case CPU_ONLINE_FROZEN: | 5828 | case CPU_ONLINE_FROZEN: |
5822 | /* Strictly unnecessary, as first user will wake it. */ | 5829 | /* Strictly unnecessary, as first user will wake it. */ |
5823 | wake_up_process(cpu_rq(cpu)->migration_thread); | 5830 | wake_up_process(cpu_rq(cpu)->migration_thread); |
5824 | |||
5825 | /* Update our root-domain */ | ||
5826 | rq = cpu_rq(cpu); | ||
5827 | spin_lock_irqsave(&rq->lock, flags); | ||
5828 | if (rq->rd) { | ||
5829 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | ||
5830 | cpu_set(cpu, rq->rd->online); | ||
5831 | } | ||
5832 | spin_unlock_irqrestore(&rq->lock, flags); | ||
5833 | break; | 5831 | break; |
5834 | 5832 | ||
5835 | #ifdef CONFIG_HOTPLUG_CPU | 5833 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -6105,8 +6103,6 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6105 | rq->rd = rd; | 6103 | rq->rd = rd; |
6106 | 6104 | ||
6107 | cpu_set(rq->cpu, rd->span); | 6105 | cpu_set(rq->cpu, rd->span); |
6108 | if (cpu_isset(rq->cpu, cpu_online_map)) | ||
6109 | cpu_set(rq->cpu, rd->online); | ||
6110 | 6106 | ||
6111 | for (class = sched_class_highest; class; class = class->next) { | 6107 | for (class = sched_class_highest; class; class = class->next) { |
6112 | if (class->join_domain) | 6108 | if (class->join_domain) |