aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2005-11-07 03:58:38 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-07 10:53:29 -0500
commita4c4af7c8dc1eccdfb8c57e1684f08179b4407e6 (patch)
tree210773f292da3ba85818402f1945ee7b14ba8c71
parentcc658cfe3c66a6124b5a8db90cdcdd440201b1dc (diff)
[PATCH] cpu hotplug: avoid usage of smp_processor_id() in preemptible code
Replace smp_processor_id() with any_online_cpu(cpu_online_map) in order to avoid lots of "BUG: using smp_processor_id() in preemptible [00000001] code:..." messages in case taking a cpu online fails. All the traces start at the last notifier_call_chain(...) in kernel/cpu.c. Since we hold the cpu_control semaphore it shouldn't be a problem to access cpu_online_map. The reason why cpu_up failed is simply that the cpu that was supposed to be taken online wasn't even there. That is because on s390 we never know when a new cpu comes and therefore cpu_possible_map consists of only ones and doesn't reflect reality. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--kernel/sched.c3
-rw-r--r--kernel/softirq.c3
-rw-r--r--kernel/softlockup.c3
-rw-r--r--kernel/workqueue.c2
4 files changed, 7 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b4f4eb613537..013f1448006b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4680,7 +4680,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
4680#ifdef CONFIG_HOTPLUG_CPU 4680#ifdef CONFIG_HOTPLUG_CPU
4681 case CPU_UP_CANCELED: 4681 case CPU_UP_CANCELED:
4682 /* Unbind it from offline cpu so it can run. Fall thru. */ 4682 /* Unbind it from offline cpu so it can run. Fall thru. */
4683 kthread_bind(cpu_rq(cpu)->migration_thread,smp_processor_id()); 4683 kthread_bind(cpu_rq(cpu)->migration_thread,
4684 any_online_cpu(cpu_online_map));
4684 kthread_stop(cpu_rq(cpu)->migration_thread); 4685 kthread_stop(cpu_rq(cpu)->migration_thread);
4685 cpu_rq(cpu)->migration_thread = NULL; 4686 cpu_rq(cpu)->migration_thread = NULL;
4686 break; 4687 break;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f766b2fc48be..ad3295cdded5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -470,7 +470,8 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
470#ifdef CONFIG_HOTPLUG_CPU 470#ifdef CONFIG_HOTPLUG_CPU
471 case CPU_UP_CANCELED: 471 case CPU_UP_CANCELED:
472 /* Unbind so it can run. Fall thru. */ 472 /* Unbind so it can run. Fall thru. */
473 kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id()); 473 kthread_bind(per_cpu(ksoftirqd, hotcpu),
474 any_online_cpu(cpu_online_map));
474 case CPU_DEAD: 475 case CPU_DEAD:
475 p = per_cpu(ksoftirqd, hotcpu); 476 p = per_cpu(ksoftirqd, hotcpu);
476 per_cpu(ksoftirqd, hotcpu) = NULL; 477 per_cpu(ksoftirqd, hotcpu) = NULL;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 75976209cea7..a2dcceb9437d 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -123,7 +123,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
123#ifdef CONFIG_HOTPLUG_CPU 123#ifdef CONFIG_HOTPLUG_CPU
124 case CPU_UP_CANCELED: 124 case CPU_UP_CANCELED:
125 /* Unbind so it can run. Fall thru. */ 125 /* Unbind so it can run. Fall thru. */
126 kthread_bind(per_cpu(watchdog_task, hotcpu), smp_processor_id()); 126 kthread_bind(per_cpu(watchdog_task, hotcpu),
127 any_online_cpu(cpu_online_map));
127 case CPU_DEAD: 128 case CPU_DEAD:
128 p = per_cpu(watchdog_task, hotcpu); 129 p = per_cpu(watchdog_task, hotcpu);
129 per_cpu(watchdog_task, hotcpu) = NULL; 130 per_cpu(watchdog_task, hotcpu) = NULL;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7cee222231bc..42df83d7fad2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -524,7 +524,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
524 list_for_each_entry(wq, &workqueues, list) { 524 list_for_each_entry(wq, &workqueues, list) {
525 /* Unbind so it can run. */ 525 /* Unbind so it can run. */
526 kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread, 526 kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
527 smp_processor_id()); 527 any_online_cpu(cpu_online_map));
528 cleanup_workqueue_thread(wq, hotcpu); 528 cleanup_workqueue_thread(wq, hotcpu);
529 } 529 }
530 break; 530 break;