Diffstat (limited to 'kernel')
-rw-r--r--  kernel/module.c        2
-rw-r--r--  kernel/power/smp.c     4
-rw-r--r--  kernel/sched.c         4
-rw-r--r--  kernel/stop_machine.c  4
4 files changed, 7 insertions, 7 deletions
diff --git a/kernel/module.c b/kernel/module.c
index 83b3d376708c..a566745dde62 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -379,7 +379,7 @@ static void module_unload_init(struct module *mod)
 	for (i = 0; i < NR_CPUS; i++)
 		local_set(&mod->ref[i].count, 0);
 	/* Hold reference count during initialization. */
-	local_set(&mod->ref[_smp_processor_id()].count, 1);
+	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
diff --git a/kernel/power/smp.c b/kernel/power/smp.c
index cba3584b80fe..457c2302ed42 100644
--- a/kernel/power/smp.c
+++ b/kernel/power/smp.c
@@ -48,11 +48,11 @@ void disable_nonboot_cpus(void)
 {
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(0));
-	printk("Freezing CPUs (at %d)", _smp_processor_id());
+	printk("Freezing CPUs (at %d)", raw_smp_processor_id());
 	current->state = TASK_INTERRUPTIBLE;
 	schedule_timeout(HZ);
 	printk("...");
-	BUG_ON(_smp_processor_id() != 0);
+	BUG_ON(raw_smp_processor_id() != 0);
 
 	/* FIXME: for this to work, all the CPUs must be running
 	 * "idle" thread (or we deadlock). Is that guaranteed? */
diff --git a/kernel/sched.c b/kernel/sched.c
index f12a0c8a7d98..deca041fc364 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3814,7 +3814,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-	struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 
 	atomic_inc(&rq->nr_iowait);
 	schedule();
@@ -3825,7 +3825,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-	struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 	long ret;
 
 	atomic_inc(&rq->nr_iowait);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 6116b25aa7cf..84a9d18aa8da 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -100,7 +100,7 @@ static int stop_machine(void)
 	stopmachine_state = STOPMACHINE_WAIT;
 
 	for_each_online_cpu(i) {
-		if (i == _smp_processor_id())
+		if (i == raw_smp_processor_id())
 			continue;
 		ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
 		if (ret < 0)
@@ -182,7 +182,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 
 	/* If they don't care which CPU fn runs on, bind to any online one. */
 	if (cpu == NR_CPUS)
-		cpu = _smp_processor_id();
 
 	p = kthread_create(do_stop, &smdata, "kstopmachine");
 	if (!IS_ERR(p)) {
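Note on the conversion: every hunk in this diff replaces _smp_processor_id() with raw_smp_processor_id(), the accessor that reads the current CPU number without the preemption-safety check that smp_processor_id() performs under CONFIG_DEBUG_PREEMPT. These are call sites where the unchecked read is intentional: the caller is either pinned to a CPU or only needs the result as a hint. A minimal sketch of the distinction, assuming a kernel-module build environment (example_cpu_usage() is an illustrative name, not part of this patch):

#include <linux/preempt.h>
#include <linux/smp.h>

/* Illustrative only: shows when each CPU-id accessor is appropriate. */
static int example_cpu_usage(void)
{
	int pinned_cpu, hint_cpu;

	/*
	 * With preemption disabled the task cannot migrate, so the
	 * checked accessor is safe and will not warn even with
	 * CONFIG_DEBUG_PREEMPT enabled.
	 */
	preempt_disable();
	pinned_cpu = smp_processor_id();
	preempt_enable();

	/*
	 * In preemptible context, where the value is only advisory
	 * (a log message, a starting point for a search), use the
	 * raw accessor, which skips the debug check.
	 */
	hint_cpu = raw_smp_processor_id();

	return pinned_cpu == hint_cpu;
}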