author     Christoph Lameter <cl@linux.com>    2010-12-08 10:22:55 -0500
committer  Tejun Heo <tj@kernel.org>           2010-12-17 09:07:19 -0500
commit     909ea96468096b07fbb41aaf69be060d92bd9271
tree       a7e015edd96b5f674874fe78cdd889769e130a2a /kernel
parent     780f36d8b3fa9572f731d4fb85067b2e45e6f993
core: Replace __get_cpu_var with __this_cpu_read if not used for an address.
__get_cpu_var() can be replaced with __this_cpu_read(), which uses a
single read instruction with an implied address calculation to access the
correct per-CPU instance.

However, the address of a per-CPU variable passed to __this_cpu_read()
cannot be determined (the address calculation is implied through
segment prefixes). Therefore apply this conversion only to uses of
__get_cpu_var() where the address of the variable is not taken.
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Hugh Dickins <hughd@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
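
[Editorial note] To illustrate the distinction the patch relies on, here is a
minimal sketch using a hypothetical per-CPU counter (demo_count is not part of
this patch). __get_cpu_var() yields an lvalue, so its address can be taken;
__this_cpu_read() performs the whole access in one instruction with the
per-CPU offset applied via a segment prefix on x86, so no address is ever
materialized:

#include <linux/percpu.h>

/* Hypothetical per-CPU variable, for illustration only. */
static DEFINE_PER_CPU(unsigned long, demo_count);

static void demo(void)
{
	unsigned long v, *p;

	v = __get_cpu_var(demo_count);		/* old: lvalue access */
	v = __this_cpu_read(demo_count);	/* new: single read, implied address */
	__this_cpu_write(demo_count, v + 1);	/* new: single write */
	__this_cpu_inc(demo_count);		/* new: single increment */

	/* Not convertible: the address of the per-CPU instance is needed,
	 * and __this_cpu_read() has no address to hand back. */
	p = &__get_cpu_var(demo_count);
	*p = 0;
}

On x86 the __this_cpu_read() form compiles to a single %gs-prefixed mov,
whereas __get_cpu_var() first computes the per-CPU address and then
dereferences it.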
Diffstat (limited to 'kernel')
 kernel/exit.c              |  2
 kernel/fork.c              |  2
 kernel/hrtimer.c           |  2
 kernel/printk.c            |  4
 kernel/rcutree.c           |  4
 kernel/softirq.c           | 42
 kernel/time/tick-common.c  |  2
 kernel/time/tick-oneshot.c |  4
 kernel/watchdog.c          | 36
 9 files changed, 49 insertions(+), 49 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 676149a4ac5f..89c74861a3da 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -69,7 +69,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 
 		list_del_rcu(&p->tasks);
 		list_del_init(&p->sibling);
-		__get_cpu_var(process_counts)--;
+		__this_cpu_dec(process_counts);
 	}
 	list_del_rcu(&p->thread_group);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 3b159c5991b7..e05e27de67df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1282,7 +1282,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail(&p->sibling, &p->real_parent->children);
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
-			__get_cpu_var(process_counts)++;
+			__this_cpu_inc(process_counts);
 		}
 		attach_pid(p, PIDTYPE_PID, pid);
 		nr_threads++;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 72206cf5c6cf..29de5ae4ca95 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -497,7 +497,7 @@ static inline int hrtimer_is_hres_enabled(void)
  */
 static inline int hrtimer_hres_active(void)
 {
-	return __get_cpu_var(hrtimer_bases).hres_active;
+	return __this_cpu_read(hrtimer_bases.hres_active);
 }
 
 /*
diff --git a/kernel/printk.c b/kernel/printk.c
index 9a2264fc42ca..b032317f9964 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1074,8 +1074,8 @@ static DEFINE_PER_CPU(int, printk_pending);
 
 void printk_tick(void)
 {
-	if (__get_cpu_var(printk_pending)) {
-		__get_cpu_var(printk_pending) = 0;
+	if (__this_cpu_read(printk_pending)) {
+		__this_cpu_write(printk_pending, 0);
 		wake_up_interruptible(&log_wait);
 	}
 }
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ccdc04c47981..aeebf772d6a2 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -367,8 +367,8 @@ void rcu_irq_exit(void)
 	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
 	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__get_cpu_var(rcu_sched_data).nxtlist ||
-	    __get_cpu_var(rcu_bh_data).nxtlist)
+	if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+	    __this_cpu_read(rcu_bh_data.nxtlist))
 		set_need_resched();
 }
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 18f4be0d5fe0..d0a0dda52c1a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -70,7 +70,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 static void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
-	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
 	if (tsk && tsk->state != TASK_RUNNING)
 		wake_up_process(tsk);
@@ -388,8 +388,8 @@ void __tasklet_schedule(struct tasklet_struct *t)
 
 	local_irq_save(flags);
 	t->next = NULL;
-	*__get_cpu_var(tasklet_vec).tail = t;
-	__get_cpu_var(tasklet_vec).tail = &(t->next);
+	*__this_cpu_read(tasklet_vec.tail) = t;
+	__this_cpu_write(tasklet_vec.tail, &(t->next));
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -402,8 +402,8 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
 	local_irq_save(flags);
 	t->next = NULL;
-	*__get_cpu_var(tasklet_hi_vec).tail = t;
-	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+	*__this_cpu_read(tasklet_hi_vec.tail) = t;
+	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -414,8 +414,8 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
 
-	t->next = __get_cpu_var(tasklet_hi_vec).head;
-	__get_cpu_var(tasklet_hi_vec).head = t;
+	t->next = __this_cpu_read(tasklet_hi_vec.head);
+	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
 
@@ -426,9 +426,9 @@ static void tasklet_action(struct softirq_action *a)
 	struct tasklet_struct *list;
 
 	local_irq_disable();
-	list = __get_cpu_var(tasklet_vec).head;
-	__get_cpu_var(tasklet_vec).head = NULL;
-	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+	list = __this_cpu_read(tasklet_vec.head);
+	__this_cpu_write(tasklet_vec.head, NULL);
+	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
 	local_irq_enable();
 
 	while (list) {
@@ -449,8 +449,8 @@ static void tasklet_action(struct softirq_action *a)
 
 		local_irq_disable();
 		t->next = NULL;
-		*__get_cpu_var(tasklet_vec).tail = t;
-		__get_cpu_var(tasklet_vec).tail = &(t->next);
+		*__this_cpu_read(tasklet_vec.tail) = t;
+		__this_cpu_write(tasklet_vec.tail, &(t->next));
 		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
 		local_irq_enable();
 	}
@@ -461,9 +461,9 @@ static void tasklet_hi_action(struct softirq_action *a)
 	struct tasklet_struct *list;
 
 	local_irq_disable();
-	list = __get_cpu_var(tasklet_hi_vec).head;
-	__get_cpu_var(tasklet_hi_vec).head = NULL;
-	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
+	list = __this_cpu_read(tasklet_hi_vec.head);
+	__this_cpu_write(tasklet_hi_vec.head, NULL);
+	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
 	local_irq_enable();
 
 	while (list) {
@@ -484,8 +484,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 
 		local_irq_disable();
 		t->next = NULL;
-		*__get_cpu_var(tasklet_hi_vec).tail = t;
-		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+		*__this_cpu_read(tasklet_hi_vec.tail) = t;
+		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
 		__raise_softirq_irqoff(HI_SOFTIRQ);
 		local_irq_enable();
 	}
@@ -802,16 +802,16 @@ static void takeover_tasklets(unsigned int cpu)
 
 	/* Find end, append list for that CPU. */
 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
-		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
-		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
+		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
 		per_cpu(tasklet_vec, cpu).head = NULL;
 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
 	}
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
 	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
-		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
+		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
 		per_cpu(tasklet_hi_vec, cpu).head = NULL;
 		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
 	}
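
[Editorial note] The tasklet hunks above show the exception the commit message
describes: the tail updates keep __get_cpu_var() on the right-hand side
because tail must store the address of this CPU's head field, and
__this_cpu_read() cannot produce an address. A minimal sketch of the pattern,
using a hypothetical demo_vec list simplified from tasklet_vec:

/* Hypothetical per-CPU singly-linked list, for illustration only. */
struct demo_item { struct demo_item *next; };
struct demo_list {
	struct demo_item *head;
	struct demo_item **tail;
};
static DEFINE_PER_CPU(struct demo_list, demo_vec);

static void demo_splice_reset(void)
{
	struct demo_item *list;

	list = __this_cpu_read(demo_vec.head);	/* plain read: converts */
	__this_cpu_write(demo_vec.head, NULL);	/* plain write: converts */
	/* The new tail value is an address inside this CPU's instance,
	 * so __get_cpu_var() must survive to compute it. */
	__this_cpu_write(demo_vec.tail, &__get_cpu_var(demo_vec).head);
	(void)list;	/* the detached list would be processed here */
}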
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index b6b898d2eeef..051bc80a0c43 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -49,7 +49,7 @@ struct tick_device *tick_get_device(int cpu)
  */
 int tick_is_oneshot_available(void)
 {
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
 	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
 }
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index aada0e52680a..5cbc101f908b 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -95,7 +95,7 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int tick_program_event(ktime_t expires, int force)
 {
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
 	return tick_dev_program_event(dev, expires, force);
 }
@@ -167,7 +167,7 @@ int tick_oneshot_mode_active(void)
 	int ret;
 
 	local_irq_save(flags);
-	ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
+	ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
 	local_irq_restore(flags);
 
 	return ret;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6e3c41a4024c..8037a86106ed 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -116,12 +116,12 @@ static void __touch_watchdog(void)
 {
 	int this_cpu = smp_processor_id();
 
-	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(watchdog_touch_ts) = 0;
+	__this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -165,12 +165,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
 		return 1;
 
-	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
+	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return 0;
 }
 #endif
@@ -203,8 +203,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
-	if (__get_cpu_var(watchdog_nmi_touch) == true) {
-		__get_cpu_var(watchdog_nmi_touch) = false;
+	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
 	}
 
@@ -218,7 +218,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		int this_cpu = smp_processor_id();
 
 		/* only print hardlockups once */
-		if (__get_cpu_var(hard_watchdog_warn) == true)
+		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
 		if (hardlockup_panic)
@@ -226,16 +226,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		else
 			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-		__get_cpu_var(hard_watchdog_warn) = true;
+		__this_cpu_write(hard_watchdog_warn, true);
 		return;
 	}
 
-	__get_cpu_var(hard_watchdog_warn) = false;
+	__this_cpu_write(hard_watchdog_warn, false);
 	return;
 }
 static void watchdog_interrupt_count(void)
 {
-	__get_cpu_var(hrtimer_interrupts)++;
+	__this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -244,7 +244,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
 	struct pt_regs *regs = get_irq_regs();
 	int duration;
 
@@ -252,18 +252,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	watchdog_interrupt_count();
 
 	/* kick the softlockup detector */
-	wake_up_process(__get_cpu_var(softlockup_watchdog));
+	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
 	/* .. and repeat */
 	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
 	if (touch_ts == 0) {
-		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
-			__get_cpu_var(softlockup_touch_sync) = false;
+			__this_cpu_write(softlockup_touch_sync, false);
 			sched_clock_tick();
 		}
 		__touch_watchdog();
@@ -279,7 +279,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	duration = is_softlockup(touch_ts);
 	if (unlikely(duration)) {
 		/* only warn once */
-		if (__get_cpu_var(soft_watchdog_warn) == true)
+		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
 		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -294,9 +294,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
-		__get_cpu_var(soft_watchdog_warn) = true;
+		__this_cpu_write(soft_watchdog_warn, true);
 	} else
-		__get_cpu_var(soft_watchdog_warn) = false;
+		__this_cpu_write(soft_watchdog_warn, false);
 
 	return HRTIMER_RESTART;
 }