-rw-r--r--	include/asm-generic/irq_regs.h	 8
-rw-r--r--	include/linux/elevator.h	12
-rw-r--r--	include/linux/kernel_stat.h	 2
-rw-r--r--	kernel/exit.c	 2
-rw-r--r--	kernel/fork.c	 2
-rw-r--r--	kernel/hrtimer.c	 2
-rw-r--r--	kernel/printk.c	 4
-rw-r--r--	kernel/rcutree.c	 4
-rw-r--r--	kernel/softirq.c	42
-rw-r--r--	kernel/time/tick-common.c	 2
-rw-r--r--	kernel/time/tick-oneshot.c	 4
-rw-r--r--	kernel/watchdog.c	36
-rw-r--r--	mm/slab.c	 6
13 files changed, 60 insertions(+), 66 deletions(-)
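
Every hunk in this series applies the same mechanical substitution: __get_cpu_var() produces an lvalue for the current CPU's instance of a per-cpu variable, forcing separate address-calculation and access steps, while the this_cpu family folds both into a single operation (one segment-prefixed instruction on x86). A minimal before/after sketch, using a hypothetical counter rather than any variable from this patch:

        #include <linux/percpu.h>

        static DEFINE_PER_CPU(int, demo_count);         /* hypothetical */

        static void old_form(void)
        {
                /* lvalue access: the address generation and the increment
                 * are separate steps, so the caller must already be pinned
                 * to one CPU for the whole sequence. */
                __get_cpu_var(demo_count)++;
        }

        static void new_form(void)
        {
                /* __this_cpu_inc() combines the offset lookup and the RMW
                 * into one operation; on x86 it is a single instruction. */
                __this_cpu_inc(demo_count);
        }
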
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h
index 5ae1d07d4a12..6bf9355fa7eb 100644
--- a/include/asm-generic/irq_regs.h
+++ b/include/asm-generic/irq_regs.h
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-        return __get_cpu_var(__irq_regs);
+        return __this_cpu_read(__irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 {
-        struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+        struct pt_regs *old_regs;
 
-        old_regs = *pp_regs;
-        *pp_regs = new_regs;
+        old_regs = __this_cpu_read(__irq_regs);
+        __this_cpu_write(__irq_regs, new_regs);
         return old_regs;
 }
 
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4fd978e7eb83..4d857973d2c9 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -195,15 +195,9 @@ enum {
 /*
  * io context count accounting
  */
-#define elv_ioc_count_mod(name, __val) \
-        do { \
-                preempt_disable(); \
-                __get_cpu_var(name) += (__val); \
-                preempt_enable(); \
-        } while (0)
-
-#define elv_ioc_count_inc(name)        elv_ioc_count_mod(name, 1)
-#define elv_ioc_count_dec(name)        elv_ioc_count_mod(name, -1)
+#define elv_ioc_count_mod(name, __val)        this_cpu_add(name, __val)
+#define elv_ioc_count_inc(name)        this_cpu_inc(name)
+#define elv_ioc_count_dec(name)        this_cpu_dec(name)
 
 #define elv_ioc_count_read(name) \
 ({ \
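
The elevator.h hunk is the only one that deletes locking rather than merely switching accessors: the open-coded preempt_disable()/preempt_enable() pair existed solely to keep the task on one CPU across the read-modify-write, and this_cpu_add() makes that guarantee itself. A sketch of the two forms, with a hypothetical per-cpu counter:

        #include <linux/percpu.h>

        static DEFINE_PER_CPU(unsigned long, demo_ioc_count);  /* hypothetical */

        static void count_mod_old(unsigned long val)
        {
                preempt_disable();              /* pin the task to this CPU */
                __get_cpu_var(demo_ioc_count) += val;
                preempt_enable();
        }

        static void count_mod_new(unsigned long val)
        {
                /* this_cpu_add() is preemption-safe by contract: a single
                 * instruction on x86, an implicit preempt-off section in
                 * the generic fallback. */
                this_cpu_add(demo_ioc_count, val);
        }
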
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ad54c846911b..44e83ba12b5b 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -47,7 +47,7 @@ extern unsigned long long nr_context_switches(void);
 
 #ifndef CONFIG_GENERIC_HARDIRQS
 #define kstat_irqs_this_cpu(irq) \
-        (kstat_this_cpu.irqs[irq])
+        (this_cpu_read(kstat.irqs[irq]))
 
 struct irq_desc;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 676149a4ac5f..89c74861a3da 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -69,7 +69,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 
                 list_del_rcu(&p->tasks);
                 list_del_init(&p->sibling);
-                __get_cpu_var(process_counts)--;
+                __this_cpu_dec(process_counts);
         }
         list_del_rcu(&p->thread_group);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 3b159c5991b7..e05e27de67df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1282,7 +1282,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                         attach_pid(p, PIDTYPE_SID, task_session(current));
                         list_add_tail(&p->sibling, &p->real_parent->children);
                         list_add_tail_rcu(&p->tasks, &init_task.tasks);
-                        __get_cpu_var(process_counts)++;
+                        __this_cpu_inc(process_counts);
                 }
                 attach_pid(p, PIDTYPE_PID, pid);
                 nr_threads++;
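
The process_counts updates in exit.c and fork.c both run under write_lock_irq(&tasklist_lock), so the double-underscore forms, which leave migration protection entirely to the caller, are the right fit. A sketch of that convention with hypothetical names:

        #include <linux/percpu.h>
        #include <linux/spinlock.h>

        static DEFINE_PER_CPU(unsigned long, demo_counts);     /* hypothetical */
        static DEFINE_SPINLOCK(demo_lock);                     /* hypothetical */

        static void locked_update(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&demo_lock, flags);
                /* Interrupts are off, so the task cannot migrate: the
                 * unchecked __this_cpu_inc() avoids redundant protection. */
                __this_cpu_inc(demo_counts);
                spin_unlock_irqrestore(&demo_lock, flags);
        }
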
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 72206cf5c6cf..29de5ae4ca95 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -497,7 +497,7 @@ static inline int hrtimer_is_hres_enabled(void)
  */
 static inline int hrtimer_hres_active(void)
 {
-        return __get_cpu_var(hrtimer_bases).hres_active;
+        return __this_cpu_read(hrtimer_bases.hres_active);
 }
 
 /*
diff --git a/kernel/printk.c b/kernel/printk.c
index 9a2264fc42ca..b032317f9964 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1074,8 +1074,8 @@ static DEFINE_PER_CPU(int, printk_pending);
 
 void printk_tick(void)
 {
-        if (__get_cpu_var(printk_pending)) {
-                __get_cpu_var(printk_pending) = 0;
+        if (__this_cpu_read(printk_pending)) {
+                __this_cpu_write(printk_pending, 0);
                 wake_up_interruptible(&log_wait);
         }
 }
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ccdc04c47981..aeebf772d6a2 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -367,8 +367,8 @@ void rcu_irq_exit(void)
         WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
         /* If the interrupt queued a callback, get out of dyntick mode. */
-        if (__get_cpu_var(rcu_sched_data).nxtlist ||
-            __get_cpu_var(rcu_bh_data).nxtlist)
+        if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+            __this_cpu_read(rcu_bh_data.nxtlist))
                 set_need_resched();
 }
 
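
Hunks like this one also show the syntactic shift: the struct member moves inside the accessor, from __get_cpu_var(rcu_sched_data).nxtlist to __this_cpu_read(rcu_sched_data.nxtlist), so the field offset can fold into the single per-cpu address calculation. A hypothetical sketch:

        #include <linux/percpu.h>

        struct demo_data {                      /* hypothetical */
                void *nxtlist;
        };

        static DEFINE_PER_CPU(struct demo_data, demo_data);

        static int demo_callbacks_pending(void)
        {
                /* The whole expression, member access included, goes inside
                 * the accessor; base and field offsets become one operand. */
                return __this_cpu_read(demo_data.nxtlist) != NULL;
        }
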
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 18f4be0d5fe0..d0a0dda52c1a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -70,7 +70,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 static void wakeup_softirqd(void)
 {
         /* Interrupts are disabled: no need to stop preemption */
-        struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+        struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
         if (tsk && tsk->state != TASK_RUNNING)
                 wake_up_process(tsk);
@@ -388,8 +388,8 @@ void __tasklet_schedule(struct tasklet_struct *t)
 
         local_irq_save(flags);
         t->next = NULL;
-        *__get_cpu_var(tasklet_vec).tail = t;
-        __get_cpu_var(tasklet_vec).tail = &(t->next);
+        *__this_cpu_read(tasklet_vec.tail) = t;
+        __this_cpu_write(tasklet_vec.tail, &(t->next));
         raise_softirq_irqoff(TASKLET_SOFTIRQ);
         local_irq_restore(flags);
 }
@@ -402,8 +402,8 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
         local_irq_save(flags);
         t->next = NULL;
-        *__get_cpu_var(tasklet_hi_vec).tail = t;
-        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+        *__this_cpu_read(tasklet_hi_vec.tail) = t;
+        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
         raise_softirq_irqoff(HI_SOFTIRQ);
         local_irq_restore(flags);
 }
@@ -414,8 +414,8 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
         BUG_ON(!irqs_disabled());
 
-        t->next = __get_cpu_var(tasklet_hi_vec).head;
-        __get_cpu_var(tasklet_hi_vec).head = t;
+        t->next = __this_cpu_read(tasklet_hi_vec.head);
+        __this_cpu_write(tasklet_hi_vec.head, t);
         __raise_softirq_irqoff(HI_SOFTIRQ);
 }
 
@@ -426,9 +426,9 @@ static void tasklet_action(struct softirq_action *a)
         struct tasklet_struct *list;
 
         local_irq_disable();
-        list = __get_cpu_var(tasklet_vec).head;
-        __get_cpu_var(tasklet_vec).head = NULL;
-        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+        list = __this_cpu_read(tasklet_vec.head);
+        __this_cpu_write(tasklet_vec.head, NULL);
+        __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
         local_irq_enable();
 
         while (list) {
@@ -449,8 +449,8 @@ static void tasklet_action(struct softirq_action *a)
 
                 local_irq_disable();
                 t->next = NULL;
-                *__get_cpu_var(tasklet_vec).tail = t;
-                __get_cpu_var(tasklet_vec).tail = &(t->next);
+                *__this_cpu_read(tasklet_vec.tail) = t;
+                __this_cpu_write(tasklet_vec.tail, &(t->next));
                 __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                 local_irq_enable();
         }
@@ -461,9 +461,9 @@ static void tasklet_hi_action(struct softirq_action *a)
         struct tasklet_struct *list;
 
         local_irq_disable();
-        list = __get_cpu_var(tasklet_hi_vec).head;
-        __get_cpu_var(tasklet_hi_vec).head = NULL;
-        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
+        list = __this_cpu_read(tasklet_hi_vec.head);
+        __this_cpu_write(tasklet_hi_vec.head, NULL);
+        __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
         local_irq_enable();
 
         while (list) {
@@ -484,8 +484,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 
                 local_irq_disable();
                 t->next = NULL;
-                *__get_cpu_var(tasklet_hi_vec).tail = t;
-                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+                *__this_cpu_read(tasklet_hi_vec.tail) = t;
+                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                 __raise_softirq_irqoff(HI_SOFTIRQ);
                 local_irq_enable();
         }
@@ -802,16 +802,16 @@ static void takeover_tasklets(unsigned int cpu)
 
         /* Find end, append list for that CPU. */
         if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
-                *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
-                __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
+                __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                 per_cpu(tasklet_vec, cpu).head = NULL;
                 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
         }
         raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
         if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
-                *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-                __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
+                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                 per_cpu(tasklet_hi_vec, cpu).head = NULL;
                 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
         }
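
The tasklet queues are per-cpu singly linked lists whose tail field points at the last node's ->next slot (or back at ->head when the list is empty), so an append is two per-cpu accesses. Note also why &__get_cpu_var(tasklet_vec).head survives in tasklet_action() above: __this_cpu_read() returns a value, so code that needs the address of the per-cpu slot itself still has to use the lvalue form. A simplified sketch of the append, with hypothetical names:

        #include <linux/percpu.h>

        struct demo_node {                      /* hypothetical */
                struct demo_node *next;
        };

        struct demo_queue {
                struct demo_node *head;
                struct demo_node **tail;        /* &last->next, or &head if empty */
        };

        static DEFINE_PER_CPU(struct demo_queue, demo_q);

        static void demo_append(struct demo_node *n)    /* irqs assumed off */
        {
                n->next = NULL;
                *__this_cpu_read(demo_q.tail) = n;      /* link after last node */
                __this_cpu_write(demo_q.tail, &n->next);
        }
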
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index b6b898d2eeef..051bc80a0c43 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -49,7 +49,7 @@ struct tick_device *tick_get_device(int cpu)
  */
 int tick_is_oneshot_available(void)
 {
-        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
         return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
 }
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index aada0e52680a..5cbc101f908b 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -95,7 +95,7 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int tick_program_event(ktime_t expires, int force)
 {
-        struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
         return tick_dev_program_event(dev, expires, force);
 }
@@ -167,7 +167,7 @@ int tick_oneshot_mode_active(void)
         int ret;
 
         local_irq_save(flags);
-        ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
+        ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
         local_irq_restore(flags);
 
         return ret;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6e3c41a4024c..8037a86106ed 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -116,12 +116,12 @@ static void __touch_watchdog(void)
 {
         int this_cpu = smp_processor_id();
 
-        __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+        __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-        __raw_get_cpu_var(watchdog_touch_ts) = 0;
+        __this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -165,12 +165,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-        unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-        if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                 return 1;
 
-        __get_cpu_var(hrtimer_interrupts_saved) = hrint;
+        __this_cpu_write(hrtimer_interrupts_saved, hrint);
         return 0;
 }
 #endif
@@ -203,8 +203,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
         /* Ensure the watchdog never gets throttled */
         event->hw.interrupts = 0;
 
-        if (__get_cpu_var(watchdog_nmi_touch) == true) {
-                __get_cpu_var(watchdog_nmi_touch) = false;
+        if (__this_cpu_read(watchdog_nmi_touch) == true) {
+                __this_cpu_write(watchdog_nmi_touch, false);
                 return;
         }
 
@@ -218,7 +218,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 int this_cpu = smp_processor_id();
 
                 /* only print hardlockups once */
-                if (__get_cpu_var(hard_watchdog_warn) == true)
+                if (__this_cpu_read(hard_watchdog_warn) == true)
                         return;
 
                 if (hardlockup_panic)
@@ -226,16 +226,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 else
                         WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-                __get_cpu_var(hard_watchdog_warn) = true;
+                __this_cpu_write(hard_watchdog_warn, true);
                 return;
         }
 
-        __get_cpu_var(hard_watchdog_warn) = false;
+        __this_cpu_write(hard_watchdog_warn, false);
         return;
 }
 static void watchdog_interrupt_count(void)
 {
-        __get_cpu_var(hrtimer_interrupts)++;
+        __this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -244,7 +244,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-        unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
         struct pt_regs *regs = get_irq_regs();
         int duration;
 
@@ -252,18 +252,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
         watchdog_interrupt_count();
 
         /* kick the softlockup detector */
-        wake_up_process(__get_cpu_var(softlockup_watchdog));
+        wake_up_process(__this_cpu_read(softlockup_watchdog));
 
         /* .. and repeat */
         hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
         if (touch_ts == 0) {
-                if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                         /*
                          * If the time stamp was touched atomically
                          * make sure the scheduler tick is up to date.
                          */
-                        __get_cpu_var(softlockup_touch_sync) = false;
+                        __this_cpu_write(softlockup_touch_sync, false);
                         sched_clock_tick();
                 }
                 __touch_watchdog();
@@ -279,7 +279,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
         duration = is_softlockup(touch_ts);
         if (unlikely(duration)) {
                 /* only warn once */
-                if (__get_cpu_var(soft_watchdog_warn) == true)
+                if (__this_cpu_read(soft_watchdog_warn) == true)
                         return HRTIMER_RESTART;
 
                 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -294,9 +294,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
                 if (softlockup_panic)
                         panic("softlockup: hung tasks");
-                __get_cpu_var(soft_watchdog_warn) = true;
+                __this_cpu_write(soft_watchdog_warn, true);
         } else
-                __get_cpu_var(soft_watchdog_warn) = false;
+                __this_cpu_write(soft_watchdog_warn, false);
 
         return HRTIMER_RESTART;
 }
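
One subtlety in the watchdog hunks: touch_softlockup_watchdog() used __raw_get_cpu_var() because it can legitimately be called with preemption enabled, where a write that lands on a stale CPU is harmless and the raw accessor skips the debug checks. __this_cpu_write() preserves that unchecked, unprotected behavior; the checked this_cpu_write() would add protection this path does not need. A sketch of the distinction with a hypothetical timestamp:

        #include <linux/percpu.h>

        static DEFINE_PER_CPU(unsigned long, demo_touch_ts);   /* hypothetical */

        static void demo_touch_checked(void)
        {
                /* Full form: guarantees the write hits the CPU the task
                 * is actually on for the duration of the operation. */
                this_cpu_write(demo_touch_ts, 0);
        }

        static void demo_touch_unchecked(void)
        {
                /* Unchecked form: no preemption handling; correct when the
                 * caller cannot migrate, or when any CPU's copy will do. */
                __this_cpu_write(demo_touch_ts, 0);
        }
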
diff --git a/mm/slab.c b/mm/slab.c
index b1e40dafbab3..316d75596f3c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -829,12 +829,12 @@ static void init_reap_node(int cpu)
 
 static void next_reap_node(void)
 {
-        int node = __get_cpu_var(slab_reap_node);
+        int node = __this_cpu_read(slab_reap_node);
 
         node = next_node(node, node_online_map);
         if (unlikely(node >= MAX_NUMNODES))
                 node = first_node(node_online_map);
-        __get_cpu_var(slab_reap_node) = node;
+        __this_cpu_write(slab_reap_node, node);
 }
 
 #else
@@ -1012,7 +1012,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-        int node = __get_cpu_var(slab_reap_node);
+        int node = __this_cpu_read(slab_reap_node);
 
         if (l3->alien) {
                 struct array_cache *ac = l3->alien[node];
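
Finally, the slab_reap_node update is a read-modify-write split across several statements, which is safe only because next_reap_node() runs from the per-cpu cache_reap() work item and therefore stays on one CPU; in a freely preemptible context the same pattern would race, as this hypothetical sketch shows:

        #include <linux/percpu.h>

        static DEFINE_PER_CPU(int, demo_cursor);        /* hypothetical */

        static void racy_if_preemptible(void)
        {
                int v = __this_cpu_read(demo_cursor);   /* read on CPU A ...      */
                v++;                                    /* ... migration possible */
                __this_cpu_write(demo_cursor, v);       /* ... write on CPU B     */
        }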