-rw-r--r--  arch/x86/include/asm/debugreg.h            |  2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c             |  4
-rw-r--r--  arch/x86/kernel/apic/nmi.c                 | 24
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c         |  8
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  |  2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c           |  6
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c           | 27
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c     |  4
-rw-r--r--  arch/x86/kernel/ftrace.c                   |  6
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c            | 12
-rw-r--r--  arch/x86/kernel/irq.c                      |  6
-rw-r--r--  arch/x86/kernel/irq_32.c                   |  4
-rw-r--r--  arch/x86/kernel/smpboot.c                  |  2
-rw-r--r--  arch/x86/kernel/tsc.c                      |  2
-rw-r--r--  arch/x86/kvm/x86.c                         |  8
-rw-r--r--  arch/x86/oprofile/nmi_int.c                |  2
16 files changed, 57 insertions(+), 62 deletions(-)
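
Every hunk below applies the same mechanical conversion: per-cpu accesses that go through the address-generating __get_cpu_var() macro become __this_cpu_read()/__this_cpu_write() (or __this_cpu_or/__this_cpu_and/__this_cpu_sub) operations, which the x86 backend can emit as single segment-prefixed instructions. A minimal sketch of the pattern, using a hypothetical per-cpu variable (demo_count) that is not part of this patch:

#include <linux/percpu.h>

/* hypothetical per-cpu variable, for illustration only */
static DEFINE_PER_CPU(unsigned int, demo_count);

static void demo_old_style(void)
{
        /* old: __get_cpu_var() forms an lvalue via per-cpu address arithmetic */
        __get_cpu_var(demo_count) = __get_cpu_var(demo_count) + 1;
}

static void demo_new_style(void)
{
        /*
         * new: __this_cpu_read()/__this_cpu_write() map to single
         * segment-prefixed instructions on x86
         */
        __this_cpu_write(demo_count, __this_cpu_read(demo_count) + 1);
}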
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b81002f23614..078ad0caefc6 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -94,7 +94,7 @@ static inline void hw_breakpoint_disable(void)
 
 static inline int hw_breakpoint_active(void)
 {
-        return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
+        return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
 }
 
 extern void aout_dump_debugregs(struct user *dump);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7cc0a721f628..8d50922687af 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2302,7 +2302,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
                 unsigned int irr;
                 struct irq_desc *desc;
                 struct irq_cfg *cfg;
-                irq = __get_cpu_var(vector_irq)[vector];
+                irq = __this_cpu_read(vector_irq[vector]);
 
                 if (irq == -1)
                         continue;
@@ -2336,7 +2336,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
                         apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                         goto unlock;
                 }
-                __get_cpu_var(vector_irq)[vector] = -1;
+                __this_cpu_write(vector_irq[vector], -1);
 unlock:
                 raw_spin_unlock(&desc->lock);
         }
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index c90041ccb742..b387dce0b409 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -306,12 +306,12 @@ void acpi_nmi_disable(void)
  */
 void cpu_nmi_set_wd_enabled(void)
 {
-        __get_cpu_var(wd_enabled) = 1;
+        __this_cpu_write(wd_enabled, 1);
 }
 
 void setup_apic_nmi_watchdog(void *unused)
 {
-        if (__get_cpu_var(wd_enabled))
+        if (__this_cpu_read(wd_enabled))
                 return;
 
         /* cheap hack to support suspend/resume */
@@ -322,12 +322,12 @@ void setup_apic_nmi_watchdog(void *unused)
         switch (nmi_watchdog) {
         case NMI_LOCAL_APIC:
                 if (lapic_watchdog_init(nmi_hz) < 0) {
-                        __get_cpu_var(wd_enabled) = 0;
+                        __this_cpu_write(wd_enabled, 0);
                         return;
                 }
                 /* FALL THROUGH */
         case NMI_IO_APIC:
-                __get_cpu_var(wd_enabled) = 1;
+                __this_cpu_write(wd_enabled, 1);
                 atomic_inc(&nmi_active);
         }
 }
@@ -337,13 +337,13 @@ void stop_apic_nmi_watchdog(void *unused)
         /* only support LOCAL and IO APICs for now */
         if (!nmi_watchdog_active())
                 return;
-        if (__get_cpu_var(wd_enabled) == 0)
+        if (__this_cpu_read(wd_enabled) == 0)
                 return;
         if (nmi_watchdog == NMI_LOCAL_APIC)
                 lapic_watchdog_stop();
         else
                 __acpi_nmi_disable(NULL);
-        __get_cpu_var(wd_enabled) = 0;
+        __this_cpu_write(wd_enabled, 0);
         atomic_dec(&nmi_active);
 }
 
@@ -403,8 +403,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 
         sum = get_timer_irqs(cpu);
 
-        if (__get_cpu_var(nmi_touch)) {
-                __get_cpu_var(nmi_touch) = 0;
+        if (__this_cpu_read(nmi_touch)) {
+                __this_cpu_write(nmi_touch, 0);
                 touched = 1;
         }
 
@@ -427,7 +427,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 touched = 1;
 
         /* if the none of the timers isn't firing, this cpu isn't doing much */
-        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
+        if (!touched && __this_cpu_read(last_irq_sum) == sum) {
                 /*
                  * Ayiee, looks like this CPU is stuck ...
                  * wait a few IRQs (5 seconds) before doing the oops ...
@@ -440,12 +440,12 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                         die_nmi("BUG: NMI Watchdog detected LOCKUP",
                                 regs, panic_on_timeout);
         } else {
-                __get_cpu_var(last_irq_sum) = sum;
+                __this_cpu_write(last_irq_sum, sum);
                 __this_cpu_write(alert_counter, 0);
         }
 
         /* see if the nmi watchdog went off */
-        if (!__get_cpu_var(wd_enabled))
+        if (!__this_cpu_read(wd_enabled))
                 return rc;
         switch (nmi_watchdog) {
         case NMI_LOCAL_APIC:
@@ -467,7 +467,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 
 static void enable_ioapic_nmi_watchdog_single(void *unused)
 {
-        __get_cpu_var(wd_enabled) = 1;
+        __this_cpu_write(wd_enabled, 1);
         atomic_inc(&nmi_active);
         __acpi_nmi_enable(NULL);
 }
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c1c52c341f40..26ec9a7c3518 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -118,8 +118,8 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
                 else if (!strcmp(oem_table_id, "UVX"))
                         uv_system_type = UV_X2APIC;
                 else if (!strcmp(oem_table_id, "UVH")) {
-                        __get_cpu_var(x2apic_extra_bits) =
-                                nodeid << (uvh_apicid.s.pnode_shift - 1);
+                        __this_cpu_write(x2apic_extra_bits,
+                                nodeid << (uvh_apicid.s.pnode_shift - 1));
                         uv_system_type = UV_NON_UNIQUE_APIC;
                         uv_set_apicid_hibit();
                         return 1;
@@ -284,7 +284,7 @@ static unsigned int x2apic_get_apic_id(unsigned long x)
         unsigned int id;
 
         WARN_ON(preemptible() && num_online_cpus() > 1);
-        id = x | __get_cpu_var(x2apic_extra_bits);
+        id = x | __this_cpu_read(x2apic_extra_bits);
 
         return id;
 }
@@ -376,7 +376,7 @@ struct apic __refdata apic_x2apic_uv_x = {
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-        __get_cpu_var(x2apic_extra_bits) = (pnode << 6);
+        __this_cpu_write(x2apic_extra_bits, (pnode << 6));
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 491977baf6c0..42a36046823e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1377,7 +1377,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 static void query_values_on_cpu(void *_err)
 {
         int *err = _err;
-        struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+        struct powernow_k8_data *data = __this_cpu_read(powernow_data);
 
         *err = query_current_values_with_pending_wait(data);
 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7a35b72d7c03..0c746af6c5eb 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -326,7 +326,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
 
 static int msr_to_offset(u32 msr)
 {
-        unsigned bank = __get_cpu_var(injectm.bank);
+        unsigned bank = __this_cpu_read(injectm.bank);
 
         if (msr == rip_msr)
                 return offsetof(struct mce, ip);
@@ -346,7 +346,7 @@ static u64 mce_rdmsrl(u32 msr)
 {
         u64 v;
 
-        if (__get_cpu_var(injectm).finished) {
+        if (__this_cpu_read(injectm.finished)) {
                 int offset = msr_to_offset(msr);
 
                 if (offset < 0)
@@ -369,7 +369,7 @@ static u64 mce_rdmsrl(u32 msr)
 
 static void mce_wrmsrl(u32 msr, u64 v)
 {
-        if (__get_cpu_var(injectm).finished) {
+        if (__this_cpu_read(injectm.finished)) {
                 int offset = msr_to_offset(msr);
 
                 if (offset >= 0)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 6d75b9145b13..ba85814f2590 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -968,8 +968,7 @@ x86_perf_event_set_period(struct perf_event *event)
 
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-        if (cpuc->enabled)
+        if (__this_cpu_read(cpu_hw_events.enabled))
                 __x86_pmu_enable_event(&event->hw,
                                        ARCH_PERFMON_EVENTSEL_ENABLE);
 }
@@ -1243,7 +1242,7 @@ perf_event_nmi_handler(struct notifier_block *self,
                 break;
         case DIE_NMIUNKNOWN:
                 this_nmi = percpu_read(irq_stat.__nmi_count);
-                if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+                if (this_nmi != __this_cpu_read(pmu_nmi.marked))
                         /* let the kernel handle the unknown nmi */
                         return NOTIFY_DONE;
                 /*
@@ -1267,8 +1266,8 @@ perf_event_nmi_handler(struct notifier_block *self,
         this_nmi = percpu_read(irq_stat.__nmi_count);
         if ((handled > 1) ||
                 /* the next nmi could be a back-to-back nmi */
-            ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-             (__get_cpu_var(pmu_nmi).handled > 1))) {
+            ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+             (__this_cpu_read(pmu_nmi.handled) > 1))) {
                 /*
                  * We could have two subsequent back-to-back nmis: The
                  * first handles more than one counter, the 2nd
@@ -1279,8 +1278,8 @@ perf_event_nmi_handler(struct notifier_block *self,
                  * handling more than one counter. We will mark the
                  * next (3rd) and then drop it if unhandled.
                  */
-                __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
-                __get_cpu_var(pmu_nmi).handled = handled;
+                __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+                __this_cpu_write(pmu_nmi.handled, handled);
         }
 
         return NOTIFY_STOP;
@@ -1454,11 +1453,9 @@ static inline void x86_pmu_read(struct perf_event *event)
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
         perf_pmu_disable(pmu);
-        cpuc->group_flag |= PERF_EVENT_TXN;
-        cpuc->n_txn = 0;
+        __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+        __this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 
 /*
@@ -1468,14 +1465,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-        cpuc->group_flag &= ~PERF_EVENT_TXN;
+        __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
         /*
          * Truncate the collected events.
          */
-        cpuc->n_added -= cpuc->n_txn;
-        cpuc->n_events -= cpuc->n_txn;
+        __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+        __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
         perf_pmu_enable(pmu);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index c8f5c088cad1..4ee59bcbdad3 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -649,7 +649,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
         struct hw_perf_event *hwc = &event->hw;
 
         if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
-                if (!__get_cpu_var(cpu_hw_events).enabled)
+                if (!__this_cpu_read(cpu_hw_events.enabled))
                         return;
 
                 intel_pmu_enable_bts(hwc->config);
@@ -679,7 +679,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
 
 static void intel_pmu_reset(void)
 {
-        struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
+        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
         unsigned long flags;
         int idx;
 
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 3afb33f14d2d..b45246f9a640 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -167,9 +167,9 @@ static void ftrace_mod_code(void)
 
 void ftrace_nmi_enter(void)
 {
-        __get_cpu_var(save_modifying_code) = modifying_code;
+        __this_cpu_write(save_modifying_code, modifying_code);
 
-        if (!__get_cpu_var(save_modifying_code))
+        if (!__this_cpu_read(save_modifying_code))
                 return;
 
         if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
@@ -183,7 +183,7 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-        if (!__get_cpu_var(save_modifying_code))
+        if (!__this_cpu_read(save_modifying_code))
                 return;
 
         /* Finish all executions before clearing nmi_running */
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 42c594254507..02f07634d265 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
                 return -EBUSY;
 
         set_debugreg(info->address, i);
-        __get_cpu_var(cpu_debugreg[i]) = info->address;
+        __this_cpu_write(cpu_debugreg[i], info->address);
 
         dr7 = &__get_cpu_var(cpu_dr7);
         *dr7 |= encode_dr7(i, info->len, info->type);
@@ -397,12 +397,12 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 
 void hw_breakpoint_restore(void)
 {
-        set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
-        set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
-        set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
-        set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
+        set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
+        set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
+        set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
+        set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
         set_debugreg(current->thread.debugreg6, 6);
-        set_debugreg(__get_cpu_var(cpu_dr7), 7);
+        set_debugreg(__this_cpu_read(cpu_dr7), 7);
 }
 EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
 
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 83ec0175f986..3a43caa3beb7 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -234,7 +234,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
         exit_idle();
         irq_enter();
 
-        irq = __get_cpu_var(vector_irq)[vector];
+        irq = __this_cpu_read(vector_irq[vector]);
 
         if (!handle_irq(irq, regs)) {
                 ack_APIC_irq();
@@ -350,12 +350,12 @@ void fixup_irqs(void)
         for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                 unsigned int irr;
 
-                if (__get_cpu_var(vector_irq)[vector] < 0)
+                if (__this_cpu_read(vector_irq[vector]) < 0)
                         continue;
 
                 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                 if (irr & (1 << (vector % 32))) {
-                        irq = __get_cpu_var(vector_irq)[vector];
+                        irq = __this_cpu_read(vector_irq[vector]);
 
                         data = irq_get_irq_data(irq);
                         raw_spin_lock(&desc->lock);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 96656f207751..48ff6dcffa02 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -79,7 +79,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
         u32 *isp, arg1, arg2;
 
         curctx = (union irq_ctx *) current_thread_info();
-        irqctx = __get_cpu_var(hardirq_ctx);
+        irqctx = __this_cpu_read(hardirq_ctx);
 
         /*
          * this is where we switch to the IRQ stack. However, if we are
@@ -166,7 +166,7 @@ asmlinkage void do_softirq(void)
 
         if (local_softirq_pending()) {
                 curctx = current_thread_info();
-                irqctx = __get_cpu_var(softirq_ctx);
+                irqctx = __this_cpu_read(softirq_ctx);
                 irqctx->tinfo.task = curctx->task;
                 irqctx->tinfo.previous_esp = current_stack_pointer;
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 083e99d1b7df..ff4e5a113a5b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1377,7 +1377,7 @@ void play_dead_common(void)
 
         mb();
         /* Ack it */
-        __get_cpu_var(cpu_state) = CPU_DEAD;
+        __this_cpu_write(cpu_state, CPU_DEAD);
 
         /*
          * With physical CPU hotplug, we should halt the cpu
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 0c40d8b72416..acb08dd7bb57 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -659,7 +659,7 @@ void restore_sched_clock_state(void)
 
         local_irq_save(flags);
 
-        __get_cpu_var(cyc2ns_offset) = 0;
+        __this_cpu_write(cyc2ns_offset, 0);
         offset = cyc2ns_suspend - sched_clock();
 
         for_each_possible_cpu(cpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cdac9e592aa5..79d9606c202c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -981,7 +981,7 @@ static inline u64 nsec_to_cycles(u64 nsec)
         if (kvm_tsc_changes_freq())
                 printk_once(KERN_WARNING
                  "kvm: unreliable cycle conversion on adjustable rate TSC\n");
-        ret = nsec * __get_cpu_var(cpu_tsc_khz);
+        ret = nsec * __this_cpu_read(cpu_tsc_khz);
         do_div(ret, USEC_PER_SEC);
         return ret;
 }
@@ -1066,7 +1066,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         local_irq_save(flags);
         kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
         kernel_ns = get_kernel_ns();
-        this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+        this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
 
         if (unlikely(this_tsc_khz == 0)) {
                 local_irq_restore(flags);
@@ -4432,7 +4432,7 @@ EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
 
 static void tsc_bad(void *info)
 {
-        __get_cpu_var(cpu_tsc_khz) = 0;
+        __this_cpu_write(cpu_tsc_khz, 0);
 }
 
 static void tsc_khz_changed(void *data)
@@ -4446,7 +4446,7 @@ static void tsc_khz_changed(void *data)
                 khz = cpufreq_quick_get(raw_smp_processor_id());
         if (!khz)
                 khz = tsc_khz;
-        __get_cpu_var(cpu_tsc_khz) = khz;
+        __this_cpu_write(cpu_tsc_khz, khz);
 }
 
 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 4e8baad36d37..a0cae67a657a 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -143,7 +143,7 @@ static inline int has_mux(void)
 
 inline int op_x86_phys_to_virt(int phys)
 {
-        return __get_cpu_var(switch_index) + phys;
+        return __this_cpu_read(switch_index) + phys;
 }
 
 inline int op_x86_virt_to_phys(int virt)