Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/debugreg.h | 4
-rw-r--r--  arch/x86/include/asm/perf_event_p4.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 12
-rw-r--r--  arch/x86/kernel/apb_timer.c | 4
-rw-r--r--  arch/x86/kernel/apic/apic.c | 4
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 46
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c | 22
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 22
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 20
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_knc.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 6
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 8
-rw-r--r--  arch/x86/kernel/irq_64.c | 6
-rw-r--r--  arch/x86/kernel/kvm.c | 22
-rw-r--r--  arch/x86/kvm/svm.c | 6
-rw-r--r--  arch/x86/kvm/vmx.c | 10
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  arch/x86/mm/kmemcheck/kmemcheck.c | 14
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 8
-rw-r--r--  arch/x86/oprofile/op_model_p4.c | 2
-rw-r--r--  arch/x86/platform/uv/uv_nmi.c | 40
-rw-r--r--  arch/x86/platform/uv/uv_time.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 4
-rw-r--r--  arch/x86/xen/multicalls.c | 8
-rw-r--r--  arch/x86/xen/spinlock.c | 2
-rw-r--r--  arch/x86/xen/time.c | 10
34 files changed, 175 insertions(+), 175 deletions(-)
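Every hunk below applies the same mechanical substitution: accesses that went through __get_cpu_var() (and the related __raw_get_cpu_var()/__this_cpu_ptr() forms) are rewritten with the this_cpu_*/raw_cpu_* operations. The following is only a minimal sketch of the recurring patterns, using hypothetical per-CPU variables rather than any from this diff, and it assumes a kernel build context:

	#include <linux/percpu.h>

	struct demo_state {
		int count;
	};

	/* Hypothetical per-CPU variables, for illustration only. */
	static DEFINE_PER_CPU(struct demo_state, demo_state);
	static DEFINE_PER_CPU(int, demo_count);

	static void demo_accessor_conversion(void)
	{
		/* Old: struct demo_state *s = &__get_cpu_var(demo_state); */
		struct demo_state *s = this_cpu_ptr(&demo_state);

		/* Old: __get_cpu_var(demo_count)++; */
		__this_cpu_inc(demo_count);

		/* Old: if (__get_cpu_var(demo_count) > 10) ... */
		if (__this_cpu_read(demo_count) > 10)
			__this_cpu_write(demo_count, 0);

		s->count++;	/* this_cpu_ptr() yields an ordinary pointer to this CPU's copy */
	}

As in the call sites below, the double-underscore forms assume the caller already runs with preemption disabled (interrupt, NMI or similarly pinned context).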
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 4b528a970bd4..61fd18b83b6c 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -97,11 +97,11 @@ extern void hw_breakpoint_restore(void);
 DECLARE_PER_CPU(int, debug_stack_usage);
 static inline void debug_stack_usage_inc(void)
 {
-	__get_cpu_var(debug_stack_usage)++;
+	__this_cpu_inc(debug_stack_usage);
 }
 static inline void debug_stack_usage_dec(void)
 {
-	__get_cpu_var(debug_stack_usage)--;
+	__this_cpu_dec(debug_stack_usage);
 }
 int is_debug_stack(unsigned long addr);
 void debug_stack_set_zero(void);
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 85e13ccf15c4..d725382c2ae0 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -189,7 +189,7 @@ static inline int p4_ht_thread(int cpu)
 {
 #ifdef CONFIG_SMP
 	if (smp_num_siblings == 2)
-		return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+		return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
 #endif
 	return 0;
 }
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index c63e925fd6b7..a00ad8f2a657 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -164,7 +164,7 @@ struct uv_hub_info_s {
 };
 
 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
+#define uv_hub_info this_cpu_ptr(&__uv_hub_info)
 #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
 
 /*
@@ -601,16 +601,16 @@ struct uv_hub_nmi_s {
 
 struct uv_cpu_nmi_s {
 	struct uv_hub_nmi_s *hub;
-	atomic_t state;
-	atomic_t pinging;
+	int state;
+	int pinging;
 	int queries;
 	int pings;
 };
 
-DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
-#define uv_cpu_nmi (__get_cpu_var(__uv_cpu_nmi))
+DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+
 #define uv_hub_nmi (uv_cpu_nmi.hub)
-#define uv_cpu_nmi_per(cpu) (per_cpu(__uv_cpu_nmi, cpu))
+#define uv_cpu_nmi_per(cpu) (per_cpu(uv_cpu_nmi, cpu))
 #define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub)
 
 /* uv_cpu_nmi_states */
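Besides swapping the accessor, the second uv_hub.h hunk renames the per-CPU variable from __uv_cpu_nmi to uv_cpu_nmi and drops the old uv_cpu_nmi wrapper macro, so callers are adjusted elsewhere in this diff to address the per-CPU struct directly. A hedged sketch of that style of direct field access, with invented names rather than the UV ones:

	#include <linux/percpu.h>

	struct demo_nmi_state {
		int state;
		int pings;
	};
	static DEFINE_PER_CPU(struct demo_nmi_state, demo_nmi);

	static void demo_direct_field_access(void)
	{
		/* Fields of this CPU's copy, without forming a pointer first. */
		if (this_cpu_read(demo_nmi.state) == 0)
			this_cpu_write(demo_nmi.state, 1);
		this_cpu_inc(demo_nmi.pings);

		/* Another CPU's copy is still reached with per_cpu(). */
		per_cpu(demo_nmi, 0).pings = 0;
	}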
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index af5b08ab3b71..5972b108f15a 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -146,7 +146,7 @@ static inline int is_apbt_capable(void)
 static int __init apbt_clockevent_register(void)
 {
 	struct sfi_timer_table_entry *mtmr;
-	struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);
+	struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);
 
 	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
 	if (mtmr == NULL) {
@@ -200,7 +200,7 @@ void apbt_setup_secondary_clock(void)
 	if (!cpu)
 		return;
 
-	adev = &__get_cpu_var(cpu_apbt_dev);
+	adev = this_cpu_ptr(&cpu_apbt_dev);
 	if (!adev->timer) {
 		adev->timer = dw_apb_clockevent_init(cpu, adev->name,
 			APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 67760275544b..00853b254ab0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -561,7 +561,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  */
 static void setup_APIC_timer(void)
 {
-	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
 
 	if (this_cpu_has(X86_FEATURE_ARAT)) {
 		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
@@ -696,7 +696,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 
 static int __init calibrate_APIC_clock(void)
 {
-	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
 	void (*real_handler)(struct clock_event_device *dev);
 	unsigned long deltaj;
 	long delta, deltatsc;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 6ce600f9bc78..e658f21681c8 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -42,7 +42,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
 	 * We are to modify mask, so we need an own copy
 	 * and be sure it's manipulated with irq off.
 	 */
-	ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
+	ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
 	cpumask_copy(ipi_mask_ptr, mask);
 
 	/*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3eff36f719fb..4b4f78c9ba19 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1200,9 +1200,9 @@ DEFINE_PER_CPU(int, debug_stack_usage);
 
 int is_debug_stack(unsigned long addr)
 {
-	return __get_cpu_var(debug_stack_usage) ||
-		(addr <= __get_cpu_var(debug_stack_addr) &&
-		addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
+	return __this_cpu_read(debug_stack_usage) ||
+		(addr <= __this_cpu_read(debug_stack_addr) &&
+		addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
 }
 NOKPROBE_SYMBOL(is_debug_stack);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 5ac2d1fb28bc..4cfba4371a71 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex);
 static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
-	struct mce *m = &__get_cpu_var(injectm);
+	struct mce *m = this_cpu_ptr(&injectm);
 	if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
 		return NMI_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
@@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 static void mce_irq_ipi(void *info)
 {
 	int cpu = smp_processor_id();
-	struct mce *m = &__get_cpu_var(injectm);
+	struct mce *m = this_cpu_ptr(&injectm);
 
 	if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
 	    m->inject_flags & MCJ_EXCEPTION) {
@@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info)
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
-	struct mce *m = &__get_cpu_var(injectm);
+	struct mce *m = this_cpu_ptr(&injectm);
 	int context = MCJ_CTX(m->inject_flags);
 	int ret = 0;
 	int cpu = m->extcpu;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bd9ccda8087f..61a9668cebfd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr)
 
 		if (offset < 0)
 			return 0;
-		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
+		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
 	}
 
 	if (rdmsrl_safe(msr, &v)) {
@@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
 		int offset = msr_to_offset(msr);
 
 		if (offset >= 0)
-			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
+			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
 		return;
 	}
 	wrmsrl(msr, v);
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring);
 /* Runs with CPU affinity in workqueue */
 static int mce_ring_empty(void)
 {
-	struct mce_ring *r = &__get_cpu_var(mce_ring);
+	struct mce_ring *r = this_cpu_ptr(&mce_ring);
 
 	return r->start == r->end;
 }
@@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn)
 
 	*pfn = 0;
 	get_cpu();
-	r = &__get_cpu_var(mce_ring);
+	r = this_cpu_ptr(&mce_ring);
 	if (r->start == r->end)
 		goto out;
 	*pfn = r->ring[r->start];
@@ -504,7 +504,7 @@ out:
 /* Always runs in MCE context with preempt off */
 static int mce_ring_add(unsigned long pfn)
 {
-	struct mce_ring *r = &__get_cpu_var(mce_ring);
+	struct mce_ring *r = this_cpu_ptr(&mce_ring);
 	unsigned next;
 
 	next = (r->end + 1) % MCE_RING_SIZE;
@@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c)
 static void mce_schedule_work(void)
 {
 	if (!mce_ring_empty())
-		schedule_work(&__get_cpu_var(mce_work));
+		schedule_work(this_cpu_ptr(&mce_work));
 }
 
 DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs)
 		return;
 	}
 
-	irq_work_queue(&__get_cpu_var(mce_irq_work));
+	irq_work_queue(this_cpu_ptr(&mce_irq_work));
 }
 
 /*
@@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
 	mce_gather_info(&m, regs);
 
-	final = &__get_cpu_var(mces_seen);
+	final = this_cpu_ptr(&mces_seen);
 	*final = m;
 
 	memset(valid_banks, 0, sizeof(valid_banks));
@@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) =
 
 static int cmc_error_seen(void)
 {
-	unsigned long *v = &__get_cpu_var(mce_polled_error);
+	unsigned long *v = this_cpu_ptr(&mce_polled_error);
 
 	return test_and_clear_bit(0, v);
 }
 
 static void mce_timer_fn(unsigned long data)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv;
 	int notify;
 
 	WARN_ON(smp_processor_id() != data);
 
-	if (mce_available(__this_cpu_ptr(&cpu_info))) {
+	if (mce_available(this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
-			&__get_cpu_var(mce_poll_banks));
+			this_cpu_ptr(&mce_poll_banks));
 		mce_intel_cmci_poll();
 	}
 
@@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data)
  */
 void mce_timer_kick(unsigned long interval)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long when = jiffies + interval;
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
@@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
 
 static void __mcheck_cpu_init_timer(void)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned int cpu = smp_processor_id();
 
 	setup_timer(t, mce_timer_fn, cpu);
@@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(c);
 	__mcheck_cpu_init_timer();
-	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
-	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
+	INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
+	init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
 }
 
 /*
@@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = {
 static void __mce_disable_bank(void *arg)
 {
 	int bank = *((int *)arg);
-	__clear_bit(bank, __get_cpu_var(mce_poll_banks));
+	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
 	cmci_disable_bank(bank);
 }
 
@@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void)
 static void mce_syscore_resume(void)
 {
 	__mcheck_cpu_init_generic();
-	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
+	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
 }
 
 static struct syscore_ops mce_syscore_ops = {
@@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = {
 
 static void mce_cpu_restart(void *data)
 {
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_timer();
@@ -2096,14 +2096,14 @@ static void mce_restart(void)
 /* Toggle features for corrected errors */
 static void mce_disable_cmci(void *data)
 {
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 	cmci_clear();
 }
 
 static void mce_enable_ce(void *all)
 {
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 	cmci_reenable();
 	cmci_recheck();
@@ -2336,7 +2336,7 @@ static void mce_disable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
@@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1e49f8f41276..5d4999f95aec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void)
 			 * event.
 			 */
 			machine_check_poll(MCP_TIMESTAMP,
-					&__get_cpu_var(mce_poll_banks));
+					this_cpu_ptr(&mce_poll_banks));
 
 			if (high & MASK_OVERFLOW_HI) {
 				rdmsrl(address, m.misc);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..b3c97bafc123 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void)
 {
 	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
 		return;
-	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 }
 
 void mce_intel_hcpu_update(unsigned long cpu)
@@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void)
 	u64 val;
 
 	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
-	owned = __get_cpu_var(mce_banks_owned);
+	owned = this_cpu_ptr(mce_banks_owned);
 	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
 		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 		val &= ~MCI_CTL2_CMCI_EN;
@@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void)
 {
 	if (cmci_storm_detect())
 		return;
-	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 	mce_notify_irq();
 }
 
@@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void)
  */
 static void cmci_discover(int banks)
 {
-	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
 	unsigned long flags;
 	int i;
 	int bios_wrong_thresh = 0;
@@ -228,7 +228,7 @@ static void cmci_discover(int banks)
 		/* Already owned by someone else? */
 		if (val & MCI_CTL2_CMCI_EN) {
 			clear_bit(i, owned);
-			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
 			continue;
 		}
 
@@ -252,7 +252,7 @@ static void cmci_discover(int banks)
 		/* Did the enable bit stick? -- the bank supports CMCI */
 		if (val & MCI_CTL2_CMCI_EN) {
 			set_bit(i, owned);
-			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
 			/*
 			 * We are able to set thresholds for some banks that
 			 * had a threshold of 0. This means the BIOS has not
@@ -263,7 +263,7 @@ static void cmci_discover(int banks)
 			    (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
 				bios_wrong_thresh = 1;
 		} else {
-			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+			WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
 		}
 	}
 	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
@@ -284,10 +284,10 @@ void cmci_recheck(void)
 	unsigned long flags;
 	int banks;
 
-	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
 		return;
 	local_irq_save(flags);
-	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 	local_irq_restore(flags);
 }
 
@@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank)
 {
 	u64 val;
 
-	if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
+	if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
 		return;
 	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 	val &= ~MCI_CTL2_CMCI_EN;
 	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
-	__clear_bit(bank, __get_cpu_var(mce_banks_owned));
+	__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 16c73022306e..1b8299dd3d91 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -494,7 +494,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
 
 void x86_pmu_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -512,7 +512,7 @@ void x86_pmu_disable_all(void)
 
 static void x86_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!x86_pmu_initialized())
 		return;
@@ -529,7 +529,7 @@ static void x86_pmu_disable(struct pmu *pmu)
 
 void x86_pmu_enable_all(int added)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -876,7 +876,7 @@ static void x86_pmu_start(struct perf_event *event, int flags);
 
 static void x86_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
 	int i, added = cpuc->n_added;
@@ -1030,7 +1030,7 @@ void x86_pmu_enable_event(struct perf_event *event)
  */
 static int x86_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc;
 	int assign[X86_PMC_IDX_MAX];
 	int n, n0, ret;
@@ -1081,7 +1081,7 @@ out:
 
 static void x86_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx = event->hw.idx;
 
 	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -1160,7 +1160,7 @@ void perf_event_print_debug(void)
 
 void x86_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
@@ -1182,7 +1182,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
 
 static void x86_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int i;
 
 	/*
@@ -1237,7 +1237,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/*
 	 * Some chipsets need to unmask the LVTPC in a particular spot
@@ -1646,7 +1646,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
  */
 static int x86_pmu_commit_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int assign[X86_PMC_IDX_MAX];
 	int n, ret;
 
@@ -2005,7 +2005,7 @@ static unsigned long get_segment_base(unsigned int segment)
 		if (idx > GDT_ENTRIES)
 			return 0;
 
-		desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+		desc = raw_cpu_ptr(gdt_page.gdt);
 	}
 
 	return get_desc_base(desc + idx);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index beeb7cc07044..28926311aac1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -699,7 +699,7 @@ __init int amd_pmu_init(void)
 
 void amd_pmu_enable_virt(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	cpuc->perf_ctr_virt_mask = 0;
 
@@ -711,7 +711,7 @@ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
 
 void amd_pmu_disable_virt(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/*
 	 * We only mask out the Host-only bit so that host-only counting works
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3851def5057c..a73947c53b65 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1174,7 +1174,7 @@ static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
 
 static void intel_pmu_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
@@ -1187,7 +1187,7 @@ static void intel_pmu_disable_all(void)
 
 static void intel_pmu_enable_all(int added)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	intel_pmu_pebs_enable_all();
 	intel_pmu_lbr_enable_all();
@@ -1221,7 +1221,7 @@ static void intel_pmu_enable_all(int added)
  */
 static void intel_pmu_nhm_workaround(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	static const unsigned long nhm_magic[4] = {
 		0x4300B5,
 		0x4300D2,
@@ -1320,7 +1320,7 @@ static inline bool event_is_checkpointed(struct perf_event *event)
 static void intel_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
@@ -1384,7 +1384,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_enable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
 		if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -1478,7 +1478,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	u64 status;
 	int handled;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/*
 	 * No known reason to not always do late ACK,
@@ -1910,7 +1910,7 @@ EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
 
 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
 
 	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
@@ -1931,7 +1931,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 
 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
 	int idx;
 
@@ -1965,7 +1965,7 @@ static void core_pmu_enable_event(struct perf_event *event)
 
 static void core_pmu_enable_all(int added)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b1553d05a5cb..46211bcc813e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -474,7 +474,7 @@ void intel_pmu_enable_bts(u64 config)
 
 void intel_pmu_disable_bts(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	unsigned long debugctlmsr;
 
 	if (!cpuc->ds)
@@ -491,7 +491,7 @@ void intel_pmu_disable_bts(void)
 
 int intel_pmu_drain_bts_buffer(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
 	struct bts_record {
 		u64 from;
@@ -669,7 +669,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 
 void intel_pmu_pebs_enable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
@@ -684,7 +684,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 
 void intel_pmu_pebs_disable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -702,7 +702,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 
 void intel_pmu_pebs_enable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->pebs_enabled)
 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
@@ -710,7 +710,7 @@ void intel_pmu_pebs_enable_all(void)
 
 void intel_pmu_pebs_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->pebs_enabled)
 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
@@ -718,7 +718,7 @@ void intel_pmu_pebs_disable_all(void)
 
 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	unsigned long from = cpuc->lbr_entries[0].from;
 	unsigned long old_to, to = cpuc->lbr_entries[0].to;
 	unsigned long ip = regs->ip;
@@ -829,7 +829,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	 * We cast to the biggest pebs_record but are careful not to
 	 * unconditionally access the 'extra' entries.
 	 */
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct pebs_record_hsw *pebs = __pebs;
 	struct perf_sample_data data;
 	struct pt_regs regs;
@@ -916,7 +916,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
 	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
 	struct pebs_record_core *at, *top;
@@ -957,7 +957,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 
 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
 	struct perf_event *event = NULL;
 	void *at, *top;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 4af10617de33..45fa730a5283 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -133,7 +133,7 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
 static void __intel_pmu_lbr_enable(void)
 {
 	u64 debugctl;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->lbr_sel)
 		wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
@@ -183,7 +183,7 @@ void intel_pmu_lbr_reset(void)
 
 void intel_pmu_lbr_enable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!x86_pmu.lbr_nr)
 		return;
@@ -203,7 +203,7 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 
 void intel_pmu_lbr_disable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!x86_pmu.lbr_nr)
 		return;
@@ -220,7 +220,7 @@ void intel_pmu_lbr_disable(struct perf_event *event)
 
 void intel_pmu_lbr_enable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->lbr_users)
 		__intel_pmu_lbr_enable();
@@ -228,7 +228,7 @@ void intel_pmu_lbr_enable_all(void)
 
 void intel_pmu_lbr_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->lbr_users)
 		__intel_pmu_lbr_disable();
@@ -332,7 +332,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 
 void intel_pmu_lbr_read(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!cpuc->lbr_users)
 		return;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 619f7699487a..d64f275fe274 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v)
 	 * or use ldexp(count, -32).
 	 * Watts = Joules/Time delta
 	 */
-	return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit);
+	return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
 }
 
 static u64 rapl_event_update(struct perf_event *event)
@@ -187,7 +187,7 @@ static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
 
 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 {
-	struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	struct perf_event *event;
 	unsigned long flags;
 
@@ -234,7 +234,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
 
 static void rapl_pmu_event_start(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	unsigned long flags;
 
 	spin_lock_irqsave(&pmu->lock, flags);
@@ -244,7 +244,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
@@ -278,7 +278,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
@@ -696,7 +696,7 @@ static int __init rapl_pmu_init(void)
 		return -1;
 	}
 
-	pmu = __get_cpu_var(rapl_pmu);
+	pmu = __this_cpu_read(rapl_pmu);
 
 	pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
 		" API unit is 2^-32 Joules,"
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 838fa8772c62..5b0c232d1ee6 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -217,7 +217,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs)
 	int bit, loops;
 	u64 status;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	knc_pmu_disable_all();
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 5d466b7d8609..f2e56783af3d 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -915,7 +915,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
 
 static void p4_pmu_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -984,7 +984,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
 
 static void p4_pmu_enable_all(int added)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1004,7 +1004,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		int overflow;
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 5f9cf20cdb68..3d5fb509bdeb 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -108,7 +108,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 	int i;
 
 	for (i = 0; i < HBP_NUM; i++) {
-		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
 
 		if (!*slot) {
 			*slot = bp;
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 	set_debugreg(info->address, i);
 	__this_cpu_write(cpu_debugreg[i], info->address);
 
-	dr7 = &__get_cpu_var(cpu_dr7);
+	dr7 = this_cpu_ptr(&cpu_dr7);
 	*dr7 |= encode_dr7(i, info->len, info->type);
 
 	set_debugreg(*dr7, 7);
@@ -146,7 +146,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	int i;
 
 	for (i = 0; i < HBP_NUM; i++) {
-		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
 
 		if (*slot == bp) {
 			*slot = NULL;
@@ -157,7 +157,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
 		return;
 
-	dr7 = &__get_cpu_var(cpu_dr7);
+	dr7 = this_cpu_ptr(&cpu_dr7);
 	*dr7 &= ~__encode_dr7(i, info->len, info->type);
 
 	set_debugreg(*dr7, 7);
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 4d1c746892eb..e4b503d5558c 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -52,13 +52,13 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	    regs->sp <= curbase + THREAD_SIZE)
 		return;
 
-	irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) +
+	irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) +
 			STACK_TOP_MARGIN;
-	irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr);
+	irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr);
 	if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
 		return;
 
-	oist = &__get_cpu_var(orig_ist);
+	oist = this_cpu_ptr(&orig_ist);
 	estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
 	estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
 	if (regs->sp >= estack_top && regs->sp <= estack_bottom)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 95c3cb16af3e..f6945bef2cd1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -244,9 +244,9 @@ u32 kvm_read_and_reset_pf_reason(void)
 {
 	u32 reason = 0;
 
-	if (__get_cpu_var(apf_reason).enabled) {
-		reason = __get_cpu_var(apf_reason).reason;
-		__get_cpu_var(apf_reason).reason = 0;
+	if (__this_cpu_read(apf_reason.enabled)) {
+		reason = __this_cpu_read(apf_reason.reason);
+		__this_cpu_write(apf_reason.reason, 0);
 	}
 
 	return reason;
@@ -319,7 +319,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 	 * there's no need for lock or memory barriers.
 	 * An optimization barrier is implied in apic write.
 	 */
-	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
+	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
 		return;
 	apic_write(APIC_EOI, APIC_EOI_ACK);
 }
@@ -330,13 +330,13 @@ void kvm_guest_cpu_init(void)
 		return;
 
 	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
-		u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));
+		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
 
 #ifdef CONFIG_PREEMPT
 		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
 #endif
 		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
-		__get_cpu_var(apf_reason).enabled = 1;
+		__this_cpu_write(apf_reason.enabled, 1);
 		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
 		       smp_processor_id());
 	}
@@ -345,8 +345,8 @@ void kvm_guest_cpu_init(void)
 		unsigned long pa;
 		/* Size alignment is implied but just to make it explicit. */
 		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
-		__get_cpu_var(kvm_apic_eoi) = 0;
-		pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
+		__this_cpu_write(kvm_apic_eoi, 0);
+		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
 			| KVM_MSR_ENABLED;
 		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
 	}
@@ -357,11 +357,11 @@ void kvm_guest_cpu_init(void)
 
 static void kvm_pv_disable_apf(void)
 {
-	if (!__get_cpu_var(apf_reason).enabled)
+	if (!__this_cpu_read(apf_reason.enabled))
 		return;
 
 	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
-	__get_cpu_var(apf_reason).enabled = 0;
+	__this_cpu_write(apf_reason.enabled, 0);
 
 	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
 	       smp_processor_id());
@@ -724,7 +724,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 	if (in_nmi())
 		return;
 
-	w = &__get_cpu_var(klock_waiting);
+	w = this_cpu_ptr(&klock_waiting);
 	cpu = smp_processor_id();
 	start = spin_time_start();
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f7f6a4a157a6..65510f624dfe 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -670,7 +670,7 @@ static int svm_hardware_enable(void)
 
 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
 		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
-		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
+		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
 	}
 
 
@@ -1313,8 +1313,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
-	    svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
-		__get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
+	    svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
+		__this_cpu_write(current_tsc_ratio, svm->tsc_ratio);
 		wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
 	}
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 04fa1b8298c8..d9dcfa27aa84 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1626,7 +1626,7 @@ static void reload_tss(void)
1626 /* 1626 /*
1627 * VT restores TR but not its size. Useless. 1627 * VT restores TR but not its size. Useless.
1628 */ 1628 */
1629 struct desc_ptr *gdt = &__get_cpu_var(host_gdt); 1629 struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
1630 struct desc_struct *descs; 1630 struct desc_struct *descs;
1631 1631
1632 descs = (void *)gdt->address; 1632 descs = (void *)gdt->address;
@@ -1672,7 +1672,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
1672 1672
1673static unsigned long segment_base(u16 selector) 1673static unsigned long segment_base(u16 selector)
1674{ 1674{
1675 struct desc_ptr *gdt = &__get_cpu_var(host_gdt); 1675 struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
1676 struct desc_struct *d; 1676 struct desc_struct *d;
1677 unsigned long table_base; 1677 unsigned long table_base;
1678 unsigned long v; 1678 unsigned long v;
@@ -1802,7 +1802,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1802 */ 1802 */
1803 if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) 1803 if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
1804 stts(); 1804 stts();
1805 load_gdt(&__get_cpu_var(host_gdt)); 1805 load_gdt(this_cpu_ptr(&host_gdt));
1806} 1806}
1807 1807
1808static void vmx_load_host_state(struct vcpu_vmx *vmx) 1808static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -1832,7 +1832,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1832 } 1832 }
1833 1833
1834 if (vmx->loaded_vmcs->cpu != cpu) { 1834 if (vmx->loaded_vmcs->cpu != cpu) {
1835 struct desc_ptr *gdt = &__get_cpu_var(host_gdt); 1835 struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
1836 unsigned long sysenter_esp; 1836 unsigned long sysenter_esp;
1837 1837
1838 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1838 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -2771,7 +2771,7 @@ static int hardware_enable(void)
2771 ept_sync_global(); 2771 ept_sync_global();
2772 } 2772 }
2773 2773
2774 native_store_gdt(&__get_cpu_var(host_gdt)); 2774 native_store_gdt(this_cpu_ptr(&host_gdt));
2775 2775
2776 return 0; 2776 return 0;
2777} 2777}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5430e4b0af29..34c8f94331f8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1559,7 +1559,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1559 1559
1560 /* Keep irq disabled to prevent changes to the clock */ 1560 /* Keep irq disabled to prevent changes to the clock */
1561 local_irq_save(flags); 1561 local_irq_save(flags);
1562 this_tsc_khz = __get_cpu_var(cpu_tsc_khz); 1562 this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
1563 if (unlikely(this_tsc_khz == 0)) { 1563 if (unlikely(this_tsc_khz == 0)) {
1564 local_irq_restore(flags); 1564 local_irq_restore(flags);
1565 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 1565 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index dd89a13f1051..b4f2e7e9e907 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -140,7 +140,7 @@ static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
140 140
141bool kmemcheck_active(struct pt_regs *regs) 141bool kmemcheck_active(struct pt_regs *regs)
142{ 142{
143 struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 143 struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
144 144
145 return data->balance > 0; 145 return data->balance > 0;
146} 146}
@@ -148,7 +148,7 @@ bool kmemcheck_active(struct pt_regs *regs)
148/* Save an address that needs to be shown/hidden */ 148/* Save an address that needs to be shown/hidden */
149static void kmemcheck_save_addr(unsigned long addr) 149static void kmemcheck_save_addr(unsigned long addr)
150{ 150{
151 struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 151 struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
152 152
153 BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); 153 BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
154 data->addr[data->n_addrs++] = addr; 154 data->addr[data->n_addrs++] = addr;
@@ -156,7 +156,7 @@ static void kmemcheck_save_addr(unsigned long addr)
156 156
157static unsigned int kmemcheck_show_all(void) 157static unsigned int kmemcheck_show_all(void)
158{ 158{
159 struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 159 struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
160 unsigned int i; 160 unsigned int i;
161 unsigned int n; 161 unsigned int n;
162 162
@@ -169,7 +169,7 @@ static unsigned int kmemcheck_show_all(void)
169 169
170static unsigned int kmemcheck_hide_all(void) 170static unsigned int kmemcheck_hide_all(void)
171{ 171{
172 struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 172 struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
173 unsigned int i; 173 unsigned int i;
174 unsigned int n; 174 unsigned int n;
175 175
@@ -185,7 +185,7 @@ static unsigned int kmemcheck_hide_all(void)
185 */ 185 */
186void kmemcheck_show(struct pt_regs *regs) 186void kmemcheck_show(struct pt_regs *regs)
187{ 187{
188 struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 188 struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
189 189
190 BUG_ON(!irqs_disabled()); 190 BUG_ON(!irqs_disabled());
191 191
@@ -226,7 +226,7 @@ void kmemcheck_show(struct pt_regs *regs)
226 */ 226 */
227void kmemcheck_hide(struct pt_regs *regs) 227void kmemcheck_hide(struct pt_regs *regs)
228{ 228{
229 struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 229 struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
230 int n; 230 int n;
231 231
232 BUG_ON(!irqs_disabled()); 232 BUG_ON(!irqs_disabled());
@@ -528,7 +528,7 @@ static void kmemcheck_access(struct pt_regs *regs,
528 const uint8_t *insn_primary; 528 const uint8_t *insn_primary;
529 unsigned int size; 529 unsigned int size;
530 530
531 struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 531 struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
532 532
533 /* Recursive fault -- ouch. */ 533 /* Recursive fault -- ouch. */
534 if (data->busy) { 534 if (data->busy) {
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 379e8bd0deea..1d2e6392f5fa 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -64,11 +64,11 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
64static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) 64static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
65{ 65{
66 if (ctr_running) 66 if (ctr_running)
67 model->check_ctrs(regs, &__get_cpu_var(cpu_msrs)); 67 model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
68 else if (!nmi_enabled) 68 else if (!nmi_enabled)
69 return NMI_DONE; 69 return NMI_DONE;
70 else 70 else
71 model->stop(&__get_cpu_var(cpu_msrs)); 71 model->stop(this_cpu_ptr(&cpu_msrs));
72 return NMI_HANDLED; 72 return NMI_HANDLED;
73} 73}
74 74
@@ -91,7 +91,7 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
91 91
92static void nmi_cpu_start(void *dummy) 92static void nmi_cpu_start(void *dummy)
93{ 93{
94 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); 94 struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
95 if (!msrs->controls) 95 if (!msrs->controls)
96 WARN_ON_ONCE(1); 96 WARN_ON_ONCE(1);
97 else 97 else
@@ -111,7 +111,7 @@ static int nmi_start(void)
111 111
112static void nmi_cpu_stop(void *dummy) 112static void nmi_cpu_stop(void *dummy)
113{ 113{
114 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); 114 struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
115 if (!msrs->controls) 115 if (!msrs->controls)
116 WARN_ON_ONCE(1); 116 WARN_ON_ONCE(1);
117 else 117 else
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 98ab13058f89..ad1d91f475ab 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -372,7 +372,7 @@ static unsigned int get_stagger(void)
372{ 372{
373#ifdef CONFIG_SMP 373#ifdef CONFIG_SMP
374 int cpu = smp_processor_id(); 374 int cpu = smp_processor_id();
375 return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map)); 375 return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
376#endif 376#endif
377 return 0; 377 return 0;
378} 378}
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index c89c93320c12..c6b146e67116 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -63,8 +63,8 @@
63 63
64static struct uv_hub_nmi_s **uv_hub_nmi_list; 64static struct uv_hub_nmi_s **uv_hub_nmi_list;
65 65
66DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi); 66DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
67EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi); 67EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
68 68
69static unsigned long nmi_mmr; 69static unsigned long nmi_mmr;
70static unsigned long nmi_mmr_clear; 70static unsigned long nmi_mmr_clear;
@@ -215,7 +215,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
215 int nmi = 0; 215 int nmi = 0;
216 216
217 local64_inc(&uv_nmi_count); 217 local64_inc(&uv_nmi_count);
218 uv_cpu_nmi.queries++; 218 this_cpu_inc(uv_cpu_nmi.queries);
219 219
220 do { 220 do {
221 nmi = atomic_read(&hub_nmi->in_nmi); 221 nmi = atomic_read(&hub_nmi->in_nmi);
@@ -293,7 +293,7 @@ static void uv_nmi_nr_cpus_ping(void)
293 int cpu; 293 int cpu;
294 294
295 for_each_cpu(cpu, uv_nmi_cpu_mask) 295 for_each_cpu(cpu, uv_nmi_cpu_mask)
296 atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1); 296 uv_cpu_nmi_per(cpu).pinging = 1;
297 297
298 apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI); 298 apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
299} 299}
@@ -304,8 +304,8 @@ static void uv_nmi_cleanup_mask(void)
304 int cpu; 304 int cpu;
305 305
306 for_each_cpu(cpu, uv_nmi_cpu_mask) { 306 for_each_cpu(cpu, uv_nmi_cpu_mask) {
307 atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0); 307 uv_cpu_nmi_per(cpu).pinging = 0;
308 atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT); 308 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
309 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); 309 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
310 } 310 }
311} 311}
@@ -328,7 +328,7 @@ static int uv_nmi_wait_cpus(int first)
328 int loop_delay = uv_nmi_loop_delay; 328 int loop_delay = uv_nmi_loop_delay;
329 329
330 for_each_cpu(j, uv_nmi_cpu_mask) { 330 for_each_cpu(j, uv_nmi_cpu_mask) {
331 if (atomic_read(&uv_cpu_nmi_per(j).state)) { 331 if (uv_cpu_nmi_per(j).state) {
332 cpumask_clear_cpu(j, uv_nmi_cpu_mask); 332 cpumask_clear_cpu(j, uv_nmi_cpu_mask);
333 if (++k >= n) 333 if (++k >= n)
334 break; 334 break;
@@ -359,7 +359,7 @@ static int uv_nmi_wait_cpus(int first)
359static void uv_nmi_wait(int master) 359static void uv_nmi_wait(int master)
360{ 360{
361 /* indicate this cpu is in */ 361 /* indicate this cpu is in */
362 atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN); 362 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
363 363
364 /* if not the first cpu in (the master), then we are a slave cpu */ 364 /* if not the first cpu in (the master), then we are a slave cpu */
365 if (!master) 365 if (!master)
@@ -419,7 +419,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
419 "UV:%sNMI process trace for CPU %d\n", dots, cpu); 419 "UV:%sNMI process trace for CPU %d\n", dots, cpu);
420 show_regs(regs); 420 show_regs(regs);
421 } 421 }
422 atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE); 422 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
423} 423}
424 424
425/* Trigger a slave cpu to dump it's state */ 425/* Trigger a slave cpu to dump it's state */
425/* Trigger a slave cpu to dump its state */ 425/* Trigger a slave cpu to dump its state */
@@ -427,20 +427,20 @@ static void uv_nmi_trigger_dump(int cpu)
427{ 427{
428 int retry = uv_nmi_trigger_delay; 428 int retry = uv_nmi_trigger_delay;
429 429
430 if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN) 430 if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
431 return; 431 return;
432 432
433 atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP); 433 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
434 do { 434 do {
435 cpu_relax(); 435 cpu_relax();
436 udelay(10); 436 udelay(10);
437 if (atomic_read(&uv_cpu_nmi_per(cpu).state) 437 if (uv_cpu_nmi_per(cpu).state
438 != UV_NMI_STATE_DUMP) 438 != UV_NMI_STATE_DUMP)
439 return; 439 return;
440 } while (--retry > 0); 440 } while (--retry > 0);
441 441
442 pr_crit("UV: CPU %d stuck in process dump function\n", cpu); 442 pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
443 atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE); 443 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
444} 444}
445 445
446/* Wait until all cpus ready to exit */ 446/* Wait until all cpus ready to exit */
@@ -488,7 +488,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
488 } else { 488 } else {
489 while (!atomic_read(&uv_nmi_slave_continue)) 489 while (!atomic_read(&uv_nmi_slave_continue))
490 cpu_relax(); 490 cpu_relax();
491 while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP) 491 while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
492 cpu_relax(); 492 cpu_relax();
493 uv_nmi_dump_state_cpu(cpu, regs); 493 uv_nmi_dump_state_cpu(cpu, regs);
494 } 494 }
@@ -615,7 +615,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
615 local_irq_save(flags); 615 local_irq_save(flags);
616 616
617 /* If not a UV System NMI, ignore */ 617 /* If not a UV System NMI, ignore */
618 if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) { 618 if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
619 local_irq_restore(flags); 619 local_irq_restore(flags);
620 return NMI_DONE; 620 return NMI_DONE;
621 } 621 }
@@ -639,7 +639,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
639 uv_call_kgdb_kdb(cpu, regs, master); 639 uv_call_kgdb_kdb(cpu, regs, master);
640 640
641 /* Clear per_cpu "in nmi" flag */ 641 /* Clear per_cpu "in nmi" flag */
642 atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT); 642 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
643 643
644 /* Clear MMR NMI flag on each hub */ 644 /* Clear MMR NMI flag on each hub */
645 uv_clear_nmi(cpu); 645 uv_clear_nmi(cpu);
@@ -666,16 +666,16 @@ static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
666{ 666{
667 int ret; 667 int ret;
668 668
669 uv_cpu_nmi.queries++; 669 this_cpu_inc(uv_cpu_nmi.queries);
670 if (!atomic_read(&uv_cpu_nmi.pinging)) { 670 if (!this_cpu_read(uv_cpu_nmi.pinging)) {
671 local64_inc(&uv_nmi_ping_misses); 671 local64_inc(&uv_nmi_ping_misses);
672 return NMI_DONE; 672 return NMI_DONE;
673 } 673 }
674 674
675 uv_cpu_nmi.pings++; 675 this_cpu_inc(uv_cpu_nmi.pings);
676 local64_inc(&uv_nmi_ping_count); 676 local64_inc(&uv_nmi_ping_count);
677 ret = uv_handle_nmi(reason, regs); 677 ret = uv_handle_nmi(reason, regs);
678 atomic_set(&uv_cpu_nmi.pinging, 0); 678 this_cpu_write(uv_cpu_nmi.pinging, 0);
679 return ret; 679 return ret;
680} 680}
681 681
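Note: the uv_nmi.c hunks above pair the accessor change with dropping the atomic_t wrappers: the local CPU's fields are touched with this_cpu_*(), while other CPUs' instances are still addressed explicitly through the per_cpu()-based uv_cpu_nmi_per() macro. A rough sketch of that split, using invented names (demo_nmi, demo_nmi_per) rather than the tree's own symbols:

#include <linux/percpu.h>

struct demo_nmi {
	int state;
	int pinging;
	unsigned long queries;
};
static DEFINE_PER_CPU(struct demo_nmi, demo_nmi);
#define demo_nmi_per(cpu)	per_cpu(demo_nmi, cpu)

static void demo_mark_self_in(void)
{
	/* this CPU's copy: plain fields via this_cpu_*() */
	this_cpu_write(demo_nmi.state, 1);
	this_cpu_inc(demo_nmi.queries);
}

static void demo_clear_other(int cpu)
{
	/* another CPU's copy: address it explicitly via per_cpu() */
	demo_nmi_per(cpu).state = 0;
	demo_nmi_per(cpu).pinging = 0;
}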
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 5c86786bbfd2..a244237f3cfa 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -365,7 +365,7 @@ __setup("uvrtcevt", uv_enable_evt_rtc);
365 365
366static __init void uv_rtc_register_clockevents(struct work_struct *dummy) 366static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
367{ 367{
368 struct clock_event_device *ced = &__get_cpu_var(cpu_ced); 368 struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);
369 369
370 *ced = clock_event_device_uv; 370 *ced = clock_event_device_uv;
371 ced->cpumask = cpumask_of(smp_processor_id()); 371 ced->cpumask = cpumask_of(smp_processor_id());
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index acb0effd8077..1a3f0445432a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -821,7 +821,7 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
821 821
822void xen_copy_trap_info(struct trap_info *traps) 822void xen_copy_trap_info(struct trap_info *traps)
823{ 823{
824 const struct desc_ptr *desc = &__get_cpu_var(idt_desc); 824 const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
825 825
826 xen_convert_trap_info(desc, traps); 826 xen_convert_trap_info(desc, traps);
827} 827}
@@ -838,7 +838,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
838 838
839 spin_lock(&lock); 839 spin_lock(&lock);
840 840
841 __get_cpu_var(idt_desc) = *desc; 841 memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
842 842
843 xen_convert_trap_info(desc, traps); 843 xen_convert_trap_info(desc, traps);
844 844
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 0d82003e76ad..ea54a08d8301 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -54,7 +54,7 @@ DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
54 54
55void xen_mc_flush(void) 55void xen_mc_flush(void)
56{ 56{
57 struct mc_buffer *b = &__get_cpu_var(mc_buffer); 57 struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
58 struct multicall_entry *mc; 58 struct multicall_entry *mc;
59 int ret = 0; 59 int ret = 0;
60 unsigned long flags; 60 unsigned long flags;
@@ -131,7 +131,7 @@ void xen_mc_flush(void)
131 131
132struct multicall_space __xen_mc_entry(size_t args) 132struct multicall_space __xen_mc_entry(size_t args)
133{ 133{
134 struct mc_buffer *b = &__get_cpu_var(mc_buffer); 134 struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
135 struct multicall_space ret; 135 struct multicall_space ret;
136 unsigned argidx = roundup(b->argidx, sizeof(u64)); 136 unsigned argidx = roundup(b->argidx, sizeof(u64));
137 137
@@ -162,7 +162,7 @@ struct multicall_space __xen_mc_entry(size_t args)
162 162
163struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) 163struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
164{ 164{
165 struct mc_buffer *b = &__get_cpu_var(mc_buffer); 165 struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
166 struct multicall_space ret = { NULL, NULL }; 166 struct multicall_space ret = { NULL, NULL };
167 167
168 BUG_ON(preemptible()); 168 BUG_ON(preemptible());
@@ -192,7 +192,7 @@ out:
192 192
193void xen_mc_callback(void (*fn)(void *), void *data) 193void xen_mc_callback(void (*fn)(void *), void *data)
194{ 194{
195 struct mc_buffer *b = &__get_cpu_var(mc_buffer); 195 struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
196 struct callback *cb; 196 struct callback *cb;
197 197
198 if (b->cbidx == MC_BATCH) { 198 if (b->cbidx == MC_BATCH) {
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0ba5f3b967f0..23b45eb9a89c 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -109,7 +109,7 @@ static bool xen_pvspin = true;
109__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) 109__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
110{ 110{
111 int irq = __this_cpu_read(lock_kicker_irq); 111 int irq = __this_cpu_read(lock_kicker_irq);
112 struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting); 112 struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
113 int cpu = smp_processor_id(); 113 int cpu = smp_processor_id();
114 u64 start; 114 u64 start;
115 unsigned long flags; 115 unsigned long flags;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 5718b0b58b60..a1d430b112b3 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -80,7 +80,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
80 80
81 BUG_ON(preemptible()); 81 BUG_ON(preemptible());
82 82
83 state = &__get_cpu_var(xen_runstate); 83 state = this_cpu_ptr(&xen_runstate);
84 84
85 /* 85 /*
86 * The runstate info is always updated by the hypervisor on 86 * The runstate info is always updated by the hypervisor on
@@ -123,7 +123,7 @@ static void do_stolen_accounting(void)
123 123
124 WARN_ON(state.state != RUNSTATE_running); 124 WARN_ON(state.state != RUNSTATE_running);
125 125
126 snap = &__get_cpu_var(xen_runstate_snapshot); 126 snap = this_cpu_ptr(&xen_runstate_snapshot);
127 127
128 /* work out how much time the VCPU has not been runn*ing* */ 128 /* work out how much time the VCPU has not been runn*ing* */
129 runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; 129 runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
@@ -158,7 +158,7 @@ cycle_t xen_clocksource_read(void)
158 cycle_t ret; 158 cycle_t ret;
159 159
160 preempt_disable_notrace(); 160 preempt_disable_notrace();
161 src = &__get_cpu_var(xen_vcpu)->time; 161 src = &__this_cpu_read(xen_vcpu)->time;
162 ret = pvclock_clocksource_read(src); 162 ret = pvclock_clocksource_read(src);
163 preempt_enable_notrace(); 163 preempt_enable_notrace();
164 return ret; 164 return ret;
@@ -397,7 +397,7 @@ static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.
397 397
398static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) 398static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
399{ 399{
400 struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt; 400 struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
401 irqreturn_t ret; 401 irqreturn_t ret;
402 402
403 ret = IRQ_NONE; 403 ret = IRQ_NONE;
@@ -460,7 +460,7 @@ void xen_setup_cpu_clockevents(void)
460{ 460{
461 BUG_ON(preemptible()); 461 BUG_ON(preemptible());
462 462
463 clockevents_register_device(&__get_cpu_var(xen_clock_events).evt); 463 clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
464} 464}
465 465
466void xen_timer_resume(void) 466void xen_timer_resume(void)
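Note: one subtlety in xen/time.c above: xen_vcpu is a per-cpu pointer, so the conversion reads the pointer with __this_cpu_read() and then dereferences it; this_cpu_ptr() applies only when the per-cpu variable is the object itself. A sketch of the distinction, with made-up names (demo_obj, demo_ptr, demo_info) that are not symbols from this patch:

#include <linux/types.h>
#include <linux/percpu.h>

struct demo_info {
	u64 time;
};

/* per-cpu variable that is the object itself */
static DEFINE_PER_CPU(struct demo_info, demo_obj);
/* per-cpu variable that is a pointer to an object living elsewhere */
static DEFINE_PER_CPU(struct demo_info *, demo_ptr);

static u64 demo_read_times(void)
{
	/* old: &__get_cpu_var(demo_obj)        -> this_cpu_ptr(&demo_obj) */
	struct demo_info *a = this_cpu_ptr(&demo_obj);

	/* old: __get_cpu_var(demo_ptr)->time   -> __this_cpu_read(demo_ptr)->time */
	struct demo_info *b = __this_cpu_read(demo_ptr);

	return a->time + (b ? b->time : 0);
}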