Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/common.c                   6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c        6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c              46
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c           2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c        22
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c              22
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c           4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c        18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c     20
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c    12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c   12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_knc.c           2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c            6
13 files changed, 89 insertions(+), 89 deletions(-)
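
All of the hunks below apply one mechanical conversion: the deprecated __get_cpu_var() per-CPU accessor is replaced by the this_cpu/raw_cpu operations, and the old __this_cpu_ptr() spelling becomes raw_cpu_ptr(). As a minimal illustrative sketch (the per-CPU variable names here are hypothetical, not taken from this patch), the three substitution patterns that recur throughout the diff are:

        DEFINE_PER_CPU(struct foo, foo_var);            /* hypothetical per-CPU variable */
        DEFINE_PER_CPU(unsigned long, foo_count);       /* hypothetical per-CPU scalar */

        /* taking the address of this CPU's instance */
        struct foo *p = &__get_cpu_var(foo_var);        /* old */
        struct foo *p = this_cpu_ptr(&foo_var);         /* new */

        /* reading this CPU's scalar value */
        unsigned long n = __get_cpu_var(foo_count);     /* old */
        unsigned long n = __this_cpu_read(foo_count);   /* new */

        /* taking the address without the this_cpu_ptr() checks */
        struct foo *q = __this_cpu_ptr(&foo_var);       /* old */
        struct foo *q = raw_cpu_ptr(&foo_var);          /* new */
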
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3eff36f719fb..4b4f78c9ba19 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1200,9 +1200,9 @@ DEFINE_PER_CPU(int, debug_stack_usage);
 
 int is_debug_stack(unsigned long addr)
 {
-        return __get_cpu_var(debug_stack_usage) ||
-                (addr <= __get_cpu_var(debug_stack_addr) &&
-                 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
+        return __this_cpu_read(debug_stack_usage) ||
+                (addr <= __this_cpu_read(debug_stack_addr) &&
+                 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
 }
 NOKPROBE_SYMBOL(is_debug_stack);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 5ac2d1fb28bc..4cfba4371a71 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex);
 static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 {
         int cpu = smp_processor_id();
-        struct mce *m = &__get_cpu_var(injectm);
+        struct mce *m = this_cpu_ptr(&injectm);
         if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
                 return NMI_DONE;
         cpumask_clear_cpu(cpu, mce_inject_cpumask);
@@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 static void mce_irq_ipi(void *info)
 {
         int cpu = smp_processor_id();
-        struct mce *m = &__get_cpu_var(injectm);
+        struct mce *m = this_cpu_ptr(&injectm);
 
         if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
                         m->inject_flags & MCJ_EXCEPTION) {
@@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info)
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
-        struct mce *m = &__get_cpu_var(injectm);
+        struct mce *m = this_cpu_ptr(&injectm);
         int context = MCJ_CTX(m->inject_flags);
         int ret = 0;
         int cpu = m->extcpu;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bd9ccda8087f..61a9668cebfd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr)
 
                 if (offset < 0)
                         return 0;
-                return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
+                return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
         }
 
         if (rdmsrl_safe(msr, &v)) {
@@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
                 int offset = msr_to_offset(msr);
 
                 if (offset >= 0)
-                        *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
+                        *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
                 return;
         }
         wrmsrl(msr, v);
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring);
 /* Runs with CPU affinity in workqueue */
 static int mce_ring_empty(void)
 {
-        struct mce_ring *r = &__get_cpu_var(mce_ring);
+        struct mce_ring *r = this_cpu_ptr(&mce_ring);
 
         return r->start == r->end;
 }
@@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn)
 
         *pfn = 0;
         get_cpu();
-        r = &__get_cpu_var(mce_ring);
+        r = this_cpu_ptr(&mce_ring);
         if (r->start == r->end)
                 goto out;
         *pfn = r->ring[r->start];
@@ -504,7 +504,7 @@ out:
 /* Always runs in MCE context with preempt off */
 static int mce_ring_add(unsigned long pfn)
 {
-        struct mce_ring *r = &__get_cpu_var(mce_ring);
+        struct mce_ring *r = this_cpu_ptr(&mce_ring);
         unsigned next;
 
         next = (r->end + 1) % MCE_RING_SIZE;
@@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c)
 static void mce_schedule_work(void)
 {
         if (!mce_ring_empty())
-                schedule_work(&__get_cpu_var(mce_work));
+                schedule_work(this_cpu_ptr(&mce_work));
 }
 
 DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs)
                 return;
         }
 
-        irq_work_queue(&__get_cpu_var(mce_irq_work));
+        irq_work_queue(this_cpu_ptr(&mce_irq_work));
 }
 
 /*
@@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
         mce_gather_info(&m, regs);
 
-        final = &__get_cpu_var(mces_seen);
+        final = this_cpu_ptr(&mces_seen);
         *final = m;
 
         memset(valid_banks, 0, sizeof(valid_banks));
@@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) =
 
 static int cmc_error_seen(void)
 {
-        unsigned long *v = &__get_cpu_var(mce_polled_error);
+        unsigned long *v = this_cpu_ptr(&mce_polled_error);
 
         return test_and_clear_bit(0, v);
 }
 
 static void mce_timer_fn(unsigned long data)
 {
-        struct timer_list *t = &__get_cpu_var(mce_timer);
+        struct timer_list *t = this_cpu_ptr(&mce_timer);
         unsigned long iv;
         int notify;
 
         WARN_ON(smp_processor_id() != data);
 
-        if (mce_available(__this_cpu_ptr(&cpu_info))) {
+        if (mce_available(this_cpu_ptr(&cpu_info))) {
                 machine_check_poll(MCP_TIMESTAMP,
-                                &__get_cpu_var(mce_poll_banks));
+                                this_cpu_ptr(&mce_poll_banks));
                 mce_intel_cmci_poll();
         }
 
@@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data)
  */
 void mce_timer_kick(unsigned long interval)
 {
-        struct timer_list *t = &__get_cpu_var(mce_timer);
+        struct timer_list *t = this_cpu_ptr(&mce_timer);
         unsigned long when = jiffies + interval;
         unsigned long iv = __this_cpu_read(mce_next_interval);
 
@@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
 
 static void __mcheck_cpu_init_timer(void)
 {
-        struct timer_list *t = &__get_cpu_var(mce_timer);
+        struct timer_list *t = this_cpu_ptr(&mce_timer);
         unsigned int cpu = smp_processor_id();
 
         setup_timer(t, mce_timer_fn, cpu);
@@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
         __mcheck_cpu_init_generic();
         __mcheck_cpu_init_vendor(c);
         __mcheck_cpu_init_timer();
-        INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
-        init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
+        INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
+        init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
 }
 
 /*
@@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = {
 static void __mce_disable_bank(void *arg)
 {
         int bank = *((int *)arg);
-        __clear_bit(bank, __get_cpu_var(mce_poll_banks));
+        __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
         cmci_disable_bank(bank);
 }
 
@@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void)
 static void mce_syscore_resume(void)
 {
         __mcheck_cpu_init_generic();
-        __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
+        __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
 }
 
 static struct syscore_ops mce_syscore_ops = {
@@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = {
 
 static void mce_cpu_restart(void *data)
 {
-        if (!mce_available(__this_cpu_ptr(&cpu_info)))
+        if (!mce_available(raw_cpu_ptr(&cpu_info)))
                 return;
         __mcheck_cpu_init_generic();
         __mcheck_cpu_init_timer();
@@ -2096,14 +2096,14 @@ static void mce_restart(void)
 /* Toggle features for corrected errors */
 static void mce_disable_cmci(void *data)
 {
-        if (!mce_available(__this_cpu_ptr(&cpu_info)))
+        if (!mce_available(raw_cpu_ptr(&cpu_info)))
                 return;
         cmci_clear();
 }
 
 static void mce_enable_ce(void *all)
 {
-        if (!mce_available(__this_cpu_ptr(&cpu_info)))
+        if (!mce_available(raw_cpu_ptr(&cpu_info)))
                 return;
         cmci_reenable();
         cmci_recheck();
@@ -2336,7 +2336,7 @@ static void mce_disable_cpu(void *h)
         unsigned long action = *(unsigned long *)h;
         int i;
 
-        if (!mce_available(__this_cpu_ptr(&cpu_info)))
+        if (!mce_available(raw_cpu_ptr(&cpu_info)))
                 return;
 
         if (!(action & CPU_TASKS_FROZEN))
@@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h)
         unsigned long action = *(unsigned long *)h;
         int i;
 
-        if (!mce_available(__this_cpu_ptr(&cpu_info)))
+        if (!mce_available(raw_cpu_ptr(&cpu_info)))
                 return;
 
         if (!(action & CPU_TASKS_FROZEN))
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1e49f8f41276..5d4999f95aec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void)
                          * event.
                          */
                         machine_check_poll(MCP_TIMESTAMP,
-                                        &__get_cpu_var(mce_poll_banks));
+                                        this_cpu_ptr(&mce_poll_banks));
 
                         if (high & MASK_OVERFLOW_HI) {
                                 rdmsrl(address, m.misc);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..b3c97bafc123 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void)
 {
         if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
                 return;
-        machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+        machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 }
 
 void mce_intel_hcpu_update(unsigned long cpu)
@@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void)
         u64 val;
 
         raw_spin_lock_irqsave(&cmci_discover_lock, flags);
-        owned = __get_cpu_var(mce_banks_owned);
+        owned = this_cpu_ptr(mce_banks_owned);
         for_each_set_bit(bank, owned, MAX_NR_BANKS) {
                 rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
                 val &= ~MCI_CTL2_CMCI_EN;
@@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void)
 {
         if (cmci_storm_detect())
                 return;
-        machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+        machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
         mce_notify_irq();
 }
 
@@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void)
  */
 static void cmci_discover(int banks)
 {
-        unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+        unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
         unsigned long flags;
         int i;
         int bios_wrong_thresh = 0;
@@ -228,7 +228,7 @@ static void cmci_discover(int banks)
                 /* Already owned by someone else? */
                 if (val & MCI_CTL2_CMCI_EN) {
                         clear_bit(i, owned);
-                        __clear_bit(i, __get_cpu_var(mce_poll_banks));
+                        __clear_bit(i, this_cpu_ptr(mce_poll_banks));
                         continue;
                 }
 
@@ -252,7 +252,7 @@ static void cmci_discover(int banks)
                 /* Did the enable bit stick? -- the bank supports CMCI */
                 if (val & MCI_CTL2_CMCI_EN) {
                         set_bit(i, owned);
-                        __clear_bit(i, __get_cpu_var(mce_poll_banks));
+                        __clear_bit(i, this_cpu_ptr(mce_poll_banks));
                         /*
                          * We are able to set thresholds for some banks that
                          * had a threshold of 0. This means the BIOS has not
@@ -263,7 +263,7 @@ static void cmci_discover(int banks)
                             (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
                                 bios_wrong_thresh = 1;
                 } else {
-                        WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+                        WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
                 }
         }
         raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
@@ -284,10 +284,10 @@ void cmci_recheck(void)
         unsigned long flags;
         int banks;
 
-        if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
+        if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
                 return;
         local_irq_save(flags);
-        machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+        machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
         local_irq_restore(flags);
 }
 
@@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank)
 {
         u64 val;
 
-        if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
+        if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
                 return;
         rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
         val &= ~MCI_CTL2_CMCI_EN;
         wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
-        __clear_bit(bank, __get_cpu_var(mce_banks_owned));
+        __clear_bit(bank, this_cpu_ptr(mce_banks_owned));
 }
 
 /*
306 306
307/* 307/*
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 16c73022306e..1b8299dd3d91 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -494,7 +494,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
 
 void x86_pmu_disable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx;
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -512,7 +512,7 @@ void x86_pmu_disable_all(void)
 
 static void x86_pmu_disable(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (!x86_pmu_initialized())
                 return;
@@ -529,7 +529,7 @@ static void x86_pmu_disable(struct pmu *pmu)
 
 void x86_pmu_enable_all(int added)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx;
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -876,7 +876,7 @@ static void x86_pmu_start(struct perf_event *event, int flags);
 
 static void x86_pmu_enable(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct perf_event *event;
         struct hw_perf_event *hwc;
         int i, added = cpuc->n_added;
@@ -1030,7 +1030,7 @@ void x86_pmu_enable_event(struct perf_event *event)
  */
 static int x86_pmu_add(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct hw_perf_event *hwc;
         int assign[X86_PMC_IDX_MAX];
         int n, n0, ret;
@@ -1081,7 +1081,7 @@ out:
 
 static void x86_pmu_start(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx = event->hw.idx;
 
         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -1160,7 +1160,7 @@ void perf_event_print_debug(void)
 
 void x86_pmu_stop(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
 
         if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
@@ -1182,7 +1182,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
 
 static void x86_pmu_del(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int i;
 
         /*
@@ -1237,7 +1237,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
         int idx, handled = 0;
         u64 val;
 
-        cpuc = &__get_cpu_var(cpu_hw_events);
+        cpuc = this_cpu_ptr(&cpu_hw_events);
 
         /*
          * Some chipsets need to unmask the LVTPC in a particular spot
@@ -1646,7 +1646,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
  */
 static int x86_pmu_commit_txn(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int assign[X86_PMC_IDX_MAX];
         int n, ret;
 
@@ -2005,7 +2005,7 @@ static unsigned long get_segment_base(unsigned int segment)
                 if (idx > GDT_ENTRIES)
                         return 0;
 
-                desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+                desc = raw_cpu_ptr(gdt_page.gdt);
         }
 
         return get_desc_base(desc + idx);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index beeb7cc07044..28926311aac1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -699,7 +699,7 @@ __init int amd_pmu_init(void)
 
 void amd_pmu_enable_virt(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         cpuc->perf_ctr_virt_mask = 0;
 
@@ -711,7 +711,7 @@ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
 
 void amd_pmu_disable_virt(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         /*
          * We only mask out the Host-only bit so that host-only counting works
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3851def5057c..a73947c53b65 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1174,7 +1174,7 @@ static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
 
 static void intel_pmu_disable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
@@ -1187,7 +1187,7 @@ static void intel_pmu_disable_all(void)
 
 static void intel_pmu_enable_all(int added)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         intel_pmu_pebs_enable_all();
         intel_pmu_lbr_enable_all();
@@ -1221,7 +1221,7 @@ static void intel_pmu_enable_all(int added)
  */
 static void intel_pmu_nhm_workaround(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         static const unsigned long nhm_magic[4] = {
                 0x4300B5,
                 0x4300D2,
@@ -1320,7 +1320,7 @@ static inline bool event_is_checkpointed(struct perf_event *event)
 static void intel_pmu_disable_event(struct perf_event *event)
 {
         struct hw_perf_event *hwc = &event->hw;
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                 intel_pmu_disable_bts();
@@ -1384,7 +1384,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_enable_event(struct perf_event *event)
 {
         struct hw_perf_event *hwc = &event->hw;
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                 if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -1478,7 +1478,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
         u64 status;
         int handled;
 
-        cpuc = &__get_cpu_var(cpu_hw_events);
+        cpuc = this_cpu_ptr(&cpu_hw_events);
 
         /*
          * No known reason to not always do late ACK,
@@ -1910,7 +1910,7 @@ EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
 
 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
 
         arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
@@ -1931,7 +1931,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 
 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
         int idx;
 
@@ -1965,7 +1965,7 @@ static void core_pmu_enable_event(struct perf_event *event)
 
 static void core_pmu_enable_all(int added)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx;
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b1553d05a5cb..46211bcc813e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -474,7 +474,7 @@ void intel_pmu_enable_bts(u64 config)
 
 void intel_pmu_disable_bts(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         unsigned long debugctlmsr;
 
         if (!cpuc->ds)
@@ -491,7 +491,7 @@ void intel_pmu_disable_bts(void)
 
 int intel_pmu_drain_bts_buffer(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct debug_store *ds = cpuc->ds;
         struct bts_record {
                 u64 from;
@@ -669,7 +669,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 
 void intel_pmu_pebs_enable(struct perf_event *event)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
 
         hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
@@ -684,7 +684,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 
 void intel_pmu_pebs_disable(struct perf_event *event)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
 
         cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -702,7 +702,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 
 void intel_pmu_pebs_enable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (cpuc->pebs_enabled)
                 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
@@ -710,7 +710,7 @@ void intel_pmu_pebs_enable_all(void)
 
 void intel_pmu_pebs_disable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (cpuc->pebs_enabled)
                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
@@ -718,7 +718,7 @@ void intel_pmu_pebs_disable_all(void)
 
 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         unsigned long from = cpuc->lbr_entries[0].from;
         unsigned long old_to, to = cpuc->lbr_entries[0].to;
         unsigned long ip = regs->ip;
@@ -829,7 +829,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
          * We cast to the biggest pebs_record but are careful not to
          * unconditionally access the 'extra' entries.
          */
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct pebs_record_hsw *pebs = __pebs;
         struct perf_sample_data data;
         struct pt_regs regs;
@@ -916,7 +916,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct debug_store *ds = cpuc->ds;
         struct perf_event *event = cpuc->events[0]; /* PMC0 only */
         struct pebs_record_core *at, *top;
@@ -957,7 +957,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 
 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct debug_store *ds = cpuc->ds;
         struct perf_event *event = NULL;
         void *at, *top;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 4af10617de33..45fa730a5283 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -133,7 +133,7 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
 static void __intel_pmu_lbr_enable(void)
 {
         u64 debugctl;
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (cpuc->lbr_sel)
                 wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
@@ -183,7 +183,7 @@ void intel_pmu_lbr_reset(void)
 
 void intel_pmu_lbr_enable(struct perf_event *event)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (!x86_pmu.lbr_nr)
                 return;
@@ -203,7 +203,7 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 
 void intel_pmu_lbr_disable(struct perf_event *event)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (!x86_pmu.lbr_nr)
                 return;
@@ -220,7 +220,7 @@ void intel_pmu_lbr_disable(struct perf_event *event)
 
 void intel_pmu_lbr_enable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (cpuc->lbr_users)
                 __intel_pmu_lbr_enable();
@@ -228,7 +228,7 @@ void intel_pmu_lbr_enable_all(void)
 
 void intel_pmu_lbr_disable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (cpuc->lbr_users)
                 __intel_pmu_lbr_disable();
@@ -332,7 +332,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 
 void intel_pmu_lbr_read(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
         if (!cpuc->lbr_users)
                 return;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 619f7699487a..d64f275fe274 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v)
          * or use ldexp(count, -32).
          * Watts = Joules/Time delta
          */
-        return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit);
+        return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
 }
 
 static u64 rapl_event_update(struct perf_event *event)
@@ -187,7 +187,7 @@ static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
 
 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 {
-        struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+        struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
         struct perf_event *event;
         unsigned long flags;
 
@@ -234,7 +234,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
 
 static void rapl_pmu_event_start(struct perf_event *event, int mode)
 {
-        struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+        struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
         unsigned long flags;
 
         spin_lock_irqsave(&pmu->lock, flags);
@@ -244,7 +244,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 {
-        struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+        struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
         struct hw_perf_event *hwc = &event->hw;
         unsigned long flags;
 
@@ -278,7 +278,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
 {
-        struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+        struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
         struct hw_perf_event *hwc = &event->hw;
         unsigned long flags;
 
@@ -696,7 +696,7 @@ static int __init rapl_pmu_init(void)
                 return -1;
         }
 
-        pmu = __get_cpu_var(rapl_pmu);
+        pmu = __this_cpu_read(rapl_pmu);
 
         pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
                 " API unit is 2^-32 Joules,"
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 838fa8772c62..5b0c232d1ee6 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -217,7 +217,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs)
         int bit, loops;
         u64 status;
 
-        cpuc = &__get_cpu_var(cpu_hw_events);
+        cpuc = this_cpu_ptr(&cpu_hw_events);
 
         knc_pmu_disable_all();
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 5d466b7d8609..f2e56783af3d 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -915,7 +915,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
 
 static void p4_pmu_disable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx;
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -984,7 +984,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
 
 static void p4_pmu_enable_all(int added)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx;
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1004,7 +1004,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
         int idx, handled = 0;
         u64 val;
 
-        cpuc = &__get_cpu_var(cpu_hw_events);
+        cpuc = this_cpu_ptr(&cpu_hw_events);
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                 int overflow;