author		Christoph Lameter <cl@linux.com>	2014-08-17 13:30:40 -0400
committer	Tejun Heo <tj@kernel.org>	2014-08-26 13:45:49 -0400
commit		89cbc76768c2fa4ed95545bf961f3a14ddfeed21 (patch)
tree		14a566d17dc886d3330d67404553530f8f979e2d /arch/x86
parent		532d0d0690d1532dcc5a190162ad820b636bcd4d (diff)
x86: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address of the current processor's instance of the percpu variable,
based on an offset.
Other use cases are storing data to and retrieving data from the current
processor's percpu area: __get_cpu_var() can be used as an lvalue when
writing data, or on the right-hand side of an assignment when reading it.
__get_cpu_var() is defined as:
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
__get_cpu_var() only ever performs an address calculation. Store and
retrieve operations, however, could use a segment prefix (or a global
register on other platforms) and avoid the address calculation entirely.
this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.
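To make the difference concrete, a minimal kernel-style sketch follows
(illustrative only; demo_counter is a hypothetical variable and not part of
this patch), contrasting the pointer-based form with the offset-based
operations:
#include <linux/percpu.h>
DEFINE_PER_CPU(int, demo_counter);
static void demo(void)
{
	int v;
	/* Pointer form: compute the address, then an ordinary load/store. */
	v = *this_cpu_ptr(&demo_counter);
	*this_cpu_ptr(&demo_counter) = v + 1;
	/*
	 * Offset-based form: on x86 these can be emitted as single
	 * %gs-prefixed instructions with no separate address calculation.
	 * (Preemption/context rules are ignored here for brevity.)
	 */
	v = this_cpu_read(demo_counter);
	this_cpu_write(demo_counter, v + 1);
	this_cpu_inc(demo_counter);
}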
This patch converts each __get_cpu_var() use into either an explicit address
calculation using this_cpu_ptr() or a this_cpu operation that works on the
offset directly. This avoids address calculations and uses fewer registers in
the generated code (a combined before/after sketch follows the list of
transformations below).
Transformations done to __get_cpu_var()
1. Determine the address of the percpu instance of the current processor.
DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(&y);
2. Same as #1 but this time an array structure is involved.
DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);
Converts to
int *x = this_cpu_ptr(y);
3. Retrieve the content of the current processor's instance of a per cpu
variable.
DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y);
Converts to
int x = __this_cpu_read(y);
4. Retrieve the content of a percpu struct
DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);
Converts to
memcpy(&x, this_cpu_ptr(&y), sizeof(x));
5. Assignment to a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y) = x;
Converts to
__this_cpu_write(y, x);
6. Increment/decrement etc. of a per cpu variable
DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++;
Converts to
__this_cpu_inc(y);
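Putting several of the above patterns together, a hypothetical before/after
fragment (illustrative only; the struct, variables and members below are made
up and not taken from this patch) would look like:
DEFINE_PER_CPU(struct mydev, dev_info);
DEFINE_PER_CPU(unsigned long, dev_events);
struct mydev *d = &__get_cpu_var(dev_info);
__get_cpu_var(dev_events)++;
if (__get_cpu_var(dev_events) > d->threshold)
	d->overflowed = 1;
Converts to
struct mydev *d = this_cpu_ptr(&dev_info);
__this_cpu_inc(dev_events);
if (__this_cpu_read(dev_events) > d->threshold)
	d->overflowed = 1;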
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/x86')
30 files changed, 147 insertions, 147 deletions
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 4b528a970bd4..61fd18b83b6c 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -97,11 +97,11 @@ extern void hw_breakpoint_restore(void); | |||
97 | DECLARE_PER_CPU(int, debug_stack_usage); | 97 | DECLARE_PER_CPU(int, debug_stack_usage); |
98 | static inline void debug_stack_usage_inc(void) | 98 | static inline void debug_stack_usage_inc(void) |
99 | { | 99 | { |
100 | __get_cpu_var(debug_stack_usage)++; | 100 | __this_cpu_inc(debug_stack_usage); |
101 | } | 101 | } |
102 | static inline void debug_stack_usage_dec(void) | 102 | static inline void debug_stack_usage_dec(void) |
103 | { | 103 | { |
104 | __get_cpu_var(debug_stack_usage)--; | 104 | __this_cpu_dec(debug_stack_usage); |
105 | } | 105 | } |
106 | int is_debug_stack(unsigned long addr); | 106 | int is_debug_stack(unsigned long addr); |
107 | void debug_stack_set_zero(void); | 107 | void debug_stack_set_zero(void); |
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index c63e925fd6b7..bb84cfd5a1a1 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -164,7 +164,7 @@ struct uv_hub_info_s { | |||
164 | }; | 164 | }; |
165 | 165 | ||
166 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | 166 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); |
167 | #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) | 167 | #define uv_hub_info this_cpu_ptr(&__uv_hub_info) |
168 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) | 168 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) |
169 | 169 | ||
170 | /* | 170 | /* |
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index af5b08ab3b71..5972b108f15a 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -146,7 +146,7 @@ static inline int is_apbt_capable(void) | |||
146 | static int __init apbt_clockevent_register(void) | 146 | static int __init apbt_clockevent_register(void) |
147 | { | 147 | { |
148 | struct sfi_timer_table_entry *mtmr; | 148 | struct sfi_timer_table_entry *mtmr; |
149 | struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev); | 149 | struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev); |
150 | 150 | ||
151 | mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); | 151 | mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); |
152 | if (mtmr == NULL) { | 152 | if (mtmr == NULL) { |
@@ -200,7 +200,7 @@ void apbt_setup_secondary_clock(void) | |||
200 | if (!cpu) | 200 | if (!cpu) |
201 | return; | 201 | return; |
202 | 202 | ||
203 | adev = &__get_cpu_var(cpu_apbt_dev); | 203 | adev = this_cpu_ptr(&cpu_apbt_dev); |
204 | if (!adev->timer) { | 204 | if (!adev->timer) { |
205 | adev->timer = dw_apb_clockevent_init(cpu, adev->name, | 205 | adev->timer = dw_apb_clockevent_init(cpu, adev->name, |
206 | APBT_CLOCKEVENT_RATING, adev_virt_addr(adev), | 206 | APBT_CLOCKEVENT_RATING, adev_virt_addr(adev), |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 67760275544b..00853b254ab0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -561,7 +561,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | |||
561 | */ | 561 | */ |
562 | static void setup_APIC_timer(void) | 562 | static void setup_APIC_timer(void) |
563 | { | 563 | { |
564 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 564 | struct clock_event_device *levt = this_cpu_ptr(&lapic_events); |
565 | 565 | ||
566 | if (this_cpu_has(X86_FEATURE_ARAT)) { | 566 | if (this_cpu_has(X86_FEATURE_ARAT)) { |
567 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; | 567 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; |
@@ -696,7 +696,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) | |||
696 | 696 | ||
697 | static int __init calibrate_APIC_clock(void) | 697 | static int __init calibrate_APIC_clock(void) |
698 | { | 698 | { |
699 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 699 | struct clock_event_device *levt = this_cpu_ptr(&lapic_events); |
700 | void (*real_handler)(struct clock_event_device *dev); | 700 | void (*real_handler)(struct clock_event_device *dev); |
701 | unsigned long deltaj; | 701 | unsigned long deltaj; |
702 | long delta, deltatsc; | 702 | long delta, deltatsc; |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e4ab2b42bd6f..5666eb9568fc 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1198,9 +1198,9 @@ DEFINE_PER_CPU(int, debug_stack_usage); | |||
1198 | 1198 | ||
1199 | int is_debug_stack(unsigned long addr) | 1199 | int is_debug_stack(unsigned long addr) |
1200 | { | 1200 | { |
1201 | return __get_cpu_var(debug_stack_usage) || | 1201 | return __this_cpu_read(debug_stack_usage) || |
1202 | (addr <= __get_cpu_var(debug_stack_addr) && | 1202 | (addr <= __this_cpu_read(debug_stack_addr) && |
1203 | addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); | 1203 | addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ)); |
1204 | } | 1204 | } |
1205 | NOKPROBE_SYMBOL(is_debug_stack); | 1205 | NOKPROBE_SYMBOL(is_debug_stack); |
1206 | 1206 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 5ac2d1fb28bc..4cfba4371a71 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex); | |||
83 | static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) | 83 | static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) |
84 | { | 84 | { |
85 | int cpu = smp_processor_id(); | 85 | int cpu = smp_processor_id(); |
86 | struct mce *m = &__get_cpu_var(injectm); | 86 | struct mce *m = this_cpu_ptr(&injectm); |
87 | if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) | 87 | if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) |
88 | return NMI_DONE; | 88 | return NMI_DONE; |
89 | cpumask_clear_cpu(cpu, mce_inject_cpumask); | 89 | cpumask_clear_cpu(cpu, mce_inject_cpumask); |
@@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) | |||
97 | static void mce_irq_ipi(void *info) | 97 | static void mce_irq_ipi(void *info) |
98 | { | 98 | { |
99 | int cpu = smp_processor_id(); | 99 | int cpu = smp_processor_id(); |
100 | struct mce *m = &__get_cpu_var(injectm); | 100 | struct mce *m = this_cpu_ptr(&injectm); |
101 | 101 | ||
102 | if (cpumask_test_cpu(cpu, mce_inject_cpumask) && | 102 | if (cpumask_test_cpu(cpu, mce_inject_cpumask) && |
103 | m->inject_flags & MCJ_EXCEPTION) { | 103 | m->inject_flags & MCJ_EXCEPTION) { |
@@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info) | |||
109 | /* Inject mce on current CPU */ | 109 | /* Inject mce on current CPU */ |
110 | static int raise_local(void) | 110 | static int raise_local(void) |
111 | { | 111 | { |
112 | struct mce *m = &__get_cpu_var(injectm); | 112 | struct mce *m = this_cpu_ptr(&injectm); |
113 | int context = MCJ_CTX(m->inject_flags); | 113 | int context = MCJ_CTX(m->inject_flags); |
114 | int ret = 0; | 114 | int ret = 0; |
115 | int cpu = m->extcpu; | 115 | int cpu = m->extcpu; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bd9ccda8087f..61a9668cebfd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr) | |||
400 | 400 | ||
401 | if (offset < 0) | 401 | if (offset < 0) |
402 | return 0; | 402 | return 0; |
403 | return *(u64 *)((char *)&__get_cpu_var(injectm) + offset); | 403 | return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); |
404 | } | 404 | } |
405 | 405 | ||
406 | if (rdmsrl_safe(msr, &v)) { | 406 | if (rdmsrl_safe(msr, &v)) { |
@@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v) | |||
422 | int offset = msr_to_offset(msr); | 422 | int offset = msr_to_offset(msr); |
423 | 423 | ||
424 | if (offset >= 0) | 424 | if (offset >= 0) |
425 | *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v; | 425 | *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; |
426 | return; | 426 | return; |
427 | } | 427 | } |
428 | wrmsrl(msr, v); | 428 | wrmsrl(msr, v); |
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring); | |||
478 | /* Runs with CPU affinity in workqueue */ | 478 | /* Runs with CPU affinity in workqueue */ |
479 | static int mce_ring_empty(void) | 479 | static int mce_ring_empty(void) |
480 | { | 480 | { |
481 | struct mce_ring *r = &__get_cpu_var(mce_ring); | 481 | struct mce_ring *r = this_cpu_ptr(&mce_ring); |
482 | 482 | ||
483 | return r->start == r->end; | 483 | return r->start == r->end; |
484 | } | 484 | } |
@@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn) | |||
490 | 490 | ||
491 | *pfn = 0; | 491 | *pfn = 0; |
492 | get_cpu(); | 492 | get_cpu(); |
493 | r = &__get_cpu_var(mce_ring); | 493 | r = this_cpu_ptr(&mce_ring); |
494 | if (r->start == r->end) | 494 | if (r->start == r->end) |
495 | goto out; | 495 | goto out; |
496 | *pfn = r->ring[r->start]; | 496 | *pfn = r->ring[r->start]; |
@@ -504,7 +504,7 @@ out: | |||
504 | /* Always runs in MCE context with preempt off */ | 504 | /* Always runs in MCE context with preempt off */ |
505 | static int mce_ring_add(unsigned long pfn) | 505 | static int mce_ring_add(unsigned long pfn) |
506 | { | 506 | { |
507 | struct mce_ring *r = &__get_cpu_var(mce_ring); | 507 | struct mce_ring *r = this_cpu_ptr(&mce_ring); |
508 | unsigned next; | 508 | unsigned next; |
509 | 509 | ||
510 | next = (r->end + 1) % MCE_RING_SIZE; | 510 | next = (r->end + 1) % MCE_RING_SIZE; |
@@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c) | |||
526 | static void mce_schedule_work(void) | 526 | static void mce_schedule_work(void) |
527 | { | 527 | { |
528 | if (!mce_ring_empty()) | 528 | if (!mce_ring_empty()) |
529 | schedule_work(&__get_cpu_var(mce_work)); | 529 | schedule_work(this_cpu_ptr(&mce_work)); |
530 | } | 530 | } |
531 | 531 | ||
532 | DEFINE_PER_CPU(struct irq_work, mce_irq_work); | 532 | DEFINE_PER_CPU(struct irq_work, mce_irq_work); |
@@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs) | |||
551 | return; | 551 | return; |
552 | } | 552 | } |
553 | 553 | ||
554 | irq_work_queue(&__get_cpu_var(mce_irq_work)); | 554 | irq_work_queue(this_cpu_ptr(&mce_irq_work)); |
555 | } | 555 | } |
556 | 556 | ||
557 | /* | 557 | /* |
@@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) | |||
1045 | 1045 | ||
1046 | mce_gather_info(&m, regs); | 1046 | mce_gather_info(&m, regs); |
1047 | 1047 | ||
1048 | final = &__get_cpu_var(mces_seen); | 1048 | final = this_cpu_ptr(&mces_seen); |
1049 | *final = m; | 1049 | *final = m; |
1050 | 1050 | ||
1051 | memset(valid_banks, 0, sizeof(valid_banks)); | 1051 | memset(valid_banks, 0, sizeof(valid_banks)); |
@@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) = | |||
1278 | 1278 | ||
1279 | static int cmc_error_seen(void) | 1279 | static int cmc_error_seen(void) |
1280 | { | 1280 | { |
1281 | unsigned long *v = &__get_cpu_var(mce_polled_error); | 1281 | unsigned long *v = this_cpu_ptr(&mce_polled_error); |
1282 | 1282 | ||
1283 | return test_and_clear_bit(0, v); | 1283 | return test_and_clear_bit(0, v); |
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | static void mce_timer_fn(unsigned long data) | 1286 | static void mce_timer_fn(unsigned long data) |
1287 | { | 1287 | { |
1288 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1288 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1289 | unsigned long iv; | 1289 | unsigned long iv; |
1290 | int notify; | 1290 | int notify; |
1291 | 1291 | ||
1292 | WARN_ON(smp_processor_id() != data); | 1292 | WARN_ON(smp_processor_id() != data); |
1293 | 1293 | ||
1294 | if (mce_available(__this_cpu_ptr(&cpu_info))) { | 1294 | if (mce_available(this_cpu_ptr(&cpu_info))) { |
1295 | machine_check_poll(MCP_TIMESTAMP, | 1295 | machine_check_poll(MCP_TIMESTAMP, |
1296 | &__get_cpu_var(mce_poll_banks)); | 1296 | this_cpu_ptr(&mce_poll_banks)); |
1297 | mce_intel_cmci_poll(); | 1297 | mce_intel_cmci_poll(); |
1298 | } | 1298 | } |
1299 | 1299 | ||
@@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data) | |||
1323 | */ | 1323 | */ |
1324 | void mce_timer_kick(unsigned long interval) | 1324 | void mce_timer_kick(unsigned long interval) |
1325 | { | 1325 | { |
1326 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1326 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1327 | unsigned long when = jiffies + interval; | 1327 | unsigned long when = jiffies + interval; |
1328 | unsigned long iv = __this_cpu_read(mce_next_interval); | 1328 | unsigned long iv = __this_cpu_read(mce_next_interval); |
1329 | 1329 | ||
@@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t) | |||
1659 | 1659 | ||
1660 | static void __mcheck_cpu_init_timer(void) | 1660 | static void __mcheck_cpu_init_timer(void) |
1661 | { | 1661 | { |
1662 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1662 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1663 | unsigned int cpu = smp_processor_id(); | 1663 | unsigned int cpu = smp_processor_id(); |
1664 | 1664 | ||
1665 | setup_timer(t, mce_timer_fn, cpu); | 1665 | setup_timer(t, mce_timer_fn, cpu); |
@@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) | |||
1702 | __mcheck_cpu_init_generic(); | 1702 | __mcheck_cpu_init_generic(); |
1703 | __mcheck_cpu_init_vendor(c); | 1703 | __mcheck_cpu_init_vendor(c); |
1704 | __mcheck_cpu_init_timer(); | 1704 | __mcheck_cpu_init_timer(); |
1705 | INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); | 1705 | INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work); |
1706 | init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb); | 1706 | init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb); |
1707 | } | 1707 | } |
1708 | 1708 | ||
1709 | /* | 1709 | /* |
@@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = { | |||
1955 | static void __mce_disable_bank(void *arg) | 1955 | static void __mce_disable_bank(void *arg) |
1956 | { | 1956 | { |
1957 | int bank = *((int *)arg); | 1957 | int bank = *((int *)arg); |
1958 | __clear_bit(bank, __get_cpu_var(mce_poll_banks)); | 1958 | __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); |
1959 | cmci_disable_bank(bank); | 1959 | cmci_disable_bank(bank); |
1960 | } | 1960 | } |
1961 | 1961 | ||
@@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void) | |||
2065 | static void mce_syscore_resume(void) | 2065 | static void mce_syscore_resume(void) |
2066 | { | 2066 | { |
2067 | __mcheck_cpu_init_generic(); | 2067 | __mcheck_cpu_init_generic(); |
2068 | __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); | 2068 | __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); |
2069 | } | 2069 | } |
2070 | 2070 | ||
2071 | static struct syscore_ops mce_syscore_ops = { | 2071 | static struct syscore_ops mce_syscore_ops = { |
@@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = { | |||
2080 | 2080 | ||
2081 | static void mce_cpu_restart(void *data) | 2081 | static void mce_cpu_restart(void *data) |
2082 | { | 2082 | { |
2083 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2083 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2084 | return; | 2084 | return; |
2085 | __mcheck_cpu_init_generic(); | 2085 | __mcheck_cpu_init_generic(); |
2086 | __mcheck_cpu_init_timer(); | 2086 | __mcheck_cpu_init_timer(); |
@@ -2096,14 +2096,14 @@ static void mce_restart(void) | |||
2096 | /* Toggle features for corrected errors */ | 2096 | /* Toggle features for corrected errors */ |
2097 | static void mce_disable_cmci(void *data) | 2097 | static void mce_disable_cmci(void *data) |
2098 | { | 2098 | { |
2099 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2099 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2100 | return; | 2100 | return; |
2101 | cmci_clear(); | 2101 | cmci_clear(); |
2102 | } | 2102 | } |
2103 | 2103 | ||
2104 | static void mce_enable_ce(void *all) | 2104 | static void mce_enable_ce(void *all) |
2105 | { | 2105 | { |
2106 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2106 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2107 | return; | 2107 | return; |
2108 | cmci_reenable(); | 2108 | cmci_reenable(); |
2109 | cmci_recheck(); | 2109 | cmci_recheck(); |
@@ -2336,7 +2336,7 @@ static void mce_disable_cpu(void *h) | |||
2336 | unsigned long action = *(unsigned long *)h; | 2336 | unsigned long action = *(unsigned long *)h; |
2337 | int i; | 2337 | int i; |
2338 | 2338 | ||
2339 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2339 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2340 | return; | 2340 | return; |
2341 | 2341 | ||
2342 | if (!(action & CPU_TASKS_FROZEN)) | 2342 | if (!(action & CPU_TASKS_FROZEN)) |
@@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h) | |||
2354 | unsigned long action = *(unsigned long *)h; | 2354 | unsigned long action = *(unsigned long *)h; |
2355 | int i; | 2355 | int i; |
2356 | 2356 | ||
2357 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2357 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2358 | return; | 2358 | return; |
2359 | 2359 | ||
2360 | if (!(action & CPU_TASKS_FROZEN)) | 2360 | if (!(action & CPU_TASKS_FROZEN)) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1e49f8f41276..5d4999f95aec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void) | |||
310 | * event. | 310 | * event. |
311 | */ | 311 | */ |
312 | machine_check_poll(MCP_TIMESTAMP, | 312 | machine_check_poll(MCP_TIMESTAMP, |
313 | &__get_cpu_var(mce_poll_banks)); | 313 | this_cpu_ptr(&mce_poll_banks)); |
314 | 314 | ||
315 | if (high & MASK_OVERFLOW_HI) { | 315 | if (high & MASK_OVERFLOW_HI) { |
316 | rdmsrl(address, m.misc); | 316 | rdmsrl(address, m.misc); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..b3c97bafc123 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void) | |||
86 | { | 86 | { |
87 | if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) | 87 | if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) |
88 | return; | 88 | return; |
89 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | 89 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
90 | } | 90 | } |
91 | 91 | ||
92 | void mce_intel_hcpu_update(unsigned long cpu) | 92 | void mce_intel_hcpu_update(unsigned long cpu) |
@@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void) | |||
145 | u64 val; | 145 | u64 val; |
146 | 146 | ||
147 | raw_spin_lock_irqsave(&cmci_discover_lock, flags); | 147 | raw_spin_lock_irqsave(&cmci_discover_lock, flags); |
148 | owned = __get_cpu_var(mce_banks_owned); | 148 | owned = this_cpu_ptr(mce_banks_owned); |
149 | for_each_set_bit(bank, owned, MAX_NR_BANKS) { | 149 | for_each_set_bit(bank, owned, MAX_NR_BANKS) { |
150 | rdmsrl(MSR_IA32_MCx_CTL2(bank), val); | 150 | rdmsrl(MSR_IA32_MCx_CTL2(bank), val); |
151 | val &= ~MCI_CTL2_CMCI_EN; | 151 | val &= ~MCI_CTL2_CMCI_EN; |
@@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void) | |||
195 | { | 195 | { |
196 | if (cmci_storm_detect()) | 196 | if (cmci_storm_detect()) |
197 | return; | 197 | return; |
198 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | 198 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
199 | mce_notify_irq(); | 199 | mce_notify_irq(); |
200 | } | 200 | } |
201 | 201 | ||
@@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void) | |||
206 | */ | 206 | */ |
207 | static void cmci_discover(int banks) | 207 | static void cmci_discover(int banks) |
208 | { | 208 | { |
209 | unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned); | 209 | unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned); |
210 | unsigned long flags; | 210 | unsigned long flags; |
211 | int i; | 211 | int i; |
212 | int bios_wrong_thresh = 0; | 212 | int bios_wrong_thresh = 0; |
@@ -228,7 +228,7 @@ static void cmci_discover(int banks) | |||
228 | /* Already owned by someone else? */ | 228 | /* Already owned by someone else? */ |
229 | if (val & MCI_CTL2_CMCI_EN) { | 229 | if (val & MCI_CTL2_CMCI_EN) { |
230 | clear_bit(i, owned); | 230 | clear_bit(i, owned); |
231 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | 231 | __clear_bit(i, this_cpu_ptr(mce_poll_banks)); |
232 | continue; | 232 | continue; |
233 | } | 233 | } |
234 | 234 | ||
@@ -252,7 +252,7 @@ static void cmci_discover(int banks) | |||
252 | /* Did the enable bit stick? -- the bank supports CMCI */ | 252 | /* Did the enable bit stick? -- the bank supports CMCI */ |
253 | if (val & MCI_CTL2_CMCI_EN) { | 253 | if (val & MCI_CTL2_CMCI_EN) { |
254 | set_bit(i, owned); | 254 | set_bit(i, owned); |
255 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | 255 | __clear_bit(i, this_cpu_ptr(mce_poll_banks)); |
256 | /* | 256 | /* |
257 | * We are able to set thresholds for some banks that | 257 | * We are able to set thresholds for some banks that |
258 | * had a threshold of 0. This means the BIOS has not | 258 | * had a threshold of 0. This means the BIOS has not |
@@ -263,7 +263,7 @@ static void cmci_discover(int banks) | |||
263 | (val & MCI_CTL2_CMCI_THRESHOLD_MASK)) | 263 | (val & MCI_CTL2_CMCI_THRESHOLD_MASK)) |
264 | bios_wrong_thresh = 1; | 264 | bios_wrong_thresh = 1; |
265 | } else { | 265 | } else { |
266 | WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); | 266 | WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks))); |
267 | } | 267 | } |
268 | } | 268 | } |
269 | raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); | 269 | raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); |
@@ -284,10 +284,10 @@ void cmci_recheck(void) | |||
284 | unsigned long flags; | 284 | unsigned long flags; |
285 | int banks; | 285 | int banks; |
286 | 286 | ||
287 | if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) | 287 | if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) |
288 | return; | 288 | return; |
289 | local_irq_save(flags); | 289 | local_irq_save(flags); |
290 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | 290 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
291 | local_irq_restore(flags); | 291 | local_irq_restore(flags); |
292 | } | 292 | } |
293 | 293 | ||
@@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank) | |||
296 | { | 296 | { |
297 | u64 val; | 297 | u64 val; |
298 | 298 | ||
299 | if (!test_bit(bank, __get_cpu_var(mce_banks_owned))) | 299 | if (!test_bit(bank, this_cpu_ptr(mce_banks_owned))) |
300 | return; | 300 | return; |
301 | rdmsrl(MSR_IA32_MCx_CTL2(bank), val); | 301 | rdmsrl(MSR_IA32_MCx_CTL2(bank), val); |
302 | val &= ~MCI_CTL2_CMCI_EN; | 302 | val &= ~MCI_CTL2_CMCI_EN; |
303 | wrmsrl(MSR_IA32_MCx_CTL2(bank), val); | 303 | wrmsrl(MSR_IA32_MCx_CTL2(bank), val); |
304 | __clear_bit(bank, __get_cpu_var(mce_banks_owned)); | 304 | __clear_bit(bank, this_cpu_ptr(mce_banks_owned)); |
305 | } | 305 | } |
306 | 306 | ||
307 | /* | 307 | /* |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2879ecdaac43..5cd2b7967370 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -487,7 +487,7 @@ static int __x86_pmu_event_init(struct perf_event *event) | |||
487 | 487 | ||
488 | void x86_pmu_disable_all(void) | 488 | void x86_pmu_disable_all(void) |
489 | { | 489 | { |
490 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 490 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
491 | int idx; | 491 | int idx; |
492 | 492 | ||
493 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 493 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
@@ -505,7 +505,7 @@ void x86_pmu_disable_all(void) | |||
505 | 505 | ||
506 | static void x86_pmu_disable(struct pmu *pmu) | 506 | static void x86_pmu_disable(struct pmu *pmu) |
507 | { | 507 | { |
508 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 508 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
509 | 509 | ||
510 | if (!x86_pmu_initialized()) | 510 | if (!x86_pmu_initialized()) |
511 | return; | 511 | return; |
@@ -522,7 +522,7 @@ static void x86_pmu_disable(struct pmu *pmu) | |||
522 | 522 | ||
523 | void x86_pmu_enable_all(int added) | 523 | void x86_pmu_enable_all(int added) |
524 | { | 524 | { |
525 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 525 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
526 | int idx; | 526 | int idx; |
527 | 527 | ||
528 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 528 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
@@ -869,7 +869,7 @@ static void x86_pmu_start(struct perf_event *event, int flags); | |||
869 | 869 | ||
870 | static void x86_pmu_enable(struct pmu *pmu) | 870 | static void x86_pmu_enable(struct pmu *pmu) |
871 | { | 871 | { |
872 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 872 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
873 | struct perf_event *event; | 873 | struct perf_event *event; |
874 | struct hw_perf_event *hwc; | 874 | struct hw_perf_event *hwc; |
875 | int i, added = cpuc->n_added; | 875 | int i, added = cpuc->n_added; |
@@ -1020,7 +1020,7 @@ void x86_pmu_enable_event(struct perf_event *event) | |||
1020 | */ | 1020 | */ |
1021 | static int x86_pmu_add(struct perf_event *event, int flags) | 1021 | static int x86_pmu_add(struct perf_event *event, int flags) |
1022 | { | 1022 | { |
1023 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1023 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1024 | struct hw_perf_event *hwc; | 1024 | struct hw_perf_event *hwc; |
1025 | int assign[X86_PMC_IDX_MAX]; | 1025 | int assign[X86_PMC_IDX_MAX]; |
1026 | int n, n0, ret; | 1026 | int n, n0, ret; |
@@ -1071,7 +1071,7 @@ out: | |||
1071 | 1071 | ||
1072 | static void x86_pmu_start(struct perf_event *event, int flags) | 1072 | static void x86_pmu_start(struct perf_event *event, int flags) |
1073 | { | 1073 | { |
1074 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1074 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1075 | int idx = event->hw.idx; | 1075 | int idx = event->hw.idx; |
1076 | 1076 | ||
1077 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | 1077 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) |
@@ -1150,7 +1150,7 @@ void perf_event_print_debug(void) | |||
1150 | 1150 | ||
1151 | void x86_pmu_stop(struct perf_event *event, int flags) | 1151 | void x86_pmu_stop(struct perf_event *event, int flags) |
1152 | { | 1152 | { |
1153 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1153 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1154 | struct hw_perf_event *hwc = &event->hw; | 1154 | struct hw_perf_event *hwc = &event->hw; |
1155 | 1155 | ||
1156 | if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { | 1156 | if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { |
@@ -1172,7 +1172,7 @@ void x86_pmu_stop(struct perf_event *event, int flags) | |||
1172 | 1172 | ||
1173 | static void x86_pmu_del(struct perf_event *event, int flags) | 1173 | static void x86_pmu_del(struct perf_event *event, int flags) |
1174 | { | 1174 | { |
1175 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1175 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1176 | int i; | 1176 | int i; |
1177 | 1177 | ||
1178 | /* | 1178 | /* |
@@ -1227,7 +1227,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1227 | int idx, handled = 0; | 1227 | int idx, handled = 0; |
1228 | u64 val; | 1228 | u64 val; |
1229 | 1229 | ||
1230 | cpuc = &__get_cpu_var(cpu_hw_events); | 1230 | cpuc = this_cpu_ptr(&cpu_hw_events); |
1231 | 1231 | ||
1232 | /* | 1232 | /* |
1233 | * Some chipsets need to unmask the LVTPC in a particular spot | 1233 | * Some chipsets need to unmask the LVTPC in a particular spot |
@@ -1636,7 +1636,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) | |||
1636 | */ | 1636 | */ |
1637 | static int x86_pmu_commit_txn(struct pmu *pmu) | 1637 | static int x86_pmu_commit_txn(struct pmu *pmu) |
1638 | { | 1638 | { |
1639 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1639 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1640 | int assign[X86_PMC_IDX_MAX]; | 1640 | int assign[X86_PMC_IDX_MAX]; |
1641 | int n, ret; | 1641 | int n, ret; |
1642 | 1642 | ||
@@ -1995,7 +1995,7 @@ static unsigned long get_segment_base(unsigned int segment) | |||
1995 | if (idx > GDT_ENTRIES) | 1995 | if (idx > GDT_ENTRIES) |
1996 | return 0; | 1996 | return 0; |
1997 | 1997 | ||
1998 | desc = __this_cpu_ptr(&gdt_page.gdt[0]); | 1998 | desc = raw_cpu_ptr(gdt_page.gdt); |
1999 | } | 1999 | } |
2000 | 2000 | ||
2001 | return get_desc_base(desc + idx); | 2001 | return get_desc_base(desc + idx); |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index beeb7cc07044..28926311aac1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -699,7 +699,7 @@ __init int amd_pmu_init(void) | |||
699 | 699 | ||
700 | void amd_pmu_enable_virt(void) | 700 | void amd_pmu_enable_virt(void) |
701 | { | 701 | { |
702 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 702 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
703 | 703 | ||
704 | cpuc->perf_ctr_virt_mask = 0; | 704 | cpuc->perf_ctr_virt_mask = 0; |
705 | 705 | ||
@@ -711,7 +711,7 @@ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); | |||
711 | 711 | ||
712 | void amd_pmu_disable_virt(void) | 712 | void amd_pmu_disable_virt(void) |
713 | { | 713 | { |
714 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 714 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
715 | 715 | ||
716 | /* | 716 | /* |
717 | * We only mask out the Host-only bit so that host-only counting works | 717 | * We only mask out the Host-only bit so that host-only counting works |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 2502d0d9d246..6f80accf137d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1045,7 +1045,7 @@ static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) | |||
1045 | 1045 | ||
1046 | static void intel_pmu_disable_all(void) | 1046 | static void intel_pmu_disable_all(void) |
1047 | { | 1047 | { |
1048 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1048 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1049 | 1049 | ||
1050 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); | 1050 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); |
1051 | 1051 | ||
@@ -1058,7 +1058,7 @@ static void intel_pmu_disable_all(void) | |||
1058 | 1058 | ||
1059 | static void intel_pmu_enable_all(int added) | 1059 | static void intel_pmu_enable_all(int added) |
1060 | { | 1060 | { |
1061 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1061 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1062 | 1062 | ||
1063 | intel_pmu_pebs_enable_all(); | 1063 | intel_pmu_pebs_enable_all(); |
1064 | intel_pmu_lbr_enable_all(); | 1064 | intel_pmu_lbr_enable_all(); |
@@ -1092,7 +1092,7 @@ static void intel_pmu_enable_all(int added) | |||
1092 | */ | 1092 | */ |
1093 | static void intel_pmu_nhm_workaround(void) | 1093 | static void intel_pmu_nhm_workaround(void) |
1094 | { | 1094 | { |
1095 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1095 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1096 | static const unsigned long nhm_magic[4] = { | 1096 | static const unsigned long nhm_magic[4] = { |
1097 | 0x4300B5, | 1097 | 0x4300B5, |
1098 | 0x4300D2, | 1098 | 0x4300D2, |
@@ -1191,7 +1191,7 @@ static inline bool event_is_checkpointed(struct perf_event *event) | |||
1191 | static void intel_pmu_disable_event(struct perf_event *event) | 1191 | static void intel_pmu_disable_event(struct perf_event *event) |
1192 | { | 1192 | { |
1193 | struct hw_perf_event *hwc = &event->hw; | 1193 | struct hw_perf_event *hwc = &event->hw; |
1194 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1194 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1195 | 1195 | ||
1196 | if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { | 1196 | if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { |
1197 | intel_pmu_disable_bts(); | 1197 | intel_pmu_disable_bts(); |
@@ -1255,7 +1255,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) | |||
1255 | static void intel_pmu_enable_event(struct perf_event *event) | 1255 | static void intel_pmu_enable_event(struct perf_event *event) |
1256 | { | 1256 | { |
1257 | struct hw_perf_event *hwc = &event->hw; | 1257 | struct hw_perf_event *hwc = &event->hw; |
1258 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1258 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1259 | 1259 | ||
1260 | if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { | 1260 | if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { |
1261 | if (!__this_cpu_read(cpu_hw_events.enabled)) | 1261 | if (!__this_cpu_read(cpu_hw_events.enabled)) |
@@ -1349,7 +1349,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1349 | u64 status; | 1349 | u64 status; |
1350 | int handled; | 1350 | int handled; |
1351 | 1351 | ||
1352 | cpuc = &__get_cpu_var(cpu_hw_events); | 1352 | cpuc = this_cpu_ptr(&cpu_hw_events); |
1353 | 1353 | ||
1354 | /* | 1354 | /* |
1355 | * No known reason to not always do late ACK, | 1355 | * No known reason to not always do late ACK, |
@@ -1781,7 +1781,7 @@ EXPORT_SYMBOL_GPL(perf_guest_get_msrs); | |||
1781 | 1781 | ||
1782 | static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) | 1782 | static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) |
1783 | { | 1783 | { |
1784 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1784 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1785 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | 1785 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; |
1786 | 1786 | ||
1787 | arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; | 1787 | arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; |
@@ -1802,7 +1802,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) | |||
1802 | 1802 | ||
1803 | static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr) | 1803 | static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr) |
1804 | { | 1804 | { |
1805 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1805 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1806 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | 1806 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; |
1807 | int idx; | 1807 | int idx; |
1808 | 1808 | ||
@@ -1836,7 +1836,7 @@ static void core_pmu_enable_event(struct perf_event *event) | |||
1836 | 1836 | ||
1837 | static void core_pmu_enable_all(int added) | 1837 | static void core_pmu_enable_all(int added) |
1838 | { | 1838 | { |
1839 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1839 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1840 | int idx; | 1840 | int idx; |
1841 | 1841 | ||
1842 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1842 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 696ade311ded..7b786b369789 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -475,7 +475,7 @@ void intel_pmu_enable_bts(u64 config) | |||
475 | 475 | ||
476 | void intel_pmu_disable_bts(void) | 476 | void intel_pmu_disable_bts(void) |
477 | { | 477 | { |
478 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 478 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
479 | unsigned long debugctlmsr; | 479 | unsigned long debugctlmsr; |
480 | 480 | ||
481 | if (!cpuc->ds) | 481 | if (!cpuc->ds) |
@@ -492,7 +492,7 @@ void intel_pmu_disable_bts(void) | |||
492 | 492 | ||
493 | int intel_pmu_drain_bts_buffer(void) | 493 | int intel_pmu_drain_bts_buffer(void) |
494 | { | 494 | { |
495 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 495 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
496 | struct debug_store *ds = cpuc->ds; | 496 | struct debug_store *ds = cpuc->ds; |
497 | struct bts_record { | 497 | struct bts_record { |
498 | u64 from; | 498 | u64 from; |
@@ -712,7 +712,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event) | |||
712 | 712 | ||
713 | void intel_pmu_pebs_enable(struct perf_event *event) | 713 | void intel_pmu_pebs_enable(struct perf_event *event) |
714 | { | 714 | { |
715 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 715 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
716 | struct hw_perf_event *hwc = &event->hw; | 716 | struct hw_perf_event *hwc = &event->hw; |
717 | 717 | ||
718 | hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; | 718 | hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; |
@@ -727,7 +727,7 @@ void intel_pmu_pebs_enable(struct perf_event *event) | |||
727 | 727 | ||
728 | void intel_pmu_pebs_disable(struct perf_event *event) | 728 | void intel_pmu_pebs_disable(struct perf_event *event) |
729 | { | 729 | { |
730 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 730 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
731 | struct hw_perf_event *hwc = &event->hw; | 731 | struct hw_perf_event *hwc = &event->hw; |
732 | 732 | ||
733 | cpuc->pebs_enabled &= ~(1ULL << hwc->idx); | 733 | cpuc->pebs_enabled &= ~(1ULL << hwc->idx); |
@@ -745,7 +745,7 @@ void intel_pmu_pebs_disable(struct perf_event *event) | |||
745 | 745 | ||
746 | void intel_pmu_pebs_enable_all(void) | 746 | void intel_pmu_pebs_enable_all(void) |
747 | { | 747 | { |
748 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 748 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
749 | 749 | ||
750 | if (cpuc->pebs_enabled) | 750 | if (cpuc->pebs_enabled) |
751 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); | 751 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); |
@@ -753,7 +753,7 @@ void intel_pmu_pebs_enable_all(void) | |||
753 | 753 | ||
754 | void intel_pmu_pebs_disable_all(void) | 754 | void intel_pmu_pebs_disable_all(void) |
755 | { | 755 | { |
756 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 756 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
757 | 757 | ||
758 | if (cpuc->pebs_enabled) | 758 | if (cpuc->pebs_enabled) |
759 | wrmsrl(MSR_IA32_PEBS_ENABLE, 0); | 759 | wrmsrl(MSR_IA32_PEBS_ENABLE, 0); |
@@ -761,7 +761,7 @@ void intel_pmu_pebs_disable_all(void) | |||
761 | 761 | ||
762 | static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) | 762 | static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) |
763 | { | 763 | { |
764 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 764 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
765 | unsigned long from = cpuc->lbr_entries[0].from; | 765 | unsigned long from = cpuc->lbr_entries[0].from; |
766 | unsigned long old_to, to = cpuc->lbr_entries[0].to; | 766 | unsigned long old_to, to = cpuc->lbr_entries[0].to; |
767 | unsigned long ip = regs->ip; | 767 | unsigned long ip = regs->ip; |
@@ -868,7 +868,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
868 | * We cast to the biggest pebs_record but are careful not to | 868 | * We cast to the biggest pebs_record but are careful not to |
869 | * unconditionally access the 'extra' entries. | 869 | * unconditionally access the 'extra' entries. |
870 | */ | 870 | */ |
871 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 871 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
872 | struct pebs_record_hsw *pebs = __pebs; | 872 | struct pebs_record_hsw *pebs = __pebs; |
873 | struct perf_sample_data data; | 873 | struct perf_sample_data data; |
874 | struct pt_regs regs; | 874 | struct pt_regs regs; |
@@ -957,7 +957,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
957 | 957 | ||
958 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | 958 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) |
959 | { | 959 | { |
960 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 960 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
961 | struct debug_store *ds = cpuc->ds; | 961 | struct debug_store *ds = cpuc->ds; |
962 | struct perf_event *event = cpuc->events[0]; /* PMC0 only */ | 962 | struct perf_event *event = cpuc->events[0]; /* PMC0 only */ |
963 | struct pebs_record_core *at, *top; | 963 | struct pebs_record_core *at, *top; |
@@ -998,7 +998,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | |||
998 | 998 | ||
999 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | 999 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) |
1000 | { | 1000 | { |
1001 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1001 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1002 | struct debug_store *ds = cpuc->ds; | 1002 | struct debug_store *ds = cpuc->ds; |
1003 | struct perf_event *event = NULL; | 1003 | struct perf_event *event = NULL; |
1004 | void *at, *top; | 1004 | void *at, *top; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 9dd2459a4c73..ebb0d3144551 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -133,7 +133,7 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc); | |||
133 | static void __intel_pmu_lbr_enable(void) | 133 | static void __intel_pmu_lbr_enable(void) |
134 | { | 134 | { |
135 | u64 debugctl; | 135 | u64 debugctl; |
136 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 136 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
137 | 137 | ||
138 | if (cpuc->lbr_sel) | 138 | if (cpuc->lbr_sel) |
139 | wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); | 139 | wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); |
@@ -183,7 +183,7 @@ void intel_pmu_lbr_reset(void) | |||
183 | 183 | ||
184 | void intel_pmu_lbr_enable(struct perf_event *event) | 184 | void intel_pmu_lbr_enable(struct perf_event *event) |
185 | { | 185 | { |
186 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 186 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
187 | 187 | ||
188 | if (!x86_pmu.lbr_nr) | 188 | if (!x86_pmu.lbr_nr) |
189 | return; | 189 | return; |
@@ -203,7 +203,7 @@ void intel_pmu_lbr_enable(struct perf_event *event) | |||
203 | 203 | ||
204 | void intel_pmu_lbr_disable(struct perf_event *event) | 204 | void intel_pmu_lbr_disable(struct perf_event *event) |
205 | { | 205 | { |
206 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 206 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
207 | 207 | ||
208 | if (!x86_pmu.lbr_nr) | 208 | if (!x86_pmu.lbr_nr) |
209 | return; | 209 | return; |
@@ -220,7 +220,7 @@ void intel_pmu_lbr_disable(struct perf_event *event) | |||
220 | 220 | ||
221 | void intel_pmu_lbr_enable_all(void) | 221 | void intel_pmu_lbr_enable_all(void) |
222 | { | 222 | { |
223 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 223 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
224 | 224 | ||
225 | if (cpuc->lbr_users) | 225 | if (cpuc->lbr_users) |
226 | __intel_pmu_lbr_enable(); | 226 | __intel_pmu_lbr_enable(); |
@@ -228,7 +228,7 @@ void intel_pmu_lbr_enable_all(void) | |||
228 | 228 | ||
229 | void intel_pmu_lbr_disable_all(void) | 229 | void intel_pmu_lbr_disable_all(void) |
230 | { | 230 | { |
231 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 231 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
232 | 232 | ||
233 | if (cpuc->lbr_users) | 233 | if (cpuc->lbr_users) |
234 | __intel_pmu_lbr_disable(); | 234 | __intel_pmu_lbr_disable(); |
@@ -332,7 +332,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) | |||
332 | 332 | ||
333 | void intel_pmu_lbr_read(void) | 333 | void intel_pmu_lbr_read(void) |
334 | { | 334 | { |
335 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 335 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
336 | 336 | ||
337 | if (!cpuc->lbr_users) | 337 | if (!cpuc->lbr_users) |
338 | return; | 338 | return; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 619f7699487a..d64f275fe274 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v) | |||
135 | * or use ldexp(count, -32). | 135 | * or use ldexp(count, -32). |
136 | * Watts = Joules/Time delta | 136 | * Watts = Joules/Time delta |
137 | */ | 137 | */ |
138 | return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit); | 138 | return v << (32 - __this_cpu_read(rapl_pmu->hw_unit)); |
139 | } | 139 | } |
140 | 140 | ||
141 | static u64 rapl_event_update(struct perf_event *event) | 141 | static u64 rapl_event_update(struct perf_event *event) |
@@ -187,7 +187,7 @@ static void rapl_stop_hrtimer(struct rapl_pmu *pmu) | |||
187 | 187 | ||
188 | static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) | 188 | static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) |
189 | { | 189 | { |
190 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | 190 | struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); |
191 | struct perf_event *event; | 191 | struct perf_event *event; |
192 | unsigned long flags; | 192 | unsigned long flags; |
193 | 193 | ||
@@ -234,7 +234,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu, | |||
234 | 234 | ||
235 | static void rapl_pmu_event_start(struct perf_event *event, int mode) | 235 | static void rapl_pmu_event_start(struct perf_event *event, int mode) |
236 | { | 236 | { |
237 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | 237 | struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); |
238 | unsigned long flags; | 238 | unsigned long flags; |
239 | 239 | ||
240 | spin_lock_irqsave(&pmu->lock, flags); | 240 | spin_lock_irqsave(&pmu->lock, flags); |
@@ -244,7 +244,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode) | |||
244 | 244 | ||
245 | static void rapl_pmu_event_stop(struct perf_event *event, int mode) | 245 | static void rapl_pmu_event_stop(struct perf_event *event, int mode) |
246 | { | 246 | { |
247 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | 247 | struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); |
248 | struct hw_perf_event *hwc = &event->hw; | 248 | struct hw_perf_event *hwc = &event->hw; |
249 | unsigned long flags; | 249 | unsigned long flags; |
250 | 250 | ||
@@ -278,7 +278,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode) | |||
278 | 278 | ||
279 | static int rapl_pmu_event_add(struct perf_event *event, int mode) | 279 | static int rapl_pmu_event_add(struct perf_event *event, int mode) |
280 | { | 280 | { |
281 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | 281 | struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); |
282 | struct hw_perf_event *hwc = &event->hw; | 282 | struct hw_perf_event *hwc = &event->hw; |
283 | unsigned long flags; | 283 | unsigned long flags; |
284 | 284 | ||
@@ -696,7 +696,7 @@ static int __init rapl_pmu_init(void) | |||
696 | return -1; | 696 | return -1; |
697 | } | 697 | } |
698 | 698 | ||
699 | pmu = __get_cpu_var(rapl_pmu); | 699 | pmu = __this_cpu_read(rapl_pmu); |
700 | 700 | ||
701 | pr_info("RAPL PMU detected, hw unit 2^-%d Joules," | 701 | pr_info("RAPL PMU detected, hw unit 2^-%d Joules," |
702 | " API unit is 2^-32 Joules," | 702 | " API unit is 2^-32 Joules," |
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 838fa8772c62..5b0c232d1ee6 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -217,7 +217,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs) | |||
217 | int bit, loops; | 217 | int bit, loops; |
218 | u64 status; | 218 | u64 status; |
219 | 219 | ||
220 | cpuc = &__get_cpu_var(cpu_hw_events); | 220 | cpuc = this_cpu_ptr(&cpu_hw_events); |
221 | 221 | ||
222 | knc_pmu_disable_all(); | 222 | knc_pmu_disable_all(); |
223 | 223 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 5d466b7d8609..f2e56783af3d 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -915,7 +915,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event) | |||
915 | 915 | ||
916 | static void p4_pmu_disable_all(void) | 916 | static void p4_pmu_disable_all(void) |
917 | { | 917 | { |
918 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 918 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
919 | int idx; | 919 | int idx; |
920 | 920 | ||
921 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 921 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
@@ -984,7 +984,7 @@ static void p4_pmu_enable_event(struct perf_event *event) | |||
984 | 984 | ||
985 | static void p4_pmu_enable_all(int added) | 985 | static void p4_pmu_enable_all(int added) |
986 | { | 986 | { |
987 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 987 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
988 | int idx; | 988 | int idx; |
989 | 989 | ||
990 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 990 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
@@ -1004,7 +1004,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
1004 | int idx, handled = 0; | 1004 | int idx, handled = 0; |
1005 | u64 val; | 1005 | u64 val; |
1006 | 1006 | ||
1007 | cpuc = &__get_cpu_var(cpu_hw_events); | 1007 | cpuc = this_cpu_ptr(&cpu_hw_events); |
1008 | 1008 | ||
1009 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1009 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
1010 | int overflow; | 1010 | int overflow; |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 5f9cf20cdb68..3d5fb509bdeb 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -108,7 +108,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
108 | int i; | 108 | int i; |
109 | 109 | ||
110 | for (i = 0; i < HBP_NUM; i++) { | 110 | for (i = 0; i < HBP_NUM; i++) { |
111 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | 111 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); |
112 | 112 | ||
113 | if (!*slot) { | 113 | if (!*slot) { |
114 | *slot = bp; | 114 | *slot = bp; |
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
122 | set_debugreg(info->address, i); | 122 | set_debugreg(info->address, i); |
123 | __this_cpu_write(cpu_debugreg[i], info->address); | 123 | __this_cpu_write(cpu_debugreg[i], info->address); |
124 | 124 | ||
125 | dr7 = &__get_cpu_var(cpu_dr7); | 125 | dr7 = this_cpu_ptr(&cpu_dr7); |
126 | *dr7 |= encode_dr7(i, info->len, info->type); | 126 | *dr7 |= encode_dr7(i, info->len, info->type); |
127 | 127 | ||
128 | set_debugreg(*dr7, 7); | 128 | set_debugreg(*dr7, 7); |
@@ -146,7 +146,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
146 | int i; | 146 | int i; |
147 | 147 | ||
148 | for (i = 0; i < HBP_NUM; i++) { | 148 | for (i = 0; i < HBP_NUM; i++) { |
149 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | 149 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); |
150 | 150 | ||
151 | if (*slot == bp) { | 151 | if (*slot == bp) { |
152 | *slot = NULL; | 152 | *slot = NULL; |
@@ -157,7 +157,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
157 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) | 157 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) |
158 | return; | 158 | return; |
159 | 159 | ||
160 | dr7 = &__get_cpu_var(cpu_dr7); | 160 | dr7 = this_cpu_ptr(&cpu_dr7); |
161 | *dr7 &= ~__encode_dr7(i, info->len, info->type); | 161 | *dr7 &= ~__encode_dr7(i, info->len, info->type); |
162 | 162 | ||
163 | set_debugreg(*dr7, 7); | 163 | set_debugreg(*dr7, 7); |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 4d1c746892eb..e4b503d5558c 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -52,13 +52,13 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
52 | regs->sp <= curbase + THREAD_SIZE) | 52 | regs->sp <= curbase + THREAD_SIZE) |
53 | return; | 53 | return; |
54 | 54 | ||
55 | irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) + | 55 | irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) + |
56 | STACK_TOP_MARGIN; | 56 | STACK_TOP_MARGIN; |
57 | irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr); | 57 | irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr); |
58 | if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) | 58 | if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) |
59 | return; | 59 | return; |
60 | 60 | ||
61 | oist = &__get_cpu_var(orig_ist); | 61 | oist = this_cpu_ptr(&orig_ist); |
62 | estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; | 62 | estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; |
63 | estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; | 63 | estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; |
64 | if (regs->sp >= estack_top && regs->sp <= estack_bottom) | 64 | if (regs->sp >= estack_top && regs->sp <= estack_bottom) |
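Note the two different treatments in the irq_64.c hunk: the per-cpu array yields an address via this_cpu_ptr(), while the per-cpu pointer variable irq_stack_ptr is read by value with __this_cpu_read(). A sketch of that distinction with hypothetical variables:

#include <linux/percpu.h>
#include <linux/kernel.h>

static DEFINE_PER_CPU(char, my_stack[4096]);	/* per-cpu array   */
static DEFINE_PER_CPU(char *, my_stack_end);	/* per-cpu pointer */

static void my_check(void)
{
	/* Address of this CPU's array instance: pure address arithmetic. */
	unsigned long top = (unsigned long)this_cpu_ptr(my_stack);

	/* Value stored in this CPU's pointer variable: a single per-cpu load. */
	unsigned long bottom = (unsigned long)__this_cpu_read(my_stack_end);

	WARN_ON(bottom <= top);
}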
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 3dd8e2c4d74a..2b68102dbbeb 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -243,9 +243,9 @@ u32 kvm_read_and_reset_pf_reason(void) | |||
243 | { | 243 | { |
244 | u32 reason = 0; | 244 | u32 reason = 0; |
245 | 245 | ||
246 | if (__get_cpu_var(apf_reason).enabled) { | 246 | if (__this_cpu_read(apf_reason.enabled)) { |
247 | reason = __get_cpu_var(apf_reason).reason; | 247 | reason = __this_cpu_read(apf_reason.reason); |
248 | __get_cpu_var(apf_reason).reason = 0; | 248 | __this_cpu_write(apf_reason.reason, 0); |
249 | } | 249 | } |
250 | 250 | ||
251 | return reason; | 251 | return reason; |
@@ -318,7 +318,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val) | |||
318 | * there's no need for lock or memory barriers. | 318 | * there's no need for lock or memory barriers. |
319 | * An optimization barrier is implied in apic write. | 319 | * An optimization barrier is implied in apic write. |
320 | */ | 320 | */ |
321 | if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi))) | 321 | if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi))) |
322 | return; | 322 | return; |
323 | apic_write(APIC_EOI, APIC_EOI_ACK); | 323 | apic_write(APIC_EOI, APIC_EOI_ACK); |
324 | } | 324 | } |
@@ -329,13 +329,13 @@ void kvm_guest_cpu_init(void) | |||
329 | return; | 329 | return; |
330 | 330 | ||
331 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { | 331 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { |
332 | u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason)); | 332 | u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); |
333 | 333 | ||
334 | #ifdef CONFIG_PREEMPT | 334 | #ifdef CONFIG_PREEMPT |
335 | pa |= KVM_ASYNC_PF_SEND_ALWAYS; | 335 | pa |= KVM_ASYNC_PF_SEND_ALWAYS; |
336 | #endif | 336 | #endif |
337 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); | 337 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); |
338 | __get_cpu_var(apf_reason).enabled = 1; | 338 | __this_cpu_write(apf_reason.enabled, 1); |
339 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", | 339 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", |
340 | smp_processor_id()); | 340 | smp_processor_id()); |
341 | } | 341 | } |
@@ -344,8 +344,8 @@ void kvm_guest_cpu_init(void) | |||
344 | unsigned long pa; | 344 | unsigned long pa; |
345 | /* Size alignment is implied but just to make it explicit. */ | 345 | /* Size alignment is implied but just to make it explicit. */ |
346 | BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); | 346 | BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); |
347 | __get_cpu_var(kvm_apic_eoi) = 0; | 347 | __this_cpu_write(kvm_apic_eoi, 0); |
348 | pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi)) | 348 | pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) |
349 | | KVM_MSR_ENABLED; | 349 | | KVM_MSR_ENABLED; |
350 | wrmsrl(MSR_KVM_PV_EOI_EN, pa); | 350 | wrmsrl(MSR_KVM_PV_EOI_EN, pa); |
351 | } | 351 | } |
@@ -356,11 +356,11 @@ void kvm_guest_cpu_init(void) | |||
356 | 356 | ||
357 | static void kvm_pv_disable_apf(void) | 357 | static void kvm_pv_disable_apf(void) |
358 | { | 358 | { |
359 | if (!__get_cpu_var(apf_reason).enabled) | 359 | if (!__this_cpu_read(apf_reason.enabled)) |
360 | return; | 360 | return; |
361 | 361 | ||
362 | wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); | 362 | wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); |
363 | __get_cpu_var(apf_reason).enabled = 0; | 363 | __this_cpu_write(apf_reason.enabled, 0); |
364 | 364 | ||
365 | printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", | 365 | printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", |
366 | smp_processor_id()); | 366 | smp_processor_id()); |
@@ -716,7 +716,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) | |||
716 | if (in_nmi()) | 716 | if (in_nmi()) |
717 | return; | 717 | return; |
718 | 718 | ||
719 | w = &__get_cpu_var(klock_waiting); | 719 | w = this_cpu_ptr(&klock_waiting); |
720 | cpu = smp_processor_id(); | 720 | cpu = smp_processor_id(); |
721 | start = spin_time_start(); | 721 | start = spin_time_start(); |
722 | 722 | ||
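The kvm.c hunks show that __this_cpu_read()/__this_cpu_write() can name a member of a per-cpu struct directly, so no intermediate pointer has to be formed at all. A sketch with a hypothetical state struct:

#include <linux/percpu.h>
#include <linux/types.h>

struct my_pv_state {
	u32 reason;
	u8  enabled;
};

static DEFINE_PER_CPU(struct my_pv_state, my_state);

static u32 my_read_and_reset(void)
{
	u32 reason = 0;

	/* was: if (__get_cpu_var(my_state).enabled) ... */
	if (__this_cpu_read(my_state.enabled)) {
		reason = __this_cpu_read(my_state.reason);
		__this_cpu_write(my_state.reason, 0);
	}
	return reason;
}

On x86 each of these accesses becomes one segment-prefixed instruction, which is the saving the changelog is after.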
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index ddf742768ecf..1b0e90658d8d 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -670,7 +670,7 @@ static int svm_hardware_enable(void *garbage) | |||
670 | 670 | ||
671 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { | 671 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
672 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); | 672 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); |
673 | __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT; | 673 | __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); |
674 | } | 674 | } |
675 | 675 | ||
676 | 676 | ||
@@ -1312,8 +1312,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
1312 | rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); | 1312 | rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); |
1313 | 1313 | ||
1314 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR) && | 1314 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR) && |
1315 | svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) { | 1315 | svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) { |
1316 | __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio; | 1316 | __this_cpu_write(current_tsc_ratio, svm->tsc_ratio); |
1317 | wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio); | 1317 | wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio); |
1318 | } | 1318 | } |
1319 | } | 1319 | } |
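The svm.c change is a read-compare-write on a per-cpu cache of an MSR value. A sketch of that shape, with hypothetical names and a stub standing in for the real wrmsrl() call:

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u64, my_cached_ratio);

/* Hypothetical stand-in for the hardware MSR write. */
static void my_write_msr(u64 ratio) { }

static void my_set_ratio(u64 ratio)
{
	/* Only touch the (expensive) MSR when this CPU's cached copy differs. */
	if (__this_cpu_read(my_cached_ratio) != ratio) {
		__this_cpu_write(my_cached_ratio, ratio);
		my_write_msr(ratio);
	}
}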
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index bfe11cf124a1..36cf28a910b8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -1601,7 +1601,7 @@ static void reload_tss(void) | |||
1601 | /* | 1601 | /* |
1602 | * VT restores TR but not its size. Useless. | 1602 | * VT restores TR but not its size. Useless. |
1603 | */ | 1603 | */ |
1604 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); | 1604 | struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); |
1605 | struct desc_struct *descs; | 1605 | struct desc_struct *descs; |
1606 | 1606 | ||
1607 | descs = (void *)gdt->address; | 1607 | descs = (void *)gdt->address; |
@@ -1647,7 +1647,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) | |||
1647 | 1647 | ||
1648 | static unsigned long segment_base(u16 selector) | 1648 | static unsigned long segment_base(u16 selector) |
1649 | { | 1649 | { |
1650 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); | 1650 | struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); |
1651 | struct desc_struct *d; | 1651 | struct desc_struct *d; |
1652 | unsigned long table_base; | 1652 | unsigned long table_base; |
1653 | unsigned long v; | 1653 | unsigned long v; |
@@ -1777,7 +1777,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
1777 | */ | 1777 | */ |
1778 | if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) | 1778 | if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) |
1779 | stts(); | 1779 | stts(); |
1780 | load_gdt(&__get_cpu_var(host_gdt)); | 1780 | load_gdt(this_cpu_ptr(&host_gdt)); |
1781 | } | 1781 | } |
1782 | 1782 | ||
1783 | static void vmx_load_host_state(struct vcpu_vmx *vmx) | 1783 | static void vmx_load_host_state(struct vcpu_vmx *vmx) |
@@ -1807,7 +1807,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
1807 | } | 1807 | } |
1808 | 1808 | ||
1809 | if (vmx->loaded_vmcs->cpu != cpu) { | 1809 | if (vmx->loaded_vmcs->cpu != cpu) { |
1810 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); | 1810 | struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); |
1811 | unsigned long sysenter_esp; | 1811 | unsigned long sysenter_esp; |
1812 | 1812 | ||
1813 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); | 1813 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
@@ -2744,7 +2744,7 @@ static int hardware_enable(void *garbage) | |||
2744 | ept_sync_global(); | 2744 | ept_sync_global(); |
2745 | } | 2745 | } |
2746 | 2746 | ||
2747 | native_store_gdt(&__get_cpu_var(host_gdt)); | 2747 | native_store_gdt(this_cpu_ptr(&host_gdt)); |
2748 | 2748 | ||
2749 | return 0; | 2749 | return 0; |
2750 | } | 2750 | } |
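In vmx.c the per-cpu descriptor table is only ever handed to helpers that take a pointer, so each conversion is a pure this_cpu_ptr() address calculation. A sketch, assuming a hypothetical per-cpu struct and helper functions in place of native_store_gdt()/load_gdt():

#include <linux/percpu.h>

struct my_desc_ptr {
	unsigned short size;
	unsigned long  address;
};

static DEFINE_PER_CPU(struct my_desc_ptr, my_gdt);

/* Hypothetical helpers standing in for the real descriptor-table accessors. */
static void my_store(struct my_desc_ptr *p) { }
static void my_load(const struct my_desc_ptr *p) { }

static void my_save_and_restore(void)
{
	my_store(this_cpu_ptr(&my_gdt));	/* was &__get_cpu_var(my_gdt) */
	my_load(this_cpu_ptr(&my_gdt));
}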
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8f1e22d3b286..c84ee536f9a3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1556,7 +1556,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1556 | 1556 | ||
1557 | /* Keep irq disabled to prevent changes to the clock */ | 1557 | /* Keep irq disabled to prevent changes to the clock */ |
1558 | local_irq_save(flags); | 1558 | local_irq_save(flags); |
1559 | this_tsc_khz = __get_cpu_var(cpu_tsc_khz); | 1559 | this_tsc_khz = __this_cpu_read(cpu_tsc_khz); |
1560 | if (unlikely(this_tsc_khz == 0)) { | 1560 | if (unlikely(this_tsc_khz == 0)) { |
1561 | local_irq_restore(flags); | 1561 | local_irq_restore(flags); |
1562 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); | 1562 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); |
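Here a single per-cpu scalar is fetched while interrupts are off, so the whole access collapses into one per-cpu load via __this_cpu_read(). A sketch with a hypothetical frequency variable:

#include <linux/percpu.h>
#include <linux/irqflags.h>

static DEFINE_PER_CPU(unsigned long, my_tsc_khz);

static unsigned long my_sample_khz(void)
{
	unsigned long flags, khz;

	local_irq_save(flags);			/* stay on this CPU, block updates */
	khz = __this_cpu_read(my_tsc_khz);	/* was __get_cpu_var(my_tsc_khz) */
	local_irq_restore(flags);

	return khz;
}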
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index dd89a13f1051..b4f2e7e9e907 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c | |||
@@ -140,7 +140,7 @@ static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); | |||
140 | 140 | ||
141 | bool kmemcheck_active(struct pt_regs *regs) | 141 | bool kmemcheck_active(struct pt_regs *regs) |
142 | { | 142 | { |
143 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 143 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
144 | 144 | ||
145 | return data->balance > 0; | 145 | return data->balance > 0; |
146 | } | 146 | } |
@@ -148,7 +148,7 @@ bool kmemcheck_active(struct pt_regs *regs) | |||
148 | /* Save an address that needs to be shown/hidden */ | 148 | /* Save an address that needs to be shown/hidden */ |
149 | static void kmemcheck_save_addr(unsigned long addr) | 149 | static void kmemcheck_save_addr(unsigned long addr) |
150 | { | 150 | { |
151 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 151 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
152 | 152 | ||
153 | BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); | 153 | BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); |
154 | data->addr[data->n_addrs++] = addr; | 154 | data->addr[data->n_addrs++] = addr; |
@@ -156,7 +156,7 @@ static void kmemcheck_save_addr(unsigned long addr) | |||
156 | 156 | ||
157 | static unsigned int kmemcheck_show_all(void) | 157 | static unsigned int kmemcheck_show_all(void) |
158 | { | 158 | { |
159 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 159 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
160 | unsigned int i; | 160 | unsigned int i; |
161 | unsigned int n; | 161 | unsigned int n; |
162 | 162 | ||
@@ -169,7 +169,7 @@ static unsigned int kmemcheck_show_all(void) | |||
169 | 169 | ||
170 | static unsigned int kmemcheck_hide_all(void) | 170 | static unsigned int kmemcheck_hide_all(void) |
171 | { | 171 | { |
172 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 172 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
173 | unsigned int i; | 173 | unsigned int i; |
174 | unsigned int n; | 174 | unsigned int n; |
175 | 175 | ||
@@ -185,7 +185,7 @@ static unsigned int kmemcheck_hide_all(void) | |||
185 | */ | 185 | */ |
186 | void kmemcheck_show(struct pt_regs *regs) | 186 | void kmemcheck_show(struct pt_regs *regs) |
187 | { | 187 | { |
188 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 188 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
189 | 189 | ||
190 | BUG_ON(!irqs_disabled()); | 190 | BUG_ON(!irqs_disabled()); |
191 | 191 | ||
@@ -226,7 +226,7 @@ void kmemcheck_show(struct pt_regs *regs) | |||
226 | */ | 226 | */ |
227 | void kmemcheck_hide(struct pt_regs *regs) | 227 | void kmemcheck_hide(struct pt_regs *regs) |
228 | { | 228 | { |
229 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 229 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
230 | int n; | 230 | int n; |
231 | 231 | ||
232 | BUG_ON(!irqs_disabled()); | 232 | BUG_ON(!irqs_disabled()); |
@@ -528,7 +528,7 @@ static void kmemcheck_access(struct pt_regs *regs, | |||
528 | const uint8_t *insn_primary; | 528 | const uint8_t *insn_primary; |
529 | unsigned int size; | 529 | unsigned int size; |
530 | 530 | ||
531 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 531 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
532 | 532 | ||
533 | /* Recursive fault -- ouch. */ | 533 | /* Recursive fault -- ouch. */ |
534 | if (data->busy) { | 534 | if (data->busy) { |
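The kmemcheck helpers all follow the same shape: take a pointer to this CPU's context once at the top of the function, then work through that pointer. A minimal sketch with a hypothetical context struct:

#include <linux/percpu.h>
#include <linux/types.h>

struct my_ctx {
	int balance;
	int busy;
};

static DEFINE_PER_CPU(struct my_ctx, my_ctx_pcpu);

static bool my_active(void)
{
	/* was: &__get_cpu_var(my_ctx_pcpu) */
	struct my_ctx *data = this_cpu_ptr(&my_ctx_pcpu);

	return data->balance > 0;
}

When several fields are touched, keeping the one this_cpu_ptr() result in a local avoids recomputing the per-cpu address for every access.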
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 379e8bd0deea..1d2e6392f5fa 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -64,11 +64,11 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | |||
64 | static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) | 64 | static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) |
65 | { | 65 | { |
66 | if (ctr_running) | 66 | if (ctr_running) |
67 | model->check_ctrs(regs, &__get_cpu_var(cpu_msrs)); | 67 | model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs)); |
68 | else if (!nmi_enabled) | 68 | else if (!nmi_enabled) |
69 | return NMI_DONE; | 69 | return NMI_DONE; |
70 | else | 70 | else |
71 | model->stop(&__get_cpu_var(cpu_msrs)); | 71 | model->stop(this_cpu_ptr(&cpu_msrs)); |
72 | return NMI_HANDLED; | 72 | return NMI_HANDLED; |
73 | } | 73 | } |
74 | 74 | ||
@@ -91,7 +91,7 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs) | |||
91 | 91 | ||
92 | static void nmi_cpu_start(void *dummy) | 92 | static void nmi_cpu_start(void *dummy) |
93 | { | 93 | { |
94 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | 94 | struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs); |
95 | if (!msrs->controls) | 95 | if (!msrs->controls) |
96 | WARN_ON_ONCE(1); | 96 | WARN_ON_ONCE(1); |
97 | else | 97 | else |
@@ -111,7 +111,7 @@ static int nmi_start(void) | |||
111 | 111 | ||
112 | static void nmi_cpu_stop(void *dummy) | 112 | static void nmi_cpu_stop(void *dummy) |
113 | { | 113 | { |
114 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | 114 | struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs); |
115 | if (!msrs->controls) | 115 | if (!msrs->controls) |
116 | WARN_ON_ONCE(1); | 116 | WARN_ON_ONCE(1); |
117 | else | 117 | else |
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index 5c86786bbfd2..a244237f3cfa 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c | |||
@@ -365,7 +365,7 @@ __setup("uvrtcevt", uv_enable_evt_rtc); | |||
365 | 365 | ||
366 | static __init void uv_rtc_register_clockevents(struct work_struct *dummy) | 366 | static __init void uv_rtc_register_clockevents(struct work_struct *dummy) |
367 | { | 367 | { |
368 | struct clock_event_device *ced = &__get_cpu_var(cpu_ced); | 368 | struct clock_event_device *ced = this_cpu_ptr(&cpu_ced); |
369 | 369 | ||
370 | *ced = clock_event_device_uv; | 370 | *ced = clock_event_device_uv; |
371 | ced->cpumask = cpumask_of(smp_processor_id()); | 371 | ced->cpumask = cpumask_of(smp_processor_id()); |
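uv_rtc_register_clockevents() initialises this CPU's clock event device from a global template, so the pointer form is the natural fit: take this_cpu_ptr() once and assign through it. A sketch with hypothetical names:

#include <linux/percpu.h>

struct my_ced {
	const char *name;
	int rating;
};

static const struct my_ced my_ced_template = {
	.name	= "my-rtc",
	.rating	= 400,
};

static DEFINE_PER_CPU(struct my_ced, my_ced_pcpu);

static void my_register(void)
{
	/* was: &__get_cpu_var(my_ced_pcpu) */
	struct my_ced *ced = this_cpu_ptr(&my_ced_pcpu);

	*ced = my_ced_template;		/* ordinary struct assignment via the pointer */
}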
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c0cb11fb5008..2628ee556756 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -821,7 +821,7 @@ static void xen_convert_trap_info(const struct desc_ptr *desc, | |||
821 | 821 | ||
822 | void xen_copy_trap_info(struct trap_info *traps) | 822 | void xen_copy_trap_info(struct trap_info *traps) |
823 | { | 823 | { |
824 | const struct desc_ptr *desc = &__get_cpu_var(idt_desc); | 824 | const struct desc_ptr *desc = this_cpu_ptr(&idt_desc); |
825 | 825 | ||
826 | xen_convert_trap_info(desc, traps); | 826 | xen_convert_trap_info(desc, traps); |
827 | } | 827 | } |
@@ -838,7 +838,7 @@ static void xen_load_idt(const struct desc_ptr *desc) | |||
838 | 838 | ||
839 | spin_lock(&lock); | 839 | spin_lock(&lock); |
840 | 840 | ||
841 | __get_cpu_var(idt_desc) = *desc; | 841 | memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); |
842 | 842 | ||
843 | xen_convert_trap_info(desc, traps); | 843 | xen_convert_trap_info(desc, traps); |
844 | 844 | ||
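xen_load_idt() is one of the few places where __get_cpu_var() was used as an lvalue for a whole-struct assignment; the generic this_cpu_write() operations cover 1-, 2-, 4- and 8-byte scalars, so a multi-word struct is copied with an explicit memcpy() through this_cpu_ptr() instead. A sketch with a hypothetical descriptor struct:

#include <linux/percpu.h>
#include <linux/string.h>

struct my_desc_ptr {
	unsigned short size;
	unsigned long  address;
};

static DEFINE_PER_CPU(struct my_desc_ptr, my_idt_desc);

static void my_load_idt(const struct my_desc_ptr *desc)
{
	/* was: __get_cpu_var(my_idt_desc) = *desc; */
	memcpy(this_cpu_ptr(&my_idt_desc), desc, sizeof(my_idt_desc));
}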
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 0d82003e76ad..ea54a08d8301 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c | |||
@@ -54,7 +54,7 @@ DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags); | |||
54 | 54 | ||
55 | void xen_mc_flush(void) | 55 | void xen_mc_flush(void) |
56 | { | 56 | { |
57 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 57 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
58 | struct multicall_entry *mc; | 58 | struct multicall_entry *mc; |
59 | int ret = 0; | 59 | int ret = 0; |
60 | unsigned long flags; | 60 | unsigned long flags; |
@@ -131,7 +131,7 @@ void xen_mc_flush(void) | |||
131 | 131 | ||
132 | struct multicall_space __xen_mc_entry(size_t args) | 132 | struct multicall_space __xen_mc_entry(size_t args) |
133 | { | 133 | { |
134 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 134 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
135 | struct multicall_space ret; | 135 | struct multicall_space ret; |
136 | unsigned argidx = roundup(b->argidx, sizeof(u64)); | 136 | unsigned argidx = roundup(b->argidx, sizeof(u64)); |
137 | 137 | ||
@@ -162,7 +162,7 @@ struct multicall_space __xen_mc_entry(size_t args) | |||
162 | 162 | ||
163 | struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) | 163 | struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) |
164 | { | 164 | { |
165 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 165 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
166 | struct multicall_space ret = { NULL, NULL }; | 166 | struct multicall_space ret = { NULL, NULL }; |
167 | 167 | ||
168 | BUG_ON(preemptible()); | 168 | BUG_ON(preemptible()); |
@@ -192,7 +192,7 @@ out: | |||
192 | 192 | ||
193 | void xen_mc_callback(void (*fn)(void *), void *data) | 193 | void xen_mc_callback(void (*fn)(void *), void *data) |
194 | { | 194 | { |
195 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 195 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
196 | struct callback *cb; | 196 | struct callback *cb; |
197 | 197 | ||
198 | if (b->cbidx == MC_BATCH) { | 198 | if (b->cbidx == MC_BATCH) { |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 0ba5f3b967f0..23b45eb9a89c 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -109,7 +109,7 @@ static bool xen_pvspin = true; | |||
109 | __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) | 109 | __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) |
110 | { | 110 | { |
111 | int irq = __this_cpu_read(lock_kicker_irq); | 111 | int irq = __this_cpu_read(lock_kicker_irq); |
112 | struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting); | 112 | struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting); |
113 | int cpu = smp_processor_id(); | 113 | int cpu = smp_processor_id(); |
114 | u64 start; | 114 | u64 start; |
115 | unsigned long flags; | 115 | unsigned long flags; |
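The Xen slow-path lock code, like the KVM variant earlier in the patch, mixes both forms in one function: the per-cpu irq number is read by value, while the per-cpu waiting slot is used by address. A sketch with hypothetical variables:

#include <linux/percpu.h>

struct my_waiting {
	void *lock;
	int   want;
};

static DEFINE_PER_CPU(int, my_kicker_irq) = -1;
static DEFINE_PER_CPU(struct my_waiting, my_waiting_slot);

static void my_lock_spinning(void *lock, int want)
{
	int irq = __this_cpu_read(my_kicker_irq);		/* value   */
	struct my_waiting *w = this_cpu_ptr(&my_waiting_slot);	/* address */

	if (irq == -1)
		return;

	w->want = want;
	w->lock = lock;
}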
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 5718b0b58b60..a1d430b112b3 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -80,7 +80,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res) | |||
80 | 80 | ||
81 | BUG_ON(preemptible()); | 81 | BUG_ON(preemptible()); |
82 | 82 | ||
83 | state = &__get_cpu_var(xen_runstate); | 83 | state = this_cpu_ptr(&xen_runstate); |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * The runstate info is always updated by the hypervisor on | 86 | * The runstate info is always updated by the hypervisor on |
@@ -123,7 +123,7 @@ static void do_stolen_accounting(void) | |||
123 | 123 | ||
124 | WARN_ON(state.state != RUNSTATE_running); | 124 | WARN_ON(state.state != RUNSTATE_running); |
125 | 125 | ||
126 | snap = &__get_cpu_var(xen_runstate_snapshot); | 126 | snap = this_cpu_ptr(&xen_runstate_snapshot); |
127 | 127 | ||
128 | /* work out how much time the VCPU has not been runn*ing* */ | 128 | /* work out how much time the VCPU has not been runn*ing* */ |
129 | runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; | 129 | runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; |
@@ -158,7 +158,7 @@ cycle_t xen_clocksource_read(void) | |||
158 | cycle_t ret; | 158 | cycle_t ret; |
159 | 159 | ||
160 | preempt_disable_notrace(); | 160 | preempt_disable_notrace(); |
161 | src = &__get_cpu_var(xen_vcpu)->time; | 161 | src = this_cpu_ptr(&xen_vcpu->time); |
162 | ret = pvclock_clocksource_read(src); | 162 | ret = pvclock_clocksource_read(src); |
163 | preempt_enable_notrace(); | 163 | preempt_enable_notrace(); |
164 | return ret; | 164 | return ret; |
@@ -397,7 +397,7 @@ static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt. | |||
397 | 397 | ||
398 | static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) | 398 | static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) |
399 | { | 399 | { |
400 | struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt; | 400 | struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt); |
401 | irqreturn_t ret; | 401 | irqreturn_t ret; |
402 | 402 | ||
403 | ret = IRQ_NONE; | 403 | ret = IRQ_NONE; |
@@ -460,7 +460,7 @@ void xen_setup_cpu_clockevents(void) | |||
460 | { | 460 | { |
461 | BUG_ON(preemptible()); | 461 | BUG_ON(preemptible()); |
462 | 462 | ||
463 | clockevents_register_device(&__get_cpu_var(xen_clock_events).evt); | 463 | clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); |
464 | } | 464 | } |
465 | 465 | ||
466 | void xen_timer_resume(void) | 466 | void xen_timer_resume(void) |
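The final hunks show that this_cpu_ptr() composes with member access: taking the address of a field inside a per-cpu struct becomes this_cpu_ptr(&per_cpu_var.member), which yields the address of that field in this CPU's instance. A closing sketch with a hypothetical wrapper struct:

#include <linux/percpu.h>

struct my_clock_event {
	int rating;
};

struct my_clock_wrapper {
	struct my_clock_event evt;
	char name[16];
};

static DEFINE_PER_CPU(struct my_clock_wrapper, my_clock_pcpu);

static void my_register_clockevent(void)
{
	/* was: &__get_cpu_var(my_clock_pcpu).evt */
	struct my_clock_event *evt = this_cpu_ptr(&my_clock_pcpu.evt);

	evt->rating = 100;
}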