author     Linus Torvalds <torvalds@linux-foundation.org>    2014-10-15 01:48:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-10-15 01:48:18 -0400
commit     0429fbc0bdc297d64188483ba029a23773ae07b0 (patch)
tree       67de46978c90f37540dd6ded1db20eb53a569030
parent     6929c358972facf2999f8768815c40dd88514fc2 (diff)
parent     513d1a2884a49654f368b5fa25ef186e976bdada (diff)
Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu consistent-ops changes from Tejun Heo:
"Way back, before the current percpu allocator was implemented, static
and dynamic percpu memory areas were allocated and handled separately
and had their own accessors. The distinction has been gone for many
years now; however, the now duplicate two sets of accessors remained
with the pointer based ones - this_cpu_*() - evolving various other
operations over time. During the process, we also accumulated other
inconsistent operations.
This pull request contains Christoph's patches to clean up the
duplicate accessor situation. __get_cpu_var() uses are replaced with
this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr().
Unfortunately, the former sometimes is tricky thanks to C being a bit
messy with the distinction between lvalues and pointers, which led to
a rather ugly solution for cpumask_var_t involving the introduction of
this_cpu_cpumask_var_ptr().
This converts most of the uses but not all. Christoph will follow up
with the remaining conversions in this merge window and hopefully
remove the obsolete accessors"
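To make the conversion concrete, here is a minimal sketch of the old and new accessor styles in kernel-style C. The per-cpu variables and the struct below are hypothetical, invented purely for illustration; only the accessor names come from the series described above.

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical example type and per-cpu variables (not from this patch set). */
struct demo_stats {
	unsigned long events;
};

static DEFINE_PER_CPU(struct demo_stats, demo_stats);
static DEFINE_PER_CPU(cpumask_var_t, demo_mask);

static void demo_accessors(void)
{
	struct demo_stats *st;

	/* Old style: __get_cpu_var() yielded an lvalue, so callers took its
	 * address to get a pointer:
	 *	st = &__get_cpu_var(demo_stats);
	 *	__get_cpu_var(demo_stats).events++;
	 */

	/* New style: this_cpu_ptr() returns the pointer directly ... */
	st = this_cpu_ptr(&demo_stats);
	st->events = 0;

	/* ... and simple field updates become this_cpu operations. */
	__this_cpu_inc(demo_stats.events);

	/* __this_cpu_ptr() users become raw_cpu_ptr() (no preemption checks). */
	st = raw_cpu_ptr(&demo_stats);

	/*
	 * cpumask_var_t is the awkward case mentioned above: depending on
	 * CONFIG_CPUMASK_OFFSTACK it is either an array or a pointer, hence
	 * the dedicated this_cpu_cpumask_var_ptr() helper.
	 */
	cpumask_clear(this_cpu_cpumask_var_ptr(demo_mask));
}
```

The bulk of the diff below is exactly this mechanical substitution, applied file by file across the architectures listed in the shortlog.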
* 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits)
irqchip: Properly fetch the per cpu offset
percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix
ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write.
percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t
Revert "powerpc: Replace __get_cpu_var uses"
percpu: Remove __this_cpu_ptr
clocksource: Replace __this_cpu_ptr with raw_cpu_ptr
sparc: Replace __get_cpu_var uses
avr32: Replace __get_cpu_var with __this_cpu_write
blackfin: Replace __get_cpu_var uses
tile: Use this_cpu_ptr() for hardware counters
tile: Replace __get_cpu_var uses
powerpc: Replace __get_cpu_var uses
alpha: Replace __get_cpu_var
ia64: Replace __get_cpu_var uses
s390: cio driver &__get_cpu_var replacements
s390: Replace __get_cpu_var uses
mips: Replace __get_cpu_var uses
MIPS: Replace __get_cpu_var uses in FPU emulator.
arm: Replace __this_cpu_ptr with raw_cpu_ptr
...
149 files changed, 560 insertions(+), 547 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index c52e7f0ee5f6..5c218aa3f3df 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -431,7 +431,7 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
431 | */ | 431 | */ |
432 | static int alpha_pmu_add(struct perf_event *event, int flags) | 432 | static int alpha_pmu_add(struct perf_event *event, int flags) |
433 | { | 433 | { |
434 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 434 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
435 | struct hw_perf_event *hwc = &event->hw; | 435 | struct hw_perf_event *hwc = &event->hw; |
436 | int n0; | 436 | int n0; |
437 | int ret; | 437 | int ret; |
@@ -483,7 +483,7 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
483 | */ | 483 | */ |
484 | static void alpha_pmu_del(struct perf_event *event, int flags) | 484 | static void alpha_pmu_del(struct perf_event *event, int flags) |
485 | { | 485 | { |
486 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 486 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
487 | struct hw_perf_event *hwc = &event->hw; | 487 | struct hw_perf_event *hwc = &event->hw; |
488 | unsigned long irq_flags; | 488 | unsigned long irq_flags; |
489 | int j; | 489 | int j; |
@@ -531,7 +531,7 @@ static void alpha_pmu_read(struct perf_event *event)
531 | static void alpha_pmu_stop(struct perf_event *event, int flags) | 531 | static void alpha_pmu_stop(struct perf_event *event, int flags) |
532 | { | 532 | { |
533 | struct hw_perf_event *hwc = &event->hw; | 533 | struct hw_perf_event *hwc = &event->hw; |
534 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 534 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
535 | 535 | ||
536 | if (!(hwc->state & PERF_HES_STOPPED)) { | 536 | if (!(hwc->state & PERF_HES_STOPPED)) { |
537 | cpuc->idx_mask &= ~(1UL<<hwc->idx); | 537 | cpuc->idx_mask &= ~(1UL<<hwc->idx); |
@@ -551,7 +551,7 @@ static void alpha_pmu_stop(struct perf_event *event, int flags)
551 | static void alpha_pmu_start(struct perf_event *event, int flags) | 551 | static void alpha_pmu_start(struct perf_event *event, int flags) |
552 | { | 552 | { |
553 | struct hw_perf_event *hwc = &event->hw; | 553 | struct hw_perf_event *hwc = &event->hw; |
554 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 554 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
555 | 555 | ||
556 | if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) | 556 | if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) |
557 | return; | 557 | return; |
@@ -724,7 +724,7 @@ static int alpha_pmu_event_init(struct perf_event *event)
724 | */ | 724 | */ |
725 | static void alpha_pmu_enable(struct pmu *pmu) | 725 | static void alpha_pmu_enable(struct pmu *pmu) |
726 | { | 726 | { |
727 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 727 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
728 | 728 | ||
729 | if (cpuc->enabled) | 729 | if (cpuc->enabled) |
730 | return; | 730 | return; |
@@ -750,7 +750,7 @@ static void alpha_pmu_enable(struct pmu *pmu)
750 | 750 | ||
751 | static void alpha_pmu_disable(struct pmu *pmu) | 751 | static void alpha_pmu_disable(struct pmu *pmu) |
752 | { | 752 | { |
753 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 753 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
754 | 754 | ||
755 | if (!cpuc->enabled) | 755 | if (!cpuc->enabled) |
756 | return; | 756 | return; |
@@ -814,8 +814,8 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
814 | struct hw_perf_event *hwc; | 814 | struct hw_perf_event *hwc; |
815 | int idx, j; | 815 | int idx, j; |
816 | 816 | ||
817 | __get_cpu_var(irq_pmi_count)++; | 817 | __this_cpu_inc(irq_pmi_count); |
818 | cpuc = &__get_cpu_var(cpu_hw_events); | 818 | cpuc = this_cpu_ptr(&cpu_hw_events); |
819 | 819 | ||
820 | /* Completely counting through the PMC's period to trigger a new PMC | 820 | /* Completely counting through the PMC's period to trigger a new PMC |
821 | * overflow interrupt while in this interrupt routine is utterly | 821 | * overflow interrupt while in this interrupt routine is utterly |
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index ee39cee8064c..643a9dcdf093 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -56,9 +56,9 @@ unsigned long est_cycle_freq;
56 | 56 | ||
57 | DEFINE_PER_CPU(u8, irq_work_pending); | 57 | DEFINE_PER_CPU(u8, irq_work_pending); |
58 | 58 | ||
59 | #define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 | 59 | #define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1) |
60 | #define test_irq_work_pending() __get_cpu_var(irq_work_pending) | 60 | #define test_irq_work_pending() __this_cpu_read(irq_work_pending) |
61 | #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 | 61 | #define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) |
62 | 62 | ||
63 | void arch_irq_work_raise(void) | 63 | void arch_irq_work_raise(void) |
64 | { | 64 | { |
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index dfc32130bc44..93090213c71c 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -92,7 +92,7 @@ static int twd_timer_ack(void)
92 | 92 | ||
93 | static void twd_timer_stop(void) | 93 | static void twd_timer_stop(void) |
94 | { | 94 | { |
95 | struct clock_event_device *clk = __this_cpu_ptr(twd_evt); | 95 | struct clock_event_device *clk = raw_cpu_ptr(twd_evt); |
96 | 96 | ||
97 | twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk); | 97 | twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk); |
98 | disable_percpu_irq(clk->irq); | 98 | disable_percpu_irq(clk->irq); |
@@ -108,7 +108,7 @@ static void twd_update_frequency(void *new_rate)
108 | { | 108 | { |
109 | twd_timer_rate = *((unsigned long *) new_rate); | 109 | twd_timer_rate = *((unsigned long *) new_rate); |
110 | 110 | ||
111 | clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate); | 111 | clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate); |
112 | } | 112 | } |
113 | 113 | ||
114 | static int twd_rate_change(struct notifier_block *nb, | 114 | static int twd_rate_change(struct notifier_block *nb, |
@@ -134,7 +134,7 @@ static struct notifier_block twd_clk_nb = {
134 | 134 | ||
135 | static int twd_clk_init(void) | 135 | static int twd_clk_init(void) |
136 | { | 136 | { |
137 | if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) | 137 | if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) |
138 | return clk_notifier_register(twd_clk, &twd_clk_nb); | 138 | return clk_notifier_register(twd_clk, &twd_clk_nb); |
139 | 139 | ||
140 | return 0; | 140 | return 0; |
@@ -153,7 +153,7 @@ static void twd_update_frequency(void *data)
153 | { | 153 | { |
154 | twd_timer_rate = clk_get_rate(twd_clk); | 154 | twd_timer_rate = clk_get_rate(twd_clk); |
155 | 155 | ||
156 | clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate); | 156 | clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate); |
157 | } | 157 | } |
158 | 158 | ||
159 | static int twd_cpufreq_transition(struct notifier_block *nb, | 159 | static int twd_cpufreq_transition(struct notifier_block *nb, |
@@ -179,7 +179,7 @@ static struct notifier_block twd_cpufreq_nb = {
179 | 179 | ||
180 | static int twd_cpufreq_init(void) | 180 | static int twd_cpufreq_init(void) |
181 | { | 181 | { |
182 | if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) | 182 | if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) |
183 | return cpufreq_register_notifier(&twd_cpufreq_nb, | 183 | return cpufreq_register_notifier(&twd_cpufreq_nb, |
184 | CPUFREQ_TRANSITION_NOTIFIER); | 184 | CPUFREQ_TRANSITION_NOTIFIER); |
185 | 185 | ||
@@ -269,7 +269,7 @@ static void twd_get_clock(struct device_node *np)
269 | */ | 269 | */ |
270 | static void twd_timer_setup(void) | 270 | static void twd_timer_setup(void) |
271 | { | 271 | { |
272 | struct clock_event_device *clk = __this_cpu_ptr(twd_evt); | 272 | struct clock_event_device *clk = raw_cpu_ptr(twd_evt); |
273 | int cpu = smp_processor_id(); | 273 | int cpu = smp_processor_id(); |
274 | 274 | ||
275 | /* | 275 | /* |
diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c
index f820e9f25520..a94ece4a72c8 100644
--- a/arch/avr32/kernel/kprobes.c
+++ b/arch/avr32/kernel/kprobes.c
@@ -104,7 +104,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
104 | 104 | ||
105 | static void __kprobes set_current_kprobe(struct kprobe *p) | 105 | static void __kprobes set_current_kprobe(struct kprobe *p) |
106 | { | 106 | { |
107 | __get_cpu_var(current_kprobe) = p; | 107 | __this_cpu_write(current_kprobe, p); |
108 | } | 108 | } |
109 | 109 | ||
110 | static int __kprobes kprobe_handler(struct pt_regs *regs) | 110 | static int __kprobes kprobe_handler(struct pt_regs *regs) |
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 17b5e92e3bc6..fe1160fbff91 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -157,7 +157,7 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
157 | } | 157 | } |
158 | 158 | ||
159 | #define __ipipe_do_root_xirq(ipd, irq) \ | 159 | #define __ipipe_do_root_xirq(ipd, irq) \ |
160 | ((ipd)->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs))) | 160 | ((ipd)->irqs[irq].handler(irq, raw_cpu_ptr(&__ipipe_tick_regs))) |
161 | 161 | ||
162 | #define __ipipe_run_irqtail(irq) /* Must be a macro */ \ | 162 | #define __ipipe_run_irqtail(irq) /* Must be a macro */ \ |
163 | do { \ | 163 | do { \ |
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index ea2032013cc2..1e9c8b0bf486 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -300,7 +300,7 @@ again:
300 | 300 | ||
301 | static void bfin_pmu_stop(struct perf_event *event, int flags) | 301 | static void bfin_pmu_stop(struct perf_event *event, int flags) |
302 | { | 302 | { |
303 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 303 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
304 | struct hw_perf_event *hwc = &event->hw; | 304 | struct hw_perf_event *hwc = &event->hw; |
305 | int idx = hwc->idx; | 305 | int idx = hwc->idx; |
306 | 306 | ||
@@ -318,7 +318,7 @@ static void bfin_pmu_stop(struct perf_event *event, int flags)
318 | 318 | ||
319 | static void bfin_pmu_start(struct perf_event *event, int flags) | 319 | static void bfin_pmu_start(struct perf_event *event, int flags) |
320 | { | 320 | { |
321 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 321 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
322 | struct hw_perf_event *hwc = &event->hw; | 322 | struct hw_perf_event *hwc = &event->hw; |
323 | int idx = hwc->idx; | 323 | int idx = hwc->idx; |
324 | 324 | ||
@@ -335,7 +335,7 @@ static void bfin_pmu_start(struct perf_event *event, int flags)
335 | 335 | ||
336 | static void bfin_pmu_del(struct perf_event *event, int flags) | 336 | static void bfin_pmu_del(struct perf_event *event, int flags) |
337 | { | 337 | { |
338 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 338 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
339 | 339 | ||
340 | bfin_pmu_stop(event, PERF_EF_UPDATE); | 340 | bfin_pmu_stop(event, PERF_EF_UPDATE); |
341 | __clear_bit(event->hw.idx, cpuc->used_mask); | 341 | __clear_bit(event->hw.idx, cpuc->used_mask); |
@@ -345,7 +345,7 @@ static void bfin_pmu_del(struct perf_event *event, int flags)
345 | 345 | ||
346 | static int bfin_pmu_add(struct perf_event *event, int flags) | 346 | static int bfin_pmu_add(struct perf_event *event, int flags) |
347 | { | 347 | { |
348 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 348 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
349 | struct hw_perf_event *hwc = &event->hw; | 349 | struct hw_perf_event *hwc = &event->hw; |
350 | int idx = hwc->idx; | 350 | int idx = hwc->idx; |
351 | int ret = -EAGAIN; | 351 | int ret = -EAGAIN; |
@@ -421,7 +421,7 @@ static int bfin_pmu_event_init(struct perf_event *event)
421 | 421 | ||
422 | static void bfin_pmu_enable(struct pmu *pmu) | 422 | static void bfin_pmu_enable(struct pmu *pmu) |
423 | { | 423 | { |
424 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 424 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
425 | struct perf_event *event; | 425 | struct perf_event *event; |
426 | struct hw_perf_event *hwc; | 426 | struct hw_perf_event *hwc; |
427 | int i; | 427 | int i; |
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 694619365265..dd2af74aff80 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1309,12 +1309,12 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1309 | bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */ | 1309 | bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */ |
1310 | #endif | 1310 | #endif |
1311 | /* This is basically what we need from the register frame. */ | 1311 | /* This is basically what we need from the register frame. */ |
1312 | __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend; | 1312 | __this_cpu_write(__ipipe_tick_regs.ipend, regs->ipend); |
1313 | __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc; | 1313 | __this_cpu_write(__ipipe_tick_regs.pc, regs->pc); |
1314 | if (this_domain != ipipe_root_domain) | 1314 | if (this_domain != ipipe_root_domain) |
1315 | __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10; | 1315 | __this_cpu_and(__ipipe_tick_regs.ipend, ~0x10); |
1316 | else | 1316 | else |
1317 | __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10; | 1317 | __this_cpu_or(__ipipe_tick_regs.ipend, 0x10); |
1318 | } | 1318 | } |
1319 | 1319 | ||
1320 | /* | 1320 | /* |
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index ba6c30d8534d..8ad3e90cc8fc 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -146,7 +146,7 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
146 | platform_clear_ipi(cpu, IRQ_SUPPLE_1); | 146 | platform_clear_ipi(cpu, IRQ_SUPPLE_1); |
147 | 147 | ||
148 | smp_rmb(); | 148 | smp_rmb(); |
149 | bfin_ipi_data = &__get_cpu_var(bfin_ipi); | 149 | bfin_ipi_data = this_cpu_ptr(&bfin_ipi); |
150 | while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) { | 150 | while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) { |
151 | msg = 0; | 151 | msg = 0; |
152 | do { | 152 | do { |
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 029bab36cd91..668786e84af8 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -159,7 +159,7 @@ static inline ia64_vector __ia64_irq_to_vector(int irq)
159 | static inline unsigned int | 159 | static inline unsigned int |
160 | __ia64_local_vector_to_irq (ia64_vector vec) | 160 | __ia64_local_vector_to_irq (ia64_vector vec) |
161 | { | 161 | { |
162 | return __get_cpu_var(vector_irq)[vec]; | 162 | return __this_cpu_read(vector_irq[vec]); |
163 | } | 163 | } |
164 | #endif | 164 | #endif |
165 | 165 | ||
diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h
index 7caa1f44cd95..31eb784866f8 100644
--- a/arch/ia64/include/asm/sn/arch.h
+++ b/arch/ia64/include/asm/sn/arch.h
@@ -57,7 +57,7 @@ struct sn_hub_info_s {
57 | u16 nasid_bitmask; | 57 | u16 nasid_bitmask; |
58 | }; | 58 | }; |
59 | DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); | 59 | DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); |
60 | #define sn_hub_info (&__get_cpu_var(__sn_hub_info)) | 60 | #define sn_hub_info this_cpu_ptr(&__sn_hub_info) |
61 | #define is_shub2() (sn_hub_info->shub2) | 61 | #define is_shub2() (sn_hub_info->shub2) |
62 | #define is_shub1() (sn_hub_info->shub2 == 0) | 62 | #define is_shub1() (sn_hub_info->shub2 == 0) |
63 | 63 | ||
@@ -72,7 +72,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
72 | * cpu. | 72 | * cpu. |
73 | */ | 73 | */ |
74 | DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); | 74 | DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); |
75 | #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) | 75 | #define sn_cnodeid_to_nasid this_cpu_ptr(&__sn_cnodeid_to_nasid[0]) |
76 | 76 | ||
77 | 77 | ||
78 | extern u8 sn_partition_id; | 78 | extern u8 sn_partition_id; |
diff --git a/arch/ia64/include/asm/sn/nodepda.h b/arch/ia64/include/asm/sn/nodepda.h
index ee118b901de4..7c8b4710f071 100644
--- a/arch/ia64/include/asm/sn/nodepda.h
+++ b/arch/ia64/include/asm/sn/nodepda.h
@@ -70,7 +70,7 @@ typedef struct nodepda_s nodepda_t;
70 | */ | 70 | */ |
71 | 71 | ||
72 | DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda); | 72 | DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda); |
73 | #define sn_nodepda (__get_cpu_var(__sn_nodepda)) | 73 | #define sn_nodepda __this_cpu_read(__sn_nodepda) |
74 | #define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid]) | 74 | #define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid]) |
75 | 75 | ||
76 | /* | 76 | /* |
diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h
index d38c7ea5eea5..e8f3585e7e7a 100644
--- a/arch/ia64/include/asm/switch_to.h
+++ b/arch/ia64/include/asm/switch_to.h
@@ -32,7 +32,7 @@ extern void ia64_load_extra (struct task_struct *task);
32 | 32 | ||
33 | #ifdef CONFIG_PERFMON | 33 | #ifdef CONFIG_PERFMON |
34 | DECLARE_PER_CPU(unsigned long, pfm_syst_info); | 34 | DECLARE_PER_CPU(unsigned long, pfm_syst_info); |
35 | # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) | 35 | # define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1) |
36 | #else | 36 | #else |
37 | # define PERFMON_IS_SYSWIDE() (0) | 37 | # define PERFMON_IS_SYSWIDE() (0) |
38 | #endif | 38 | #endif |
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
index 53e9dfacd073..2a88c7204e52 100644
--- a/arch/ia64/include/asm/uv/uv_hub.h
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -108,7 +108,7 @@ struct uv_hub_info_s {
108 | unsigned char n_val; | 108 | unsigned char n_val; |
109 | }; | 109 | }; |
110 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | 110 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); |
111 | #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) | 111 | #define uv_hub_info this_cpu_ptr(&__uv_hub_info) |
112 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) | 112 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) |
113 | 113 | ||
114 | /* | 114 | /* |
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index f2c418281130..812a1e6b3179 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -42,7 +42,7 @@ ia64_vector __ia64_irq_to_vector(int irq)
42 | 42 | ||
43 | unsigned int __ia64_local_vector_to_irq (ia64_vector vec) | 43 | unsigned int __ia64_local_vector_to_irq (ia64_vector vec) |
44 | { | 44 | { |
45 | return __get_cpu_var(vector_irq)[vec]; | 45 | return __this_cpu_read(vector_irq[vec]); |
46 | } | 46 | } |
47 | #endif | 47 | #endif |
48 | 48 | ||
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 03ea78ed64a9..698d8fefde6c 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -330,7 +330,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
330 | int irq; | 330 | int irq; |
331 | struct irq_desc *desc; | 331 | struct irq_desc *desc; |
332 | struct irq_cfg *cfg; | 332 | struct irq_cfg *cfg; |
333 | irq = __get_cpu_var(vector_irq)[vector]; | 333 | irq = __this_cpu_read(vector_irq[vector]); |
334 | if (irq < 0) | 334 | if (irq < 0) |
335 | continue; | 335 | continue; |
336 | 336 | ||
@@ -344,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
344 | goto unlock; | 344 | goto unlock; |
345 | 345 | ||
346 | spin_lock_irqsave(&vector_lock, flags); | 346 | spin_lock_irqsave(&vector_lock, flags); |
347 | __get_cpu_var(vector_irq)[vector] = -1; | 347 | __this_cpu_write(vector_irq[vector], -1); |
348 | cpu_clear(me, vector_table[vector]); | 348 | cpu_clear(me, vector_table[vector]); |
349 | spin_unlock_irqrestore(&vector_lock, flags); | 349 | spin_unlock_irqrestore(&vector_lock, flags); |
350 | cfg->move_cleanup_count--; | 350 | cfg->move_cleanup_count--; |
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 074fde49c9e6..c7c51445c3be 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -396,7 +396,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
396 | { | 396 | { |
397 | unsigned int i; | 397 | unsigned int i; |
398 | i = atomic_read(&kcb->prev_kprobe_index); | 398 | i = atomic_read(&kcb->prev_kprobe_index); |
399 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i-1].kp; | 399 | __this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp); |
400 | kcb->kprobe_status = kcb->prev_kprobe[i-1].status; | 400 | kcb->kprobe_status = kcb->prev_kprobe[i-1].status; |
401 | atomic_sub(1, &kcb->prev_kprobe_index); | 401 | atomic_sub(1, &kcb->prev_kprobe_index); |
402 | } | 402 | } |
@@ -404,7 +404,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
404 | static void __kprobes set_current_kprobe(struct kprobe *p, | 404 | static void __kprobes set_current_kprobe(struct kprobe *p, |
405 | struct kprobe_ctlblk *kcb) | 405 | struct kprobe_ctlblk *kcb) |
406 | { | 406 | { |
407 | __get_cpu_var(current_kprobe) = p; | 407 | __this_cpu_write(current_kprobe, p); |
408 | } | 408 | } |
409 | 409 | ||
410 | static void kretprobe_trampoline(void) | 410 | static void kretprobe_trampoline(void) |
@@ -823,7 +823,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
823 | /* | 823 | /* |
824 | * jprobe instrumented function just completed | 824 | * jprobe instrumented function just completed |
825 | */ | 825 | */ |
826 | p = __get_cpu_var(current_kprobe); | 826 | p = __this_cpu_read(current_kprobe); |
827 | if (p->break_handler && p->break_handler(p, regs)) { | 827 | if (p->break_handler && p->break_handler(p, regs)) { |
828 | goto ss_probe; | 828 | goto ss_probe; |
829 | } | 829 | } |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index db7b36bb068b..8bfd36af46f8 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1341,7 +1341,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1341 | ia64_mlogbuf_finish(1); | 1341 | ia64_mlogbuf_finish(1); |
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | if (__get_cpu_var(ia64_mca_tr_reload)) { | 1344 | if (__this_cpu_read(ia64_mca_tr_reload)) { |
1345 | mca_insert_tr(0x1); /*Reload dynamic itrs*/ | 1345 | mca_insert_tr(0x1); /*Reload dynamic itrs*/ |
1346 | mca_insert_tr(0x2); /*Reload dynamic itrs*/ | 1346 | mca_insert_tr(0x2); /*Reload dynamic itrs*/ |
1347 | } | 1347 | } |
@@ -1868,14 +1868,14 @@ ia64_mca_cpu_init(void *cpu_data)
1868 | "MCA", cpu); | 1868 | "MCA", cpu); |
1869 | format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack), | 1869 | format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack), |
1870 | "INIT", cpu); | 1870 | "INIT", cpu); |
1871 | __get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data); | 1871 | __this_cpu_write(ia64_mca_data, (__per_cpu_mca[cpu] = __pa(data))); |
1872 | 1872 | ||
1873 | /* | 1873 | /* |
1874 | * Stash away a copy of the PTE needed to map the per-CPU page. | 1874 | * Stash away a copy of the PTE needed to map the per-CPU page. |
1875 | * We may need it during MCA recovery. | 1875 | * We may need it during MCA recovery. |
1876 | */ | 1876 | */ |
1877 | __get_cpu_var(ia64_mca_per_cpu_pte) = | 1877 | __this_cpu_write(ia64_mca_per_cpu_pte, |
1878 | pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); | 1878 | pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL))); |
1879 | 1879 | ||
1880 | /* | 1880 | /* |
1881 | * Also, stash away a copy of the PAL address and the PTE | 1881 | * Also, stash away a copy of the PAL address and the PTE |
@@ -1884,10 +1884,10 @@ ia64_mca_cpu_init(void *cpu_data)
1884 | pal_vaddr = efi_get_pal_addr(); | 1884 | pal_vaddr = efi_get_pal_addr(); |
1885 | if (!pal_vaddr) | 1885 | if (!pal_vaddr) |
1886 | return; | 1886 | return; |
1887 | __get_cpu_var(ia64_mca_pal_base) = | 1887 | __this_cpu_write(ia64_mca_pal_base, |
1888 | GRANULEROUNDDOWN((unsigned long) pal_vaddr); | 1888 | GRANULEROUNDDOWN((unsigned long) pal_vaddr)); |
1889 | __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr), | 1889 | __this_cpu_write(ia64_mca_pal_pte, pte_val(mk_pte_phys(__pa(pal_vaddr), |
1890 | PAGE_KERNEL)); | 1890 | PAGE_KERNEL))); |
1891 | } | 1891 | } |
1892 | 1892 | ||
1893 | static void ia64_mca_cmc_vector_adjust(void *dummy) | 1893 | static void ia64_mca_cmc_vector_adjust(void *dummy) |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index deed6fa96bb0..b51514957620 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -215,7 +215,7 @@ static inline void play_dead(void)
215 | unsigned int this_cpu = smp_processor_id(); | 215 | unsigned int this_cpu = smp_processor_id(); |
216 | 216 | ||
217 | /* Ack it */ | 217 | /* Ack it */ |
218 | __get_cpu_var(cpu_state) = CPU_DEAD; | 218 | __this_cpu_write(cpu_state, CPU_DEAD); |
219 | 219 | ||
220 | max_xtp(); | 220 | max_xtp(); |
221 | local_irq_disable(); | 221 | local_irq_disable(); |
@@ -273,7 +273,7 @@ ia64_save_extra (struct task_struct *task)
273 | if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) | 273 | if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) |
274 | pfm_save_regs(task); | 274 | pfm_save_regs(task); |
275 | 275 | ||
276 | info = __get_cpu_var(pfm_syst_info); | 276 | info = __this_cpu_read(pfm_syst_info); |
277 | if (info & PFM_CPUINFO_SYST_WIDE) | 277 | if (info & PFM_CPUINFO_SYST_WIDE) |
278 | pfm_syst_wide_update_task(task, info, 0); | 278 | pfm_syst_wide_update_task(task, info, 0); |
279 | #endif | 279 | #endif |
@@ -293,7 +293,7 @@ ia64_load_extra (struct task_struct *task)
293 | if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) | 293 | if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) |
294 | pfm_load_regs(task); | 294 | pfm_load_regs(task); |
295 | 295 | ||
296 | info = __get_cpu_var(pfm_syst_info); | 296 | info = __this_cpu_read(pfm_syst_info); |
297 | if (info & PFM_CPUINFO_SYST_WIDE) | 297 | if (info & PFM_CPUINFO_SYST_WIDE) |
298 | pfm_syst_wide_update_task(task, info, 1); | 298 | pfm_syst_wide_update_task(task, info, 1); |
299 | #endif | 299 | #endif |
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index d3636e67a98e..6f7d4a4dcf24 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -299,7 +299,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
299 | 299 | ||
300 | if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) { | 300 | if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) { |
301 | unsigned long count, current_jiffies = jiffies; | 301 | unsigned long count, current_jiffies = jiffies; |
302 | struct fpu_swa_msg *cp = &__get_cpu_var(cpulast); | 302 | struct fpu_swa_msg *cp = this_cpu_ptr(&cpulast); |
303 | 303 | ||
304 | if (unlikely(current_jiffies > cp->time)) | 304 | if (unlikely(current_jiffies > cp->time)) |
305 | cp->count = 0; | 305 | cp->count = 0; |
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 36182c84363c..5f6b6b48c1d5 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -629,7 +629,7 @@ void sn_cpu_init(void)
629 | 629 | ||
630 | cnode = nasid_to_cnodeid(nasid); | 630 | cnode = nasid_to_cnodeid(nasid); |
631 | 631 | ||
632 | sn_nodepda = nodepdaindr[cnode]; | 632 | __this_cpu_write(__sn_nodepda, nodepdaindr[cnode]); |
633 | 633 | ||
634 | pda->led_address = | 634 | pda->led_address = |
635 | (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); | 635 | (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); |
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 68c845411624..f9c8d9fc5939 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -134,8 +134,8 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm)
134 | itc = ia64_get_itc(); | 134 | itc = ia64_get_itc(); |
135 | smp_flush_tlb_cpumask(*mm_cpumask(mm)); | 135 | smp_flush_tlb_cpumask(*mm_cpumask(mm)); |
136 | itc = ia64_get_itc() - itc; | 136 | itc = ia64_get_itc() - itc; |
137 | __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; | 137 | __this_cpu_add(ptcstats.shub_ipi_flushes_itc_clocks, itc); |
138 | __get_cpu_var(ptcstats).shub_ipi_flushes++; | 138 | __this_cpu_inc(ptcstats.shub_ipi_flushes); |
139 | } | 139 | } |
140 | 140 | ||
141 | /** | 141 | /** |
@@ -199,14 +199,14 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
199 | start += (1UL << nbits); | 199 | start += (1UL << nbits); |
200 | } while (start < end); | 200 | } while (start < end); |
201 | ia64_srlz_i(); | 201 | ia64_srlz_i(); |
202 | __get_cpu_var(ptcstats).ptc_l++; | 202 | __this_cpu_inc(ptcstats.ptc_l); |
203 | preempt_enable(); | 203 | preempt_enable(); |
204 | return; | 204 | return; |
205 | } | 205 | } |
206 | 206 | ||
207 | if (atomic_read(&mm->mm_users) == 1 && mymm) { | 207 | if (atomic_read(&mm->mm_users) == 1 && mymm) { |
208 | flush_tlb_mm(mm); | 208 | flush_tlb_mm(mm); |
209 | __get_cpu_var(ptcstats).change_rid++; | 209 | __this_cpu_inc(ptcstats.change_rid); |
210 | preempt_enable(); | 210 | preempt_enable(); |
211 | return; | 211 | return; |
212 | } | 212 | } |
@@ -250,11 +250,11 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
250 | spin_lock_irqsave(PTC_LOCK(shub1), flags); | 250 | spin_lock_irqsave(PTC_LOCK(shub1), flags); |
251 | itc2 = ia64_get_itc(); | 251 | itc2 = ia64_get_itc(); |
252 | 252 | ||
253 | __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc; | 253 | __this_cpu_add(ptcstats.lock_itc_clocks, itc2 - itc); |
254 | __get_cpu_var(ptcstats).shub_ptc_flushes++; | 254 | __this_cpu_inc(ptcstats.shub_ptc_flushes); |
255 | __get_cpu_var(ptcstats).nodes_flushed += nix; | 255 | __this_cpu_add(ptcstats.nodes_flushed, nix); |
256 | if (!mymm) | 256 | if (!mymm) |
257 | __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++; | 257 | __this_cpu_inc(ptcstats.shub_ptc_flushes_not_my_mm); |
258 | 258 | ||
259 | if (use_cpu_ptcga && !mymm) { | 259 | if (use_cpu_ptcga && !mymm) { |
260 | old_rr = ia64_get_rr(start); | 260 | old_rr = ia64_get_rr(start); |
@@ -299,9 +299,9 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
299 | 299 | ||
300 | done: | 300 | done: |
301 | itc2 = ia64_get_itc() - itc2; | 301 | itc2 = ia64_get_itc() - itc2; |
302 | __get_cpu_var(ptcstats).shub_itc_clocks += itc2; | 302 | __this_cpu_add(ptcstats.shub_itc_clocks, itc2); |
303 | if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max) | 303 | if (itc2 > __this_cpu_read(ptcstats.shub_itc_clocks_max)) |
304 | __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2; | 304 | __this_cpu_write(ptcstats.shub_itc_clocks_max, itc2); |
305 | 305 | ||
306 | if (old_rr) { | 306 | if (old_rr) { |
307 | ia64_set_rr(start, old_rr); | 307 | ia64_set_rr(start, old_rr); |
@@ -311,7 +311,7 @@ done:
311 | spin_unlock_irqrestore(PTC_LOCK(shub1), flags); | 311 | spin_unlock_irqrestore(PTC_LOCK(shub1), flags); |
312 | 312 | ||
313 | if (flush_opt == 1 && deadlock) { | 313 | if (flush_opt == 1 && deadlock) { |
314 | __get_cpu_var(ptcstats).deadlocks++; | 314 | __this_cpu_inc(ptcstats.deadlocks); |
315 | sn2_ipi_flush_all_tlb(mm); | 315 | sn2_ipi_flush_all_tlb(mm); |
316 | } | 316 | } |
317 | 317 | ||
@@ -334,7 +334,7 @@ sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
334 | short nasid, i; | 334 | short nasid, i; |
335 | unsigned long *piows, zeroval, n; | 335 | unsigned long *piows, zeroval, n; |
336 | 336 | ||
337 | __get_cpu_var(ptcstats).deadlocks++; | 337 | __this_cpu_inc(ptcstats.deadlocks); |
338 | 338 | ||
339 | piows = (unsigned long *) pda->pio_write_status_addr; | 339 | piows = (unsigned long *) pda->pio_write_status_addr; |
340 | zeroval = pda->pio_write_status_val; | 340 | zeroval = pda->pio_write_status_val; |
@@ -349,7 +349,7 @@ sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
349 | ptc1 = CHANGE_NASID(nasid, ptc1); | 349 | ptc1 = CHANGE_NASID(nasid, ptc1); |
350 | 350 | ||
351 | n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval); | 351 | n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval); |
352 | __get_cpu_var(ptcstats).deadlocks2 += n; | 352 | __this_cpu_add(ptcstats.deadlocks2, n); |
353 | } | 353 | } |
354 | 354 | ||
355 | } | 355 | } |
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 02c08737f6aa..2478ec6d23c9 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -258,7 +258,7 @@ int metag_pmu_event_set_period(struct perf_event *event,
258 | 258 | ||
259 | static void metag_pmu_start(struct perf_event *event, int flags) | 259 | static void metag_pmu_start(struct perf_event *event, int flags) |
260 | { | 260 | { |
261 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 261 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
262 | struct hw_perf_event *hwc = &event->hw; | 262 | struct hw_perf_event *hwc = &event->hw; |
263 | int idx = hwc->idx; | 263 | int idx = hwc->idx; |
264 | 264 | ||
@@ -306,7 +306,7 @@ static void metag_pmu_stop(struct perf_event *event, int flags)
306 | 306 | ||
307 | static int metag_pmu_add(struct perf_event *event, int flags) | 307 | static int metag_pmu_add(struct perf_event *event, int flags) |
308 | { | 308 | { |
309 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 309 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
310 | struct hw_perf_event *hwc = &event->hw; | 310 | struct hw_perf_event *hwc = &event->hw; |
311 | int idx = 0, ret = 0; | 311 | int idx = 0, ret = 0; |
312 | 312 | ||
@@ -348,7 +348,7 @@ out:
348 | 348 | ||
349 | static void metag_pmu_del(struct perf_event *event, int flags) | 349 | static void metag_pmu_del(struct perf_event *event, int flags) |
350 | { | 350 | { |
351 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 351 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
352 | struct hw_perf_event *hwc = &event->hw; | 352 | struct hw_perf_event *hwc = &event->hw; |
353 | int idx = hwc->idx; | 353 | int idx = hwc->idx; |
354 | 354 | ||
@@ -597,7 +597,7 @@ static int _hw_perf_event_init(struct perf_event *event)
597 | 597 | ||
598 | static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx) | 598 | static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx) |
599 | { | 599 | { |
600 | struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events); | 600 | struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events); |
601 | unsigned int config = event->config; | 601 | unsigned int config = event->config; |
602 | unsigned int tmp = config & 0xf0; | 602 | unsigned int tmp = config & 0xf0; |
603 | unsigned long flags; | 603 | unsigned long flags; |
@@ -670,7 +670,7 @@ unlock:
670 | 670 | ||
671 | static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx) | 671 | static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx) |
672 | { | 672 | { |
673 | struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events); | 673 | struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events); |
674 | unsigned int tmp = 0; | 674 | unsigned int tmp = 0; |
675 | unsigned long flags; | 675 | unsigned long flags; |
676 | 676 | ||
@@ -718,7 +718,7 @@ out:
718 | 718 | ||
719 | static void metag_pmu_write_counter(int idx, u32 val) | 719 | static void metag_pmu_write_counter(int idx, u32 val) |
720 | { | 720 | { |
721 | struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events); | 721 | struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events); |
722 | u32 tmp = 0; | 722 | u32 tmp = 0; |
723 | unsigned long flags; | 723 | unsigned long flags; |
724 | 724 | ||
@@ -751,7 +751,7 @@ static int metag_pmu_event_map(int idx)
751 | static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev) | 751 | static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev) |
752 | { | 752 | { |
753 | int idx = (int)dev; | 753 | int idx = (int)dev; |
754 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 754 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
755 | struct perf_event *event = cpuhw->events[idx]; | 755 | struct perf_event *event = cpuhw->events[idx]; |
756 | struct hw_perf_event *hwc = &event->hw; | 756 | struct hw_perf_event *hwc = &event->hw; |
757 | struct pt_regs *regs = get_irq_regs(); | 757 | struct pt_regs *regs = get_irq_regs(); |
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 1b82ac6921e0..741734049675 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -264,13 +264,13 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
264 | unsigned long *pen; | 264 | unsigned long *pen; |
265 | unsigned long flags; | 265 | unsigned long flags; |
266 | union octeon_ciu_chip_data cd; | 266 | union octeon_ciu_chip_data cd; |
267 | raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); | 267 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); |
268 | 268 | ||
269 | cd.p = irq_data_get_irq_chip_data(data); | 269 | cd.p = irq_data_get_irq_chip_data(data); |
270 | 270 | ||
271 | raw_spin_lock_irqsave(lock, flags); | 271 | raw_spin_lock_irqsave(lock, flags); |
272 | if (cd.s.line == 0) { | 272 | if (cd.s.line == 0) { |
273 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); | 273 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); |
274 | __set_bit(cd.s.bit, pen); | 274 | __set_bit(cd.s.bit, pen); |
275 | /* | 275 | /* |
276 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 276 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
@@ -279,7 +279,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
279 | wmb(); | 279 | wmb(); |
280 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | 280 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
281 | } else { | 281 | } else { |
282 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); | 282 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); |
283 | __set_bit(cd.s.bit, pen); | 283 | __set_bit(cd.s.bit, pen); |
284 | /* | 284 | /* |
285 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 285 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
@@ -296,13 +296,13 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
296 | unsigned long *pen; | 296 | unsigned long *pen; |
297 | unsigned long flags; | 297 | unsigned long flags; |
298 | union octeon_ciu_chip_data cd; | 298 | union octeon_ciu_chip_data cd; |
299 | raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); | 299 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); |
300 | 300 | ||
301 | cd.p = irq_data_get_irq_chip_data(data); | 301 | cd.p = irq_data_get_irq_chip_data(data); |
302 | 302 | ||
303 | raw_spin_lock_irqsave(lock, flags); | 303 | raw_spin_lock_irqsave(lock, flags); |
304 | if (cd.s.line == 0) { | 304 | if (cd.s.line == 0) { |
305 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); | 305 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); |
306 | __clear_bit(cd.s.bit, pen); | 306 | __clear_bit(cd.s.bit, pen); |
307 | /* | 307 | /* |
308 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 308 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
@@ -311,7 +311,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
311 | wmb(); | 311 | wmb(); |
312 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | 312 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
313 | } else { | 313 | } else { |
314 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); | 314 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); |
315 | __clear_bit(cd.s.bit, pen); | 315 | __clear_bit(cd.s.bit, pen); |
316 | /* | 316 | /* |
317 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 317 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
@@ -431,11 +431,11 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
431 | 431 | ||
432 | if (cd.s.line == 0) { | 432 | if (cd.s.line == 0) { |
433 | int index = cvmx_get_core_num() * 2; | 433 | int index = cvmx_get_core_num() * 2; |
434 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); | 434 | set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); |
435 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 435 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
436 | } else { | 436 | } else { |
437 | int index = cvmx_get_core_num() * 2 + 1; | 437 | int index = cvmx_get_core_num() * 2 + 1; |
438 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); | 438 | set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); |
439 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 439 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
440 | } | 440 | } |
441 | } | 441 | } |
@@ -450,11 +450,11 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
450 | 450 | ||
451 | if (cd.s.line == 0) { | 451 | if (cd.s.line == 0) { |
452 | int index = cvmx_get_core_num() * 2; | 452 | int index = cvmx_get_core_num() * 2; |
453 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); | 453 | clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); |
454 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 454 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
455 | } else { | 455 | } else { |
456 | int index = cvmx_get_core_num() * 2 + 1; | 456 | int index = cvmx_get_core_num() * 2 + 1; |
457 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); | 457 | clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); |
458 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 458 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
459 | } | 459 | } |
460 | } | 460 | } |
@@ -1063,7 +1063,7 @@ static void octeon_irq_ip2_ciu(void)
1063 | const unsigned long core_id = cvmx_get_core_num(); | 1063 | const unsigned long core_id = cvmx_get_core_num(); |
1064 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); | 1064 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); |
1065 | 1065 | ||
1066 | ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); | 1066 | ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror); |
1067 | if (likely(ciu_sum)) { | 1067 | if (likely(ciu_sum)) { |
1068 | int bit = fls64(ciu_sum) - 1; | 1068 | int bit = fls64(ciu_sum) - 1; |
1069 | int irq = octeon_irq_ciu_to_irq[0][bit]; | 1069 | int irq = octeon_irq_ciu_to_irq[0][bit]; |
@@ -1080,7 +1080,7 @@ static void octeon_irq_ip3_ciu(void)
1080 | { | 1080 | { |
1081 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); | 1081 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); |
1082 | 1082 | ||
1083 | ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); | 1083 | ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror); |
1084 | if (likely(ciu_sum)) { | 1084 | if (likely(ciu_sum)) { |
1085 | int bit = fls64(ciu_sum) - 1; | 1085 | int bit = fls64(ciu_sum) - 1; |
1086 | int irq = octeon_irq_ciu_to_irq[1][bit]; | 1086 | int irq = octeon_irq_ciu_to_irq[1][bit]; |
@@ -1129,10 +1129,10 @@ static void octeon_irq_init_ciu_percpu(void)
1129 | int coreid = cvmx_get_core_num(); | 1129 | int coreid = cvmx_get_core_num(); |
1130 | 1130 | ||
1131 | 1131 | ||
1132 | __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; | 1132 | __this_cpu_write(octeon_irq_ciu0_en_mirror, 0); |
1133 | __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; | 1133 | __this_cpu_write(octeon_irq_ciu1_en_mirror, 0); |
1134 | wmb(); | 1134 | wmb(); |
1135 | raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock)); | 1135 | raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock)); |
1136 | /* | 1136 | /* |
1137 | * Disable All CIU Interrupts. The ones we need will be | 1137 | * Disable All CIU Interrupts. The ones we need will be |
1138 | * enabled later. Read the SUM register so we know the write | 1138 | * enabled later. Read the SUM register so we know the write |
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 0195745b4b1b..3ee347713307 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -33,17 +33,17 @@
33 | #ifdef CONFIG_DEBUG_FS | 33 | #ifdef CONFIG_DEBUG_FS |
34 | 34 | ||
35 | struct mips_fpu_emulator_stats { | 35 | struct mips_fpu_emulator_stats { |
36 | local_t emulated; | 36 | unsigned long emulated; |
37 | local_t loads; | 37 | unsigned long loads; |
38 | local_t stores; | 38 | unsigned long stores; |
39 | local_t cp1ops; | 39 | unsigned long cp1ops; |
40 | local_t cp1xops; | 40 | unsigned long cp1xops; |
41 | local_t errors; | 41 | unsigned long errors; |
42 | local_t ieee754_inexact; | 42 | unsigned long ieee754_inexact; |
43 | local_t ieee754_underflow; | 43 | unsigned long ieee754_underflow; |
44 | local_t ieee754_overflow; | 44 | unsigned long ieee754_overflow; |
45 | local_t ieee754_zerodiv; | 45 | unsigned long ieee754_zerodiv; |
46 | local_t ieee754_invalidop; | 46 | unsigned long ieee754_invalidop; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); | 49 | DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); |
@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
51 | #define MIPS_FPU_EMU_INC_STATS(M) \ | 51 | #define MIPS_FPU_EMU_INC_STATS(M) \ |
52 | do { \ | 52 | do { \ |
53 | preempt_disable(); \ | 53 | preempt_disable(); \ |
54 | __local_inc(&__get_cpu_var(fpuemustats).M); \ | 54 | __this_cpu_inc(fpuemustats.M); \ |
55 | preempt_enable(); \ | 55 | preempt_enable(); \ |
56 | } while (0) | 56 | } while (0) |
57 | 57 | ||
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 1f8187ab0997..212f46f2014e 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -224,7 +224,7 @@ static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
224 | 224 | ||
225 | static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) | 225 | static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) |
226 | { | 226 | { |
227 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; | 227 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); |
228 | kcb->kprobe_status = kcb->prev_kprobe.status; | 228 | kcb->kprobe_status = kcb->prev_kprobe.status; |
229 | kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR; | 229 | kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR; |
230 | kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR; | 230 | kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR; |
@@ -234,7 +234,7 @@ static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
234 | static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | 234 | static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
235 | struct kprobe_ctlblk *kcb) | 235 | struct kprobe_ctlblk *kcb) |
236 | { | 236 | { |
237 | __get_cpu_var(current_kprobe) = p; | 237 | __this_cpu_write(current_kprobe, p); |
238 | kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE); | 238 | kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE); |
239 | kcb->kprobe_saved_epc = regs->cp0_epc; | 239 | kcb->kprobe_saved_epc = regs->cp0_epc; |
240 | } | 240 | } |
@@ -385,7 +385,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
385 | ret = 1; | 385 | ret = 1; |
386 | goto no_kprobe; | 386 | goto no_kprobe; |
387 | } | 387 | } |
388 | p = __get_cpu_var(current_kprobe); | 388 | p = __this_cpu_read(current_kprobe); |
389 | if (p->break_handler && p->break_handler(p, regs)) | 389 | if (p->break_handler && p->break_handler(p, regs)) |
390 | goto ss_probe; | 390 | goto ss_probe; |
391 | } | 391 | } |
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index b63f2482f288..a8f9cdc6f8b0 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -340,7 +340,7 @@ static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
340 | 340 | ||
341 | static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) | 341 | static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) |
342 | { | 342 | { |
343 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 343 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
344 | 344 | ||
345 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); | 345 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); |
346 | 346 | ||
@@ -360,7 +360,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
360 | 360 | ||
361 | static void mipsxx_pmu_disable_event(int idx) | 361 | static void mipsxx_pmu_disable_event(int idx) |
362 | { | 362 | { |
363 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 363 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
364 | unsigned long flags; | 364 | unsigned long flags; |
365 | 365 | ||
366 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); | 366 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); |
@@ -460,7 +460,7 @@ static void mipspmu_stop(struct perf_event *event, int flags)
460 | 460 | ||
461 | static int mipspmu_add(struct perf_event *event, int flags) | 461 | static int mipspmu_add(struct perf_event *event, int flags) |
462 | { | 462 | { |
463 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 463 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
464 | struct hw_perf_event *hwc = &event->hw; | 464 | struct hw_perf_event *hwc = &event->hw; |
465 | int idx; | 465 | int idx; |
466 | int err = 0; | 466 | int err = 0; |
@@ -496,7 +496,7 @@ out:
496 | 496 | ||
497 | static void mipspmu_del(struct perf_event *event, int flags) | 497 | static void mipspmu_del(struct perf_event *event, int flags) |
498 | { | 498 | { |
499 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 499 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
500 | struct hw_perf_event *hwc = &event->hw; | 500 | struct hw_perf_event *hwc = &event->hw; |
501 | int idx = hwc->idx; | 501 | int idx = hwc->idx; |
502 | 502 | ||
@@ -1275,7 +1275,7 @@ static int __hw_perf_event_init(struct perf_event *event)
1275 | 1275 | ||
1276 | static void pause_local_counters(void) | 1276 | static void pause_local_counters(void) |
1277 | { | 1277 | { |
1278 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1278 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1279 | int ctr = mipspmu.num_counters; | 1279 | int ctr = mipspmu.num_counters; |
1280 | unsigned long flags; | 1280 | unsigned long flags; |
1281 | 1281 | ||
@@ -1291,7 +1291,7 @@ static void pause_local_counters(void)
1291 | 1291 | ||
1292 | static void resume_local_counters(void) | 1292 | static void resume_local_counters(void) |
1293 | { | 1293 | { |
1294 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1294 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1295 | int ctr = mipspmu.num_counters; | 1295 | int ctr = mipspmu.num_counters; |
1296 | 1296 | ||
1297 | do { | 1297 | do { |
@@ -1302,7 +1302,7 @@ static void resume_local_counters(void)
1302 | 1302 | ||
1303 | static int mipsxx_pmu_handle_shared_irq(void) | 1303 | static int mipsxx_pmu_handle_shared_irq(void) |
1304 | { | 1304 | { |
1305 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1305 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1306 | struct perf_sample_data data; | 1306 | struct perf_sample_data data; |
1307 | unsigned int counters = mipspmu.num_counters; | 1307 | unsigned int counters = mipspmu.num_counters; |
1308 | u64 counter; | 1308 | u64 counter; |
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index df9e2bd9b2c2..06bb5ed6d80a 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -346,7 +346,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
346 | int action, cpu = irq - IPI0_IRQ; | 346 | int action, cpu = irq - IPI0_IRQ; |
347 | 347 | ||
348 | spin_lock_irqsave(&ipi_lock, flags); | 348 | spin_lock_irqsave(&ipi_lock, flags); |
349 | action = __get_cpu_var(ipi_action_mask); | 349 | action = __this_cpu_read(ipi_action_mask); |
350 | per_cpu(ipi_action_mask, cpu) = 0; | 350 | per_cpu(ipi_action_mask, cpu) = 0; |
351 | clear_c0_cause(cpu ? C_SW1 : C_SW0); | 351 | clear_c0_cause(cpu ? C_SW1 : C_SW0); |
352 | spin_unlock_irqrestore(&ipi_lock, flags); | 352 | spin_unlock_irqrestore(&ipi_lock, flags); |
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
index 74e827b4ec8f..d8c63af6c7cc 100644
--- a/arch/mips/loongson/loongson-3/smp.c
+++ b/arch/mips/loongson/loongson-3/smp.c
@@ -299,16 +299,16 @@ static void loongson3_init_secondary(void)
299 | per_cpu(cpu_state, cpu) = CPU_ONLINE; | 299 | per_cpu(cpu_state, cpu) = CPU_ONLINE; |
300 | 300 | ||
301 | i = 0; | 301 | i = 0; |
302 | __get_cpu_var(core0_c0count) = 0; | 302 | __this_cpu_write(core0_c0count, 0); |
303 | loongson3_send_ipi_single(0, SMP_ASK_C0COUNT); | 303 | loongson3_send_ipi_single(0, SMP_ASK_C0COUNT); |
304 | while (!__get_cpu_var(core0_c0count)) { | 304 | while (!__this_cpu_read(core0_c0count)) { |
305 | i++; | 305 | i++; |
306 | cpu_relax(); | 306 | cpu_relax(); |
307 | } | 307 | } |
308 | 308 | ||
309 | if (i > MAX_LOOPS) | 309 | if (i > MAX_LOOPS) |
310 | i = MAX_LOOPS; | 310 | i = MAX_LOOPS; |
311 | initcount = __get_cpu_var(core0_c0count) + i; | 311 | initcount = __this_cpu_read(core0_c0count) + i; |
312 | write_c0_count(initcount); | 312 | write_c0_count(initcount); |
313 | } | 313 | } |
314 | 314 | ||
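Where the per-cpu variable was read or assigned as an lvalue rather than through a pointer, the series switches to __this_cpu_write()/__this_cpu_read(), as in the core0_c0count busy-wait above. A sketch of that shape with an invented wait_flag variable:

	#include <linux/percpu.h>
	#include <asm/processor.h>

	static DEFINE_PER_CPU(u64, wait_flag);

	static void wait_for_peer(void)
	{
		__this_cpu_write(wait_flag, 0);		/* was: __get_cpu_var(wait_flag) = 0;      */
		while (!__this_cpu_read(wait_flag))	/* was: while (!__get_cpu_var(wait_flag))  */
			cpu_relax();
	}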
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 6c840ceab820..e2452550bcb1 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h | |||
@@ -58,10 +58,10 @@ static inline unsigned long cputime_to_jiffies(const cputime_t ct) | |||
58 | static inline cputime_t cputime_to_scaled(const cputime_t ct) | 58 | static inline cputime_t cputime_to_scaled(const cputime_t ct) |
59 | { | 59 | { |
60 | if (cpu_has_feature(CPU_FTR_SPURR) && | 60 | if (cpu_has_feature(CPU_FTR_SPURR) && |
61 | __get_cpu_var(cputime_last_delta)) | 61 | __this_cpu_read(cputime_last_delta)) |
62 | return (__force u64) ct * | 62 | return (__force u64) ct * |
63 | __get_cpu_var(cputime_scaled_last_delta) / | 63 | __this_cpu_read(cputime_scaled_last_delta) / |
64 | __get_cpu_var(cputime_last_delta); | 64 | __this_cpu_read(cputime_last_delta); |
65 | return ct; | 65 | return ct; |
66 | } | 66 | } |
67 | 67 | ||
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index e787cc1bff8f..b0d5f0a97a01 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h | |||
@@ -82,7 +82,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); | |||
82 | 82 | ||
83 | static __always_inline void inc_irq_stat(enum interruption_class irq) | 83 | static __always_inline void inc_irq_stat(enum interruption_class irq) |
84 | { | 84 | { |
85 | __get_cpu_var(irq_stat).irqs[irq]++; | 85 | __this_cpu_inc(irq_stat.irqs[irq]); |
86 | } | 86 | } |
87 | 87 | ||
88 | struct ext_code { | 88 | struct ext_code { |
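Note that __this_cpu_inc() takes the full member expression, so __get_cpu_var(irq_stat).irqs[irq]++ collapses into a single per-cpu operation that the architecture can often emit as one instruction. Sketch with an invented struct irq_counters:

	#include <linux/percpu.h>

	struct irq_counters {
		unsigned int irqs[16];
	};
	static DEFINE_PER_CPU(struct irq_counters, irq_counters);

	static inline void count_irq(unsigned int irq)
	{
		/* was: __get_cpu_var(irq_counters).irqs[irq]++; */
		__this_cpu_inc(irq_counters.irqs[irq]);
	}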
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index fa91e0097458..933355e0d091 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h | |||
@@ -31,7 +31,7 @@ | |||
31 | pcp_op_T__ old__, new__, prev__; \ | 31 | pcp_op_T__ old__, new__, prev__; \ |
32 | pcp_op_T__ *ptr__; \ | 32 | pcp_op_T__ *ptr__; \ |
33 | preempt_disable(); \ | 33 | preempt_disable(); \ |
34 | ptr__ = __this_cpu_ptr(&(pcp)); \ | 34 | ptr__ = raw_cpu_ptr(&(pcp)); \ |
35 | prev__ = *ptr__; \ | 35 | prev__ = *ptr__; \ |
36 | do { \ | 36 | do { \ |
37 | old__ = prev__; \ | 37 | old__ = prev__; \ |
@@ -70,7 +70,7 @@ | |||
70 | pcp_op_T__ val__ = (val); \ | 70 | pcp_op_T__ val__ = (val); \ |
71 | pcp_op_T__ old__, *ptr__; \ | 71 | pcp_op_T__ old__, *ptr__; \ |
72 | preempt_disable(); \ | 72 | preempt_disable(); \ |
73 | ptr__ = __this_cpu_ptr(&(pcp)); \ | 73 | ptr__ = raw_cpu_ptr(&(pcp)); \ |
74 | if (__builtin_constant_p(val__) && \ | 74 | if (__builtin_constant_p(val__) && \ |
75 | ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ | 75 | ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ |
76 | asm volatile( \ | 76 | asm volatile( \ |
@@ -97,7 +97,7 @@ | |||
97 | pcp_op_T__ val__ = (val); \ | 97 | pcp_op_T__ val__ = (val); \ |
98 | pcp_op_T__ old__, *ptr__; \ | 98 | pcp_op_T__ old__, *ptr__; \ |
99 | preempt_disable(); \ | 99 | preempt_disable(); \ |
100 | ptr__ = __this_cpu_ptr(&(pcp)); \ | 100 | ptr__ = raw_cpu_ptr(&(pcp)); \ |
101 | asm volatile( \ | 101 | asm volatile( \ |
102 | op " %[old__],%[val__],%[ptr__]\n" \ | 102 | op " %[old__],%[val__],%[ptr__]\n" \ |
103 | : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ | 103 | : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ |
@@ -116,7 +116,7 @@ | |||
116 | pcp_op_T__ val__ = (val); \ | 116 | pcp_op_T__ val__ = (val); \ |
117 | pcp_op_T__ old__, *ptr__; \ | 117 | pcp_op_T__ old__, *ptr__; \ |
118 | preempt_disable(); \ | 118 | preempt_disable(); \ |
119 | ptr__ = __this_cpu_ptr(&(pcp)); \ | 119 | ptr__ = raw_cpu_ptr(&(pcp)); \ |
120 | asm volatile( \ | 120 | asm volatile( \ |
121 | op " %[old__],%[val__],%[ptr__]\n" \ | 121 | op " %[old__],%[val__],%[ptr__]\n" \ |
122 | : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ | 122 | : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ |
@@ -138,7 +138,7 @@ | |||
138 | pcp_op_T__ ret__; \ | 138 | pcp_op_T__ ret__; \ |
139 | pcp_op_T__ *ptr__; \ | 139 | pcp_op_T__ *ptr__; \ |
140 | preempt_disable(); \ | 140 | preempt_disable(); \ |
141 | ptr__ = __this_cpu_ptr(&(pcp)); \ | 141 | ptr__ = raw_cpu_ptr(&(pcp)); \ |
142 | ret__ = cmpxchg(ptr__, oval, nval); \ | 142 | ret__ = cmpxchg(ptr__, oval, nval); \ |
143 | preempt_enable(); \ | 143 | preempt_enable(); \ |
144 | ret__; \ | 144 | ret__; \ |
@@ -154,7 +154,7 @@ | |||
154 | typeof(pcp) *ptr__; \ | 154 | typeof(pcp) *ptr__; \ |
155 | typeof(pcp) ret__; \ | 155 | typeof(pcp) ret__; \ |
156 | preempt_disable(); \ | 156 | preempt_disable(); \ |
157 | ptr__ = __this_cpu_ptr(&(pcp)); \ | 157 | ptr__ = raw_cpu_ptr(&(pcp)); \ |
158 | ret__ = xchg(ptr__, nval); \ | 158 | ret__ = xchg(ptr__, nval); \ |
159 | preempt_enable(); \ | 159 | preempt_enable(); \ |
160 | ret__; \ | 160 | ret__; \ |
@@ -173,8 +173,8 @@ | |||
173 | typeof(pcp2) *p2__; \ | 173 | typeof(pcp2) *p2__; \ |
174 | int ret__; \ | 174 | int ret__; \ |
175 | preempt_disable(); \ | 175 | preempt_disable(); \ |
176 | p1__ = __this_cpu_ptr(&(pcp1)); \ | 176 | p1__ = raw_cpu_ptr(&(pcp1)); \ |
177 | p2__ = __this_cpu_ptr(&(pcp2)); \ | 177 | p2__ = raw_cpu_ptr(&(pcp2)); \ |
178 | ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ | 178 | ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ |
179 | preempt_enable(); \ | 179 | preempt_enable(); \ |
180 | ret__; \ | 180 | ret__; \ |
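These macros disable preemption themselves, so the pointer fetch needs no further checking; that is why the obsolete __this_cpu_ptr() becomes raw_cpu_ptr() rather than this_cpu_ptr(), which may warn under CONFIG_DEBUG_PREEMPT when used from preemptible context. A reduced sketch of the pattern, leaving out the s390 asm and using an invented pcp_counter:

	#include <linux/percpu.h>
	#include <linux/preempt.h>
	#include <linux/atomic.h>

	static DEFINE_PER_CPU(unsigned long, pcp_counter);

	static unsigned long pcp_counter_xchg(unsigned long nval)
	{
		unsigned long *ptr, old;

		preempt_disable();
		ptr = raw_cpu_ptr(&pcp_counter);	/* was: __this_cpu_ptr(&pcp_counter) */
		old = xchg(ptr, nval);
		preempt_enable();
		return old;
	}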
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index c846aee7372f..7559f1beab29 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c | |||
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | |||
21 | 21 | ||
22 | void __kprobes enabled_wait(void) | 22 | void __kprobes enabled_wait(void) |
23 | { | 23 | { |
24 | struct s390_idle_data *idle = &__get_cpu_var(s390_idle); | 24 | struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); |
25 | unsigned long long idle_time; | 25 | unsigned long long idle_time; |
26 | unsigned long psw_mask; | 26 | unsigned long psw_mask; |
27 | 27 | ||
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 27ae5433fe4d..014d4729b134 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -218,9 +218,9 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb, | |||
218 | */ | 218 | */ |
219 | static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p) | 219 | static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p) |
220 | { | 220 | { |
221 | kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe); | 221 | kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe); |
222 | kcb->prev_kprobe.status = kcb->kprobe_status; | 222 | kcb->prev_kprobe.status = kcb->kprobe_status; |
223 | __get_cpu_var(current_kprobe) = p; | 223 | __this_cpu_write(current_kprobe, p); |
224 | } | 224 | } |
225 | 225 | ||
226 | /* | 226 | /* |
@@ -230,7 +230,7 @@ static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p) | |||
230 | */ | 230 | */ |
231 | static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb) | 231 | static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb) |
232 | { | 232 | { |
233 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; | 233 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); |
234 | kcb->kprobe_status = kcb->prev_kprobe.status; | 234 | kcb->kprobe_status = kcb->prev_kprobe.status; |
235 | } | 235 | } |
236 | 236 | ||
@@ -311,7 +311,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
311 | enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn); | 311 | enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn); |
312 | return 1; | 312 | return 1; |
313 | } else if (kprobe_running()) { | 313 | } else if (kprobe_running()) { |
314 | p = __get_cpu_var(current_kprobe); | 314 | p = __this_cpu_read(current_kprobe); |
315 | if (p->break_handler && p->break_handler(p, regs)) { | 315 | if (p->break_handler && p->break_handler(p, regs)) { |
316 | /* | 316 | /* |
317 | * Continuation after the jprobe completed and | 317 | * Continuation after the jprobe completed and |
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index db96b418160a..dd1c24ceda50 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c | |||
@@ -54,8 +54,12 @@ void s390_handle_mcck(void) | |||
54 | */ | 54 | */ |
55 | local_irq_save(flags); | 55 | local_irq_save(flags); |
56 | local_mcck_disable(); | 56 | local_mcck_disable(); |
57 | mcck = __get_cpu_var(cpu_mcck); | 57 | /* |
58 | memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct)); | 58 | * Ummm... Does this make sense at all? Copying the percpu struct |
59 | * and then zapping it one statement later? | ||
60 | */ | ||
61 | memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck)); | ||
62 | memset(&mcck, 0, sizeof(struct mcck_struct)); | ||
59 | clear_cpu_flag(CIF_MCCK_PENDING); | 63 | clear_cpu_flag(CIF_MCCK_PENDING); |
60 | local_mcck_enable(); | 64 | local_mcck_enable(); |
61 | local_irq_restore(flags); | 65 | local_irq_restore(flags); |
@@ -269,7 +273,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) | |||
269 | nmi_enter(); | 273 | nmi_enter(); |
270 | inc_irq_stat(NMI_NMI); | 274 | inc_irq_stat(NMI_NMI); |
271 | mci = (struct mci *) &S390_lowcore.mcck_interruption_code; | 275 | mci = (struct mci *) &S390_lowcore.mcck_interruption_code; |
272 | mcck = &__get_cpu_var(cpu_mcck); | 276 | mcck = this_cpu_ptr(&cpu_mcck); |
273 | umode = user_mode(regs); | 277 | umode = user_mode(regs); |
274 | 278 | ||
275 | if (mci->sd) { | 279 | if (mci->sd) { |
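The question raised in the new comment is about behaviour, not style: the old lines cleared the per-cpu cpu_mcck after copying it, while the replacement clears only the local mcck copy. If the original behaviour is the intended one, the equivalent with the new accessors would presumably look like the sketch below (illustration only, with an invented struct mcck_record; it is not what the hunk above does):

	#include <linux/percpu.h>
	#include <linux/string.h>

	struct mcck_record {
		unsigned long flags;
	};
	static DEFINE_PER_CPU(struct mcck_record, mcck_record);

	static struct mcck_record mcck_take_snapshot(void)
	{
		struct mcck_record copy;

		memcpy(&copy, this_cpu_ptr(&mcck_record), sizeof(copy));
		/* clear the per-cpu source, not the snapshot */
		memset(this_cpu_ptr(&mcck_record), 0, sizeof(struct mcck_record));
		return copy;
	}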
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index d3194de7ae1e..56fdad479115 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c | |||
@@ -173,7 +173,7 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc) | |||
173 | */ | 173 | */ |
174 | static void cpumf_pmu_enable(struct pmu *pmu) | 174 | static void cpumf_pmu_enable(struct pmu *pmu) |
175 | { | 175 | { |
176 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 176 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
177 | int err; | 177 | int err; |
178 | 178 | ||
179 | if (cpuhw->flags & PMU_F_ENABLED) | 179 | if (cpuhw->flags & PMU_F_ENABLED) |
@@ -196,7 +196,7 @@ static void cpumf_pmu_enable(struct pmu *pmu) | |||
196 | */ | 196 | */ |
197 | static void cpumf_pmu_disable(struct pmu *pmu) | 197 | static void cpumf_pmu_disable(struct pmu *pmu) |
198 | { | 198 | { |
199 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 199 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
200 | int err; | 200 | int err; |
201 | u64 inactive; | 201 | u64 inactive; |
202 | 202 | ||
@@ -230,7 +230,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code, | |||
230 | return; | 230 | return; |
231 | 231 | ||
232 | inc_irq_stat(IRQEXT_CMC); | 232 | inc_irq_stat(IRQEXT_CMC); |
233 | cpuhw = &__get_cpu_var(cpu_hw_events); | 233 | cpuhw = this_cpu_ptr(&cpu_hw_events); |
234 | 234 | ||
235 | /* Measurement alerts are shared and might happen when the PMU | 235 | /* Measurement alerts are shared and might happen when the PMU |
236 | * is not reserved. Ignore these alerts in this case. */ | 236 | * is not reserved. Ignore these alerts in this case. */ |
@@ -250,7 +250,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code, | |||
250 | #define PMC_RELEASE 1 | 250 | #define PMC_RELEASE 1 |
251 | static void setup_pmc_cpu(void *flags) | 251 | static void setup_pmc_cpu(void *flags) |
252 | { | 252 | { |
253 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 253 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
254 | 254 | ||
255 | switch (*((int *) flags)) { | 255 | switch (*((int *) flags)) { |
256 | case PMC_INIT: | 256 | case PMC_INIT: |
@@ -475,7 +475,7 @@ static void cpumf_pmu_read(struct perf_event *event) | |||
475 | 475 | ||
476 | static void cpumf_pmu_start(struct perf_event *event, int flags) | 476 | static void cpumf_pmu_start(struct perf_event *event, int flags) |
477 | { | 477 | { |
478 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 478 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
479 | struct hw_perf_event *hwc = &event->hw; | 479 | struct hw_perf_event *hwc = &event->hw; |
480 | 480 | ||
481 | if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) | 481 | if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) |
@@ -506,7 +506,7 @@ static void cpumf_pmu_start(struct perf_event *event, int flags) | |||
506 | 506 | ||
507 | static void cpumf_pmu_stop(struct perf_event *event, int flags) | 507 | static void cpumf_pmu_stop(struct perf_event *event, int flags) |
508 | { | 508 | { |
509 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 509 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
510 | struct hw_perf_event *hwc = &event->hw; | 510 | struct hw_perf_event *hwc = &event->hw; |
511 | 511 | ||
512 | if (!(hwc->state & PERF_HES_STOPPED)) { | 512 | if (!(hwc->state & PERF_HES_STOPPED)) { |
@@ -527,7 +527,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags) | |||
527 | 527 | ||
528 | static int cpumf_pmu_add(struct perf_event *event, int flags) | 528 | static int cpumf_pmu_add(struct perf_event *event, int flags) |
529 | { | 529 | { |
530 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 530 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
531 | 531 | ||
532 | /* Check authorization for the counter set to which this | 532 | /* Check authorization for the counter set to which this |
533 | * counter belongs. | 533 | * counter belongs. |
@@ -551,7 +551,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags) | |||
551 | 551 | ||
552 | static void cpumf_pmu_del(struct perf_event *event, int flags) | 552 | static void cpumf_pmu_del(struct perf_event *event, int flags) |
553 | { | 553 | { |
554 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 554 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
555 | 555 | ||
556 | cpumf_pmu_stop(event, PERF_EF_UPDATE); | 556 | cpumf_pmu_stop(event, PERF_EF_UPDATE); |
557 | 557 | ||
@@ -575,7 +575,7 @@ static void cpumf_pmu_del(struct perf_event *event, int flags) | |||
575 | */ | 575 | */ |
576 | static void cpumf_pmu_start_txn(struct pmu *pmu) | 576 | static void cpumf_pmu_start_txn(struct pmu *pmu) |
577 | { | 577 | { |
578 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 578 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
579 | 579 | ||
580 | perf_pmu_disable(pmu); | 580 | perf_pmu_disable(pmu); |
581 | cpuhw->flags |= PERF_EVENT_TXN; | 581 | cpuhw->flags |= PERF_EVENT_TXN; |
@@ -589,7 +589,7 @@ static void cpumf_pmu_start_txn(struct pmu *pmu) | |||
589 | */ | 589 | */ |
590 | static void cpumf_pmu_cancel_txn(struct pmu *pmu) | 590 | static void cpumf_pmu_cancel_txn(struct pmu *pmu) |
591 | { | 591 | { |
592 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 592 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
593 | 593 | ||
594 | WARN_ON(cpuhw->tx_state != cpuhw->state); | 594 | WARN_ON(cpuhw->tx_state != cpuhw->state); |
595 | 595 | ||
@@ -604,7 +604,7 @@ static void cpumf_pmu_cancel_txn(struct pmu *pmu) | |||
604 | */ | 604 | */ |
605 | static int cpumf_pmu_commit_txn(struct pmu *pmu) | 605 | static int cpumf_pmu_commit_txn(struct pmu *pmu) |
606 | { | 606 | { |
607 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 607 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
608 | u64 state; | 608 | u64 state; |
609 | 609 | ||
610 | /* check if the updated state can be scheduled */ | 610 | /* check if the updated state can be scheduled */ |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index ea0c7b2ef030..08e761318c17 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
@@ -562,7 +562,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex); | |||
562 | static void setup_pmc_cpu(void *flags) | 562 | static void setup_pmc_cpu(void *flags) |
563 | { | 563 | { |
564 | int err; | 564 | int err; |
565 | struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf); | 565 | struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf); |
566 | 566 | ||
567 | err = 0; | 567 | err = 0; |
568 | switch (*((int *) flags)) { | 568 | switch (*((int *) flags)) { |
@@ -849,7 +849,7 @@ static int cpumsf_pmu_event_init(struct perf_event *event) | |||
849 | 849 | ||
850 | static void cpumsf_pmu_enable(struct pmu *pmu) | 850 | static void cpumsf_pmu_enable(struct pmu *pmu) |
851 | { | 851 | { |
852 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | 852 | struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); |
853 | struct hw_perf_event *hwc; | 853 | struct hw_perf_event *hwc; |
854 | int err; | 854 | int err; |
855 | 855 | ||
@@ -898,7 +898,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu) | |||
898 | 898 | ||
899 | static void cpumsf_pmu_disable(struct pmu *pmu) | 899 | static void cpumsf_pmu_disable(struct pmu *pmu) |
900 | { | 900 | { |
901 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | 901 | struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); |
902 | struct hws_lsctl_request_block inactive; | 902 | struct hws_lsctl_request_block inactive; |
903 | struct hws_qsi_info_block si; | 903 | struct hws_qsi_info_block si; |
904 | int err; | 904 | int err; |
@@ -1306,7 +1306,7 @@ static void cpumsf_pmu_read(struct perf_event *event) | |||
1306 | */ | 1306 | */ |
1307 | static void cpumsf_pmu_start(struct perf_event *event, int flags) | 1307 | static void cpumsf_pmu_start(struct perf_event *event, int flags) |
1308 | { | 1308 | { |
1309 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | 1309 | struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); |
1310 | 1310 | ||
1311 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | 1311 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) |
1312 | return; | 1312 | return; |
@@ -1327,7 +1327,7 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags) | |||
1327 | */ | 1327 | */ |
1328 | static void cpumsf_pmu_stop(struct perf_event *event, int flags) | 1328 | static void cpumsf_pmu_stop(struct perf_event *event, int flags) |
1329 | { | 1329 | { |
1330 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | 1330 | struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); |
1331 | 1331 | ||
1332 | if (event->hw.state & PERF_HES_STOPPED) | 1332 | if (event->hw.state & PERF_HES_STOPPED) |
1333 | return; | 1333 | return; |
@@ -1346,7 +1346,7 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags) | |||
1346 | 1346 | ||
1347 | static int cpumsf_pmu_add(struct perf_event *event, int flags) | 1347 | static int cpumsf_pmu_add(struct perf_event *event, int flags) |
1348 | { | 1348 | { |
1349 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | 1349 | struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); |
1350 | int err; | 1350 | int err; |
1351 | 1351 | ||
1352 | if (cpuhw->flags & PMU_F_IN_USE) | 1352 | if (cpuhw->flags & PMU_F_IN_USE) |
@@ -1397,7 +1397,7 @@ out: | |||
1397 | 1397 | ||
1398 | static void cpumsf_pmu_del(struct perf_event *event, int flags) | 1398 | static void cpumsf_pmu_del(struct perf_event *event, int flags) |
1399 | { | 1399 | { |
1400 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | 1400 | struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); |
1401 | 1401 | ||
1402 | perf_pmu_disable(event->pmu); | 1402 | perf_pmu_disable(event->pmu); |
1403 | cpumsf_pmu_stop(event, PERF_EF_UPDATE); | 1403 | cpumsf_pmu_stop(event, PERF_EF_UPDATE); |
@@ -1470,7 +1470,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code, | |||
1470 | if (!(alert & CPU_MF_INT_SF_MASK)) | 1470 | if (!(alert & CPU_MF_INT_SF_MASK)) |
1471 | return; | 1471 | return; |
1472 | inc_irq_stat(IRQEXT_CMS); | 1472 | inc_irq_stat(IRQEXT_CMS); |
1473 | cpuhw = &__get_cpu_var(cpu_hw_sf); | 1473 | cpuhw = this_cpu_ptr(&cpu_hw_sf); |
1474 | 1474 | ||
1475 | /* Measurement alerts are shared and might happen when the PMU | 1475 | /* Measurement alerts are shared and might happen when the PMU |
1476 | * is not reserved. Ignore these alerts in this case. */ | 1476 | * is not reserved. Ignore these alerts in this case. */ |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index edefead3b43a..dbdd33ee0102 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c | |||
@@ -23,7 +23,7 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id); | |||
23 | */ | 23 | */ |
24 | void cpu_init(void) | 24 | void cpu_init(void) |
25 | { | 25 | { |
26 | struct cpuid *id = &__get_cpu_var(cpu_id); | 26 | struct cpuid *id = this_cpu_ptr(&cpu_id); |
27 | 27 | ||
28 | get_cpu_id(id); | 28 | get_cpu_id(id); |
29 | atomic_inc(&init_mm.mm_count); | 29 | atomic_inc(&init_mm.mm_count); |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 69e980de0f62..005d665fe4a5 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -92,7 +92,7 @@ void clock_comparator_work(void) | |||
92 | struct clock_event_device *cd; | 92 | struct clock_event_device *cd; |
93 | 93 | ||
94 | S390_lowcore.clock_comparator = -1ULL; | 94 | S390_lowcore.clock_comparator = -1ULL; |
95 | cd = &__get_cpu_var(comparators); | 95 | cd = this_cpu_ptr(&comparators); |
96 | cd->event_handler(cd); | 96 | cd->event_handler(cd); |
97 | } | 97 | } |
98 | 98 | ||
@@ -373,7 +373,7 @@ EXPORT_SYMBOL(get_sync_clock); | |||
373 | */ | 373 | */ |
374 | static void disable_sync_clock(void *dummy) | 374 | static void disable_sync_clock(void *dummy) |
375 | { | 375 | { |
376 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); | 376 | atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word); |
377 | /* | 377 | /* |
378 | * Clear the in-sync bit 2^31. All get_sync_clock calls will | 378 | * Clear the in-sync bit 2^31. All get_sync_clock calls will |
379 | * fail until the sync bit is turned back on. In addition | 379 | * fail until the sync bit is turned back on. In addition |
@@ -390,7 +390,7 @@ static void disable_sync_clock(void *dummy) | |||
390 | */ | 390 | */ |
391 | static void enable_sync_clock(void) | 391 | static void enable_sync_clock(void) |
392 | { | 392 | { |
393 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); | 393 | atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word); |
394 | atomic_set_mask(0x80000000, sw_ptr); | 394 | atomic_set_mask(0x80000000, sw_ptr); |
395 | } | 395 | } |
396 | 396 | ||
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index e53c6f268807..ff9b4eb34589 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
@@ -178,7 +178,7 @@ static int smp_ctl_qsi(int cpu) | |||
178 | static void hws_ext_handler(struct ext_code ext_code, | 178 | static void hws_ext_handler(struct ext_code ext_code, |
179 | unsigned int param32, unsigned long param64) | 179 | unsigned int param32, unsigned long param64) |
180 | { | 180 | { |
181 | struct hws_cpu_buffer *cb = &__get_cpu_var(sampler_cpu_buffer); | 181 | struct hws_cpu_buffer *cb = this_cpu_ptr(&sampler_cpu_buffer); |
182 | 182 | ||
183 | if (!(param32 & CPU_MF_INT_SF_MASK)) | 183 | if (!(param32 & CPU_MF_INT_SF_MASK)) |
184 | return; | 184 | return; |
diff --git a/arch/sparc/include/asm/cpudata_32.h b/arch/sparc/include/asm/cpudata_32.h index 0300d94c25b3..05f366379f53 100644 --- a/arch/sparc/include/asm/cpudata_32.h +++ b/arch/sparc/include/asm/cpudata_32.h | |||
@@ -26,6 +26,6 @@ typedef struct { | |||
26 | 26 | ||
27 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); | 27 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); |
28 | #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) | 28 | #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) |
29 | #define local_cpu_data() __get_cpu_var(__cpu_data) | 29 | #define local_cpu_data() (*this_cpu_ptr(&__cpu_data)) |
30 | 30 | ||
31 | #endif /* _SPARC_CPUDATA_H */ | 31 | #endif /* _SPARC_CPUDATA_H */ |
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index 0e594076912c..a6e424d185d0 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h | |||
@@ -30,7 +30,7 @@ typedef struct { | |||
30 | 30 | ||
31 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); | 31 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); |
32 | #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) | 32 | #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) |
33 | #define local_cpu_data() __get_cpu_var(__cpu_data) | 33 | #define local_cpu_data() (*this_cpu_ptr(&__cpu_data)) |
34 | 34 | ||
35 | #endif /* !(__ASSEMBLY__) */ | 35 | #endif /* !(__ASSEMBLY__) */ |
36 | 36 | ||
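For the local_cpu_data() macros the lvalue behaviour is preserved by dereferencing the pointer: (*this_cpu_ptr(&__cpu_data)) can still appear on either side of an assignment, just like the old __get_cpu_var() expression. Minimal sketch with an invented cpu_info type and macro:

	#include <linux/percpu.h>

	struct cpu_info {
		unsigned long irq0_irqs;
	};
	static DEFINE_PER_CPU(struct cpu_info, cpu_info);

	/* lvalue-preserving replacement for the old __get_cpu_var() form */
	#define local_cpu_info()	(*this_cpu_ptr(&cpu_info))

	static void bump_irq0_count(void)
	{
		local_cpu_info().irq0_irqs++;
	}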
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index 98d712843413..cd83be527586 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c | |||
@@ -83,7 +83,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
83 | 83 | ||
84 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | 84 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) |
85 | { | 85 | { |
86 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; | 86 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); |
87 | kcb->kprobe_status = kcb->prev_kprobe.status; | 87 | kcb->kprobe_status = kcb->prev_kprobe.status; |
88 | kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc; | 88 | kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc; |
89 | kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil; | 89 | kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil; |
@@ -92,7 +92,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
92 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | 92 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
93 | struct kprobe_ctlblk *kcb) | 93 | struct kprobe_ctlblk *kcb) |
94 | { | 94 | { |
95 | __get_cpu_var(current_kprobe) = p; | 95 | __this_cpu_write(current_kprobe, p); |
96 | kcb->kprobe_orig_tnpc = regs->tnpc; | 96 | kcb->kprobe_orig_tnpc = regs->tnpc; |
97 | kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL); | 97 | kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL); |
98 | } | 98 | } |
@@ -155,7 +155,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
155 | ret = 1; | 155 | ret = 1; |
156 | goto no_kprobe; | 156 | goto no_kprobe; |
157 | } | 157 | } |
158 | p = __get_cpu_var(current_kprobe); | 158 | p = __this_cpu_read(current_kprobe); |
159 | if (p->break_handler && p->break_handler(p, regs)) | 159 | if (p->break_handler && p->break_handler(p, regs)) |
160 | goto ss_probe; | 160 | goto ss_probe; |
161 | } | 161 | } |
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 018ef11f57df..ea2bad306f93 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c | |||
@@ -343,7 +343,7 @@ static void leon_ipi_resched(int cpu) | |||
343 | 343 | ||
344 | void leonsmp_ipi_interrupt(void) | 344 | void leonsmp_ipi_interrupt(void) |
345 | { | 345 | { |
346 | struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work); | 346 | struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work); |
347 | 347 | ||
348 | if (work->single) { | 348 | if (work->single) { |
349 | work->single = 0; | 349 | work->single = 0; |
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 5b1151dcba13..a9973bb4a1b2 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c | |||
@@ -100,20 +100,20 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |||
100 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); | 100 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); |
101 | 101 | ||
102 | sum = local_cpu_data().irq0_irqs; | 102 | sum = local_cpu_data().irq0_irqs; |
103 | if (__get_cpu_var(nmi_touch)) { | 103 | if (__this_cpu_read(nmi_touch)) { |
104 | __get_cpu_var(nmi_touch) = 0; | 104 | __this_cpu_write(nmi_touch, 0); |
105 | touched = 1; | 105 | touched = 1; |
106 | } | 106 | } |
107 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { | 107 | if (!touched && __this_cpu_read(last_irq_sum) == sum) { |
108 | __this_cpu_inc(alert_counter); | 108 | __this_cpu_inc(alert_counter); |
109 | if (__this_cpu_read(alert_counter) == 30 * nmi_hz) | 109 | if (__this_cpu_read(alert_counter) == 30 * nmi_hz) |
110 | die_nmi("BUG: NMI Watchdog detected LOCKUP", | 110 | die_nmi("BUG: NMI Watchdog detected LOCKUP", |
111 | regs, panic_on_timeout); | 111 | regs, panic_on_timeout); |
112 | } else { | 112 | } else { |
113 | __get_cpu_var(last_irq_sum) = sum; | 113 | __this_cpu_write(last_irq_sum, sum); |
114 | __this_cpu_write(alert_counter, 0); | 114 | __this_cpu_write(alert_counter, 0); |
115 | } | 115 | } |
116 | if (__get_cpu_var(wd_enabled)) { | 116 | if (__this_cpu_read(wd_enabled)) { |
117 | pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); | 117 | pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); |
118 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); | 118 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); |
119 | } | 119 | } |
@@ -154,7 +154,7 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count) | |||
154 | void stop_nmi_watchdog(void *unused) | 154 | void stop_nmi_watchdog(void *unused) |
155 | { | 155 | { |
156 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); | 156 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); |
157 | __get_cpu_var(wd_enabled) = 0; | 157 | __this_cpu_write(wd_enabled, 0); |
158 | atomic_dec(&nmi_active); | 158 | atomic_dec(&nmi_active); |
159 | } | 159 | } |
160 | 160 | ||
@@ -207,7 +207,7 @@ error: | |||
207 | 207 | ||
208 | void start_nmi_watchdog(void *unused) | 208 | void start_nmi_watchdog(void *unused) |
209 | { | 209 | { |
210 | __get_cpu_var(wd_enabled) = 1; | 210 | __this_cpu_write(wd_enabled, 1); |
211 | atomic_inc(&nmi_active); | 211 | atomic_inc(&nmi_active); |
212 | 212 | ||
213 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); | 213 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); |
@@ -218,7 +218,7 @@ void start_nmi_watchdog(void *unused) | |||
218 | 218 | ||
219 | static void nmi_adjust_hz_one(void *unused) | 219 | static void nmi_adjust_hz_one(void *unused) |
220 | { | 220 | { |
221 | if (!__get_cpu_var(wd_enabled)) | 221 | if (!__this_cpu_read(wd_enabled)) |
222 | return; | 222 | return; |
223 | 223 | ||
224 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); | 224 | pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); |
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index d07f6b29aed8..49d33b178793 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -48,7 +48,7 @@ static int iommu_batch_initialized; | |||
48 | /* Interrupts must be disabled. */ | 48 | /* Interrupts must be disabled. */ |
49 | static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry) | 49 | static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry) |
50 | { | 50 | { |
51 | struct iommu_batch *p = &__get_cpu_var(iommu_batch); | 51 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); |
52 | 52 | ||
53 | p->dev = dev; | 53 | p->dev = dev; |
54 | p->prot = prot; | 54 | p->prot = prot; |
@@ -94,7 +94,7 @@ static long iommu_batch_flush(struct iommu_batch *p) | |||
94 | 94 | ||
95 | static inline void iommu_batch_new_entry(unsigned long entry) | 95 | static inline void iommu_batch_new_entry(unsigned long entry) |
96 | { | 96 | { |
97 | struct iommu_batch *p = &__get_cpu_var(iommu_batch); | 97 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); |
98 | 98 | ||
99 | if (p->entry + p->npages == entry) | 99 | if (p->entry + p->npages == entry) |
100 | return; | 100 | return; |
@@ -106,7 +106,7 @@ static inline void iommu_batch_new_entry(unsigned long entry) | |||
106 | /* Interrupts must be disabled. */ | 106 | /* Interrupts must be disabled. */ |
107 | static inline long iommu_batch_add(u64 phys_page) | 107 | static inline long iommu_batch_add(u64 phys_page) |
108 | { | 108 | { |
109 | struct iommu_batch *p = &__get_cpu_var(iommu_batch); | 109 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); |
110 | 110 | ||
111 | BUG_ON(p->npages >= PGLIST_NENTS); | 111 | BUG_ON(p->npages >= PGLIST_NENTS); |
112 | 112 | ||
@@ -120,7 +120,7 @@ static inline long iommu_batch_add(u64 phys_page) | |||
120 | /* Interrupts must be disabled. */ | 120 | /* Interrupts must be disabled. */ |
121 | static inline long iommu_batch_end(void) | 121 | static inline long iommu_batch_end(void) |
122 | { | 122 | { |
123 | struct iommu_batch *p = &__get_cpu_var(iommu_batch); | 123 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); |
124 | 124 | ||
125 | BUG_ON(p->npages >= PGLIST_NENTS); | 125 | BUG_ON(p->npages >= PGLIST_NENTS); |
126 | 126 | ||
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index c9759ad3f34a..46a5e4508752 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -1013,7 +1013,7 @@ static void update_pcrs_for_enable(struct cpu_hw_events *cpuc) | |||
1013 | 1013 | ||
1014 | static void sparc_pmu_enable(struct pmu *pmu) | 1014 | static void sparc_pmu_enable(struct pmu *pmu) |
1015 | { | 1015 | { |
1016 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1016 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1017 | int i; | 1017 | int i; |
1018 | 1018 | ||
1019 | if (cpuc->enabled) | 1019 | if (cpuc->enabled) |
@@ -1031,7 +1031,7 @@ static void sparc_pmu_enable(struct pmu *pmu) | |||
1031 | 1031 | ||
1032 | static void sparc_pmu_disable(struct pmu *pmu) | 1032 | static void sparc_pmu_disable(struct pmu *pmu) |
1033 | { | 1033 | { |
1034 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1034 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1035 | int i; | 1035 | int i; |
1036 | 1036 | ||
1037 | if (!cpuc->enabled) | 1037 | if (!cpuc->enabled) |
@@ -1065,7 +1065,7 @@ static int active_event_index(struct cpu_hw_events *cpuc, | |||
1065 | 1065 | ||
1066 | static void sparc_pmu_start(struct perf_event *event, int flags) | 1066 | static void sparc_pmu_start(struct perf_event *event, int flags) |
1067 | { | 1067 | { |
1068 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1068 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1069 | int idx = active_event_index(cpuc, event); | 1069 | int idx = active_event_index(cpuc, event); |
1070 | 1070 | ||
1071 | if (flags & PERF_EF_RELOAD) { | 1071 | if (flags & PERF_EF_RELOAD) { |
@@ -1080,7 +1080,7 @@ static void sparc_pmu_start(struct perf_event *event, int flags) | |||
1080 | 1080 | ||
1081 | static void sparc_pmu_stop(struct perf_event *event, int flags) | 1081 | static void sparc_pmu_stop(struct perf_event *event, int flags) |
1082 | { | 1082 | { |
1083 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1083 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1084 | int idx = active_event_index(cpuc, event); | 1084 | int idx = active_event_index(cpuc, event); |
1085 | 1085 | ||
1086 | if (!(event->hw.state & PERF_HES_STOPPED)) { | 1086 | if (!(event->hw.state & PERF_HES_STOPPED)) { |
@@ -1096,7 +1096,7 @@ static void sparc_pmu_stop(struct perf_event *event, int flags) | |||
1096 | 1096 | ||
1097 | static void sparc_pmu_del(struct perf_event *event, int _flags) | 1097 | static void sparc_pmu_del(struct perf_event *event, int _flags) |
1098 | { | 1098 | { |
1099 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1099 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1100 | unsigned long flags; | 1100 | unsigned long flags; |
1101 | int i; | 1101 | int i; |
1102 | 1102 | ||
@@ -1133,7 +1133,7 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) | |||
1133 | 1133 | ||
1134 | static void sparc_pmu_read(struct perf_event *event) | 1134 | static void sparc_pmu_read(struct perf_event *event) |
1135 | { | 1135 | { |
1136 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1136 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1137 | int idx = active_event_index(cpuc, event); | 1137 | int idx = active_event_index(cpuc, event); |
1138 | struct hw_perf_event *hwc = &event->hw; | 1138 | struct hw_perf_event *hwc = &event->hw; |
1139 | 1139 | ||
@@ -1145,7 +1145,7 @@ static DEFINE_MUTEX(pmc_grab_mutex); | |||
1145 | 1145 | ||
1146 | static void perf_stop_nmi_watchdog(void *unused) | 1146 | static void perf_stop_nmi_watchdog(void *unused) |
1147 | { | 1147 | { |
1148 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1148 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1149 | int i; | 1149 | int i; |
1150 | 1150 | ||
1151 | stop_nmi_watchdog(NULL); | 1151 | stop_nmi_watchdog(NULL); |
@@ -1356,7 +1356,7 @@ static int collect_events(struct perf_event *group, int max_count, | |||
1356 | 1356 | ||
1357 | static int sparc_pmu_add(struct perf_event *event, int ef_flags) | 1357 | static int sparc_pmu_add(struct perf_event *event, int ef_flags) |
1358 | { | 1358 | { |
1359 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1359 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1360 | int n0, ret = -EAGAIN; | 1360 | int n0, ret = -EAGAIN; |
1361 | unsigned long flags; | 1361 | unsigned long flags; |
1362 | 1362 | ||
@@ -1498,7 +1498,7 @@ static int sparc_pmu_event_init(struct perf_event *event) | |||
1498 | */ | 1498 | */ |
1499 | static void sparc_pmu_start_txn(struct pmu *pmu) | 1499 | static void sparc_pmu_start_txn(struct pmu *pmu) |
1500 | { | 1500 | { |
1501 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1501 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
1502 | 1502 | ||
1503 | perf_pmu_disable(pmu); | 1503 | perf_pmu_disable(pmu); |
1504 | cpuhw->group_flag |= PERF_EVENT_TXN; | 1504 | cpuhw->group_flag |= PERF_EVENT_TXN; |
@@ -1511,7 +1511,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu) | |||
1511 | */ | 1511 | */ |
1512 | static void sparc_pmu_cancel_txn(struct pmu *pmu) | 1512 | static void sparc_pmu_cancel_txn(struct pmu *pmu) |
1513 | { | 1513 | { |
1514 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1514 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
1515 | 1515 | ||
1516 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 1516 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
1517 | perf_pmu_enable(pmu); | 1517 | perf_pmu_enable(pmu); |
@@ -1524,13 +1524,13 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu) | |||
1524 | */ | 1524 | */ |
1525 | static int sparc_pmu_commit_txn(struct pmu *pmu) | 1525 | static int sparc_pmu_commit_txn(struct pmu *pmu) |
1526 | { | 1526 | { |
1527 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1527 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1528 | int n; | 1528 | int n; |
1529 | 1529 | ||
1530 | if (!sparc_pmu) | 1530 | if (!sparc_pmu) |
1531 | return -EINVAL; | 1531 | return -EINVAL; |
1532 | 1532 | ||
1533 | cpuc = &__get_cpu_var(cpu_hw_events); | 1533 | cpuc = this_cpu_ptr(&cpu_hw_events); |
1534 | n = cpuc->n_events; | 1534 | n = cpuc->n_events; |
1535 | if (check_excludes(cpuc->event, 0, n)) | 1535 | if (check_excludes(cpuc->event, 0, n)) |
1536 | return -EINVAL; | 1536 | return -EINVAL; |
@@ -1601,7 +1601,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
1601 | 1601 | ||
1602 | regs = args->regs; | 1602 | regs = args->regs; |
1603 | 1603 | ||
1604 | cpuc = &__get_cpu_var(cpu_hw_events); | 1604 | cpuc = this_cpu_ptr(&cpu_hw_events); |
1605 | 1605 | ||
1606 | /* If the PMU has the TOE IRQ enable bits, we need to do a | 1606 | /* If the PMU has the TOE IRQ enable bits, we need to do a |
1607 | * dummy write to the %pcr to clear the overflow bits and thus | 1607 | * dummy write to the %pcr to clear the overflow bits and thus |
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index d5c319553fd0..9d98e5002a09 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c | |||
@@ -204,7 +204,7 @@ static void __init smp4d_ipi_init(void) | |||
204 | 204 | ||
205 | void sun4d_ipi_interrupt(void) | 205 | void sun4d_ipi_interrupt(void) |
206 | { | 206 | { |
207 | struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work); | 207 | struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work); |
208 | 208 | ||
209 | if (work->single) { | 209 | if (work->single) { |
210 | work->single = 0; | 210 | work->single = 0; |
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 3fddf64c7fc6..59da0c3ea788 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c | |||
@@ -765,7 +765,7 @@ void setup_sparc64_timer(void) | |||
765 | : /* no outputs */ | 765 | : /* no outputs */ |
766 | : "r" (pstate)); | 766 | : "r" (pstate)); |
767 | 767 | ||
768 | sevt = &__get_cpu_var(sparc64_events); | 768 | sevt = this_cpu_ptr(&sparc64_events); |
769 | 769 | ||
770 | memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); | 770 | memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); |
771 | sevt->cpumask = cpumask_of(smp_processor_id()); | 771 | sevt->cpumask = cpumask_of(smp_processor_id()); |
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index b89aba217e3b..9df2190c097e 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c | |||
@@ -52,14 +52,14 @@ out: | |||
52 | 52 | ||
53 | void arch_enter_lazy_mmu_mode(void) | 53 | void arch_enter_lazy_mmu_mode(void) |
54 | { | 54 | { |
55 | struct tlb_batch *tb = &__get_cpu_var(tlb_batch); | 55 | struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); |
56 | 56 | ||
57 | tb->active = 1; | 57 | tb->active = 1; |
58 | } | 58 | } |
59 | 59 | ||
60 | void arch_leave_lazy_mmu_mode(void) | 60 | void arch_leave_lazy_mmu_mode(void) |
61 | { | 61 | { |
62 | struct tlb_batch *tb = &__get_cpu_var(tlb_batch); | 62 | struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); |
63 | 63 | ||
64 | if (tb->tlb_nr) | 64 | if (tb->tlb_nr) |
65 | flush_tlb_pending(); | 65 | flush_tlb_pending(); |
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 71af5747874d..60d62a292fce 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h | |||
@@ -140,12 +140,12 @@ extern unsigned int debug_smp_processor_id(void); | |||
140 | 140 | ||
141 | /* | 141 | /* |
142 | * Read the set of maskable interrupts. | 142 | * Read the set of maskable interrupts. |
143 | * We avoid the preemption warning here via __this_cpu_ptr since even | 143 | * We avoid the preemption warning here via raw_cpu_ptr since even |
144 | * if irqs are already enabled, it's harmless to read the wrong cpu's | 144 | * if irqs are already enabled, it's harmless to read the wrong cpu's |
145 | * enabled mask. | 145 | * enabled mask. |
146 | */ | 146 | */ |
147 | #define arch_local_irqs_enabled() \ | 147 | #define arch_local_irqs_enabled() \ |
148 | (*__this_cpu_ptr(&interrupts_enabled_mask)) | 148 | (*raw_cpu_ptr(&interrupts_enabled_mask)) |
149 | 149 | ||
150 | /* Re-enable all maskable interrupts. */ | 150 | /* Re-enable all maskable interrupts. */ |
151 | #define arch_local_irq_enable() \ | 151 | #define arch_local_irq_enable() \ |
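The rewritten comment above is the clearest statement of the trade-off in this series: raw_cpu_ptr() never triggers the debug-preemption warning, which is acceptable here because reading a possibly wrong CPU's mask is harmless. A sketch of the two flavours side by side, with an invented irq_mask variable:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, irq_mask);

	static unsigned long read_irq_mask_relaxed(void)
	{
		/* no preemption check; a wrong-CPU read is tolerated by design */
		return *raw_cpu_ptr(&irq_mask);
	}

	static unsigned long read_irq_mask_checked(void)
	{
		/* may warn under CONFIG_DEBUG_PREEMPT if preemption is enabled */
		return *this_cpu_ptr(&irq_mask);
	}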
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h index 4734215e2ad4..f67753db1f78 100644 --- a/arch/tile/include/asm/mmu_context.h +++ b/arch/tile/include/asm/mmu_context.h | |||
@@ -84,7 +84,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t) | |||
84 | * clear any pending DMA interrupts. | 84 | * clear any pending DMA interrupts. |
85 | */ | 85 | */ |
86 | if (current->thread.tile_dma_state.enabled) | 86 | if (current->thread.tile_dma_state.enabled) |
87 | install_page_table(mm->pgd, __get_cpu_var(current_asid)); | 87 | install_page_table(mm->pgd, __this_cpu_read(current_asid)); |
88 | #endif | 88 | #endif |
89 | } | 89 | } |
90 | 90 | ||
@@ -96,12 +96,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
96 | int cpu = smp_processor_id(); | 96 | int cpu = smp_processor_id(); |
97 | 97 | ||
98 | /* Pick new ASID. */ | 98 | /* Pick new ASID. */ |
99 | int asid = __get_cpu_var(current_asid) + 1; | 99 | int asid = __this_cpu_read(current_asid) + 1; |
100 | if (asid > max_asid) { | 100 | if (asid > max_asid) { |
101 | asid = min_asid; | 101 | asid = min_asid; |
102 | local_flush_tlb(); | 102 | local_flush_tlb(); |
103 | } | 103 | } |
104 | __get_cpu_var(current_asid) = asid; | 104 | __this_cpu_write(current_asid, asid); |
105 | 105 | ||
106 | /* Clear cpu from the old mm, and set it in the new one. */ | 106 | /* Clear cpu from the old mm, and set it in the new one. */ |
107 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); | 107 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); |
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index 637f2ffaa5f5..ba85765e1436 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c | |||
@@ -73,7 +73,7 @@ static DEFINE_PER_CPU(int, irq_depth); | |||
73 | */ | 73 | */ |
74 | void tile_dev_intr(struct pt_regs *regs, int intnum) | 74 | void tile_dev_intr(struct pt_regs *regs, int intnum) |
75 | { | 75 | { |
76 | int depth = __get_cpu_var(irq_depth)++; | 76 | int depth = __this_cpu_inc_return(irq_depth); |
77 | unsigned long original_irqs; | 77 | unsigned long original_irqs; |
78 | unsigned long remaining_irqs; | 78 | unsigned long remaining_irqs; |
79 | struct pt_regs *old_regs; | 79 | struct pt_regs *old_regs; |
@@ -120,7 +120,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum) | |||
120 | 120 | ||
121 | /* Count device irqs; Linux IPIs are counted elsewhere. */ | 121 | /* Count device irqs; Linux IPIs are counted elsewhere. */ |
122 | if (irq != IRQ_RESCHEDULE) | 122 | if (irq != IRQ_RESCHEDULE) |
123 | __get_cpu_var(irq_stat).irq_dev_intr_count++; | 123 | __this_cpu_inc(irq_stat.irq_dev_intr_count); |
124 | 124 | ||
125 | generic_handle_irq(irq); | 125 | generic_handle_irq(irq); |
126 | } | 126 | } |
@@ -130,10 +130,10 @@ void tile_dev_intr(struct pt_regs *regs, int intnum) | |||
130 | * including any that were reenabled during interrupt | 130 | * including any that were reenabled during interrupt |
131 | * handling. | 131 | * handling. |
132 | */ | 132 | */ |
133 | if (depth == 0) | 133 | if (depth == 1) |
134 | unmask_irqs(~__get_cpu_var(irq_disable_mask)); | 134 | unmask_irqs(~__this_cpu_read(irq_disable_mask)); |
135 | 135 | ||
136 | __get_cpu_var(irq_depth)--; | 136 | __this_cpu_dec(irq_depth); |
137 | 137 | ||
138 | /* | 138 | /* |
139 | * Track time spent against the current process again and | 139 | * Track time spent against the current process again and |
@@ -151,7 +151,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum) | |||
151 | static void tile_irq_chip_enable(struct irq_data *d) | 151 | static void tile_irq_chip_enable(struct irq_data *d) |
152 | { | 152 | { |
153 | get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq); | 153 | get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq); |
154 | if (__get_cpu_var(irq_depth) == 0) | 154 | if (__this_cpu_read(irq_depth) == 0) |
155 | unmask_irqs(1UL << d->irq); | 155 | unmask_irqs(1UL << d->irq); |
156 | put_cpu_var(irq_disable_mask); | 156 | put_cpu_var(irq_disable_mask); |
157 | } | 157 | } |
@@ -197,7 +197,7 @@ static void tile_irq_chip_ack(struct irq_data *d) | |||
197 | */ | 197 | */ |
198 | static void tile_irq_chip_eoi(struct irq_data *d) | 198 | static void tile_irq_chip_eoi(struct irq_data *d) |
199 | { | 199 | { |
200 | if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq))) | 200 | if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq))) |
201 | unmask_irqs(1UL << d->irq); | 201 | unmask_irqs(1UL << d->irq); |
202 | } | 202 | } |
203 | 203 | ||
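One paired change above is easy to miss: __get_cpu_var(irq_depth)++ evaluated to the pre-increment value, while __this_cpu_inc_return() returns the value after the increment, so the "am I the outermost handler" test moves from depth == 0 to depth == 1. Sketch of the equivalence with an invented nest_depth counter:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, nest_depth);

	static void handler_enter_exit(void)
	{
		/* old: int depth = __get_cpu_var(nest_depth)++;  outermost <=> depth == 0 */
		int depth = __this_cpu_inc_return(nest_depth);	/* post-increment value */

		if (depth == 1) {
			/* outermost entry on this CPU */
		}

		__this_cpu_dec(nest_depth);
	}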
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c index 7867266f9716..ac950be1318e 100644 --- a/arch/tile/kernel/messaging.c +++ b/arch/tile/kernel/messaging.c | |||
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(HV_MsgState, msg_state); | |||
28 | void init_messaging(void) | 28 | void init_messaging(void) |
29 | { | 29 | { |
30 | /* Allocate storage for messages in kernel space */ | 30 | /* Allocate storage for messages in kernel space */ |
31 | HV_MsgState *state = &__get_cpu_var(msg_state); | 31 | HV_MsgState *state = this_cpu_ptr(&msg_state); |
32 | int rc = hv_register_message_state(state); | 32 | int rc = hv_register_message_state(state); |
33 | if (rc != HV_OK) | 33 | if (rc != HV_OK) |
34 | panic("hv_register_message_state: error %d", rc); | 34 | panic("hv_register_message_state: error %d", rc); |
@@ -96,7 +96,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum) | |||
96 | struct hv_driver_cb *cb = | 96 | struct hv_driver_cb *cb = |
97 | (struct hv_driver_cb *)him->intarg; | 97 | (struct hv_driver_cb *)him->intarg; |
98 | cb->callback(cb, him->intdata); | 98 | cb->callback(cb, him->intdata); |
99 | __get_cpu_var(irq_stat).irq_hv_msg_count++; | 99 | __this_cpu_inc(irq_stat.irq_hv_msg_count); |
100 | } | 100 | } |
101 | } | 101 | } |
102 | 102 | ||
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c index 2bf6c9c135c1..bb509cee3b59 100644 --- a/arch/tile/kernel/perf_event.c +++ b/arch/tile/kernel/perf_event.c | |||
@@ -590,7 +590,7 @@ static int tile_event_set_period(struct perf_event *event) | |||
590 | */ | 590 | */ |
591 | static void tile_pmu_stop(struct perf_event *event, int flags) | 591 | static void tile_pmu_stop(struct perf_event *event, int flags) |
592 | { | 592 | { |
593 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 593 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
594 | struct hw_perf_event *hwc = &event->hw; | 594 | struct hw_perf_event *hwc = &event->hw; |
595 | int idx = hwc->idx; | 595 | int idx = hwc->idx; |
596 | 596 | ||
@@ -616,7 +616,7 @@ static void tile_pmu_stop(struct perf_event *event, int flags) | |||
616 | */ | 616 | */ |
617 | static void tile_pmu_start(struct perf_event *event, int flags) | 617 | static void tile_pmu_start(struct perf_event *event, int flags) |
618 | { | 618 | { |
619 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 619 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
620 | int idx = event->hw.idx; | 620 | int idx = event->hw.idx; |
621 | 621 | ||
622 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | 622 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) |
@@ -650,7 +650,7 @@ static void tile_pmu_start(struct perf_event *event, int flags) | |||
650 | */ | 650 | */ |
651 | static int tile_pmu_add(struct perf_event *event, int flags) | 651 | static int tile_pmu_add(struct perf_event *event, int flags) |
652 | { | 652 | { |
653 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 653 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
654 | struct hw_perf_event *hwc; | 654 | struct hw_perf_event *hwc; |
655 | unsigned long mask; | 655 | unsigned long mask; |
656 | int b, max_cnt; | 656 | int b, max_cnt; |
@@ -706,7 +706,7 @@ static int tile_pmu_add(struct perf_event *event, int flags) | |||
706 | */ | 706 | */ |
707 | static void tile_pmu_del(struct perf_event *event, int flags) | 707 | static void tile_pmu_del(struct perf_event *event, int flags) |
708 | { | 708 | { |
709 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 709 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
710 | int i; | 710 | int i; |
711 | 711 | ||
712 | /* | 712 | /* |
@@ -880,14 +880,14 @@ static struct pmu tilera_pmu = { | |||
880 | int tile_pmu_handle_irq(struct pt_regs *regs, int fault) | 880 | int tile_pmu_handle_irq(struct pt_regs *regs, int fault) |
881 | { | 881 | { |
882 | struct perf_sample_data data; | 882 | struct perf_sample_data data; |
883 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 883 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
884 | struct perf_event *event; | 884 | struct perf_event *event; |
885 | struct hw_perf_event *hwc; | 885 | struct hw_perf_event *hwc; |
886 | u64 val; | 886 | u64 val; |
887 | unsigned long status; | 887 | unsigned long status; |
888 | int bit; | 888 | int bit; |
889 | 889 | ||
890 | __get_cpu_var(perf_irqs)++; | 890 | __this_cpu_inc(perf_irqs); |
891 | 891 | ||
892 | if (!atomic_read(&tile_active_events)) | 892 | if (!atomic_read(&tile_active_events)) |
893 | return 0; | 893 | return 0; |
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 16ed58948757..0050cbc1d9de 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
@@ -64,7 +64,7 @@ early_param("idle", idle_setup); | |||
64 | 64 | ||
65 | void arch_cpu_idle(void) | 65 | void arch_cpu_idle(void) |
66 | { | 66 | { |
67 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; | 67 | __this_cpu_write(irq_stat.idle_timestamp, jiffies); |
68 | _cpu_idle(); | 68 | _cpu_idle(); |
69 | } | 69 | } |
70 | 70 | ||
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 112ababa9e55..b9736ded06f2 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -1218,7 +1218,8 @@ static void __init validate_hv(void) | |||
1218 | * various asid variables to their appropriate initial states. | 1218 | * various asid variables to their appropriate initial states. |
1219 | */ | 1219 | */ |
1220 | asid_range = hv_inquire_asid(0); | 1220 | asid_range = hv_inquire_asid(0); |
1221 | __get_cpu_var(current_asid) = min_asid = asid_range.start; | 1221 | min_asid = asid_range.start; |
1222 | __this_cpu_write(current_asid, min_asid); | ||
1222 | max_asid = asid_range.start + asid_range.size - 1; | 1223 | max_asid = asid_range.start + asid_range.size - 1; |
1223 | 1224 | ||
1224 | if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model, | 1225 | if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model, |
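The chained assignment in validate_hv() has to be split because __this_cpu_write() is an operation, not an lvalue: the global minimum is stored first and then written into the per-cpu variable. The same shape with invented names:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, cur_id);
	static int min_id;

	static void init_id_range(int start)
	{
		/* was: __get_cpu_var(cur_id) = min_id = start; */
		min_id = start;
		__this_cpu_write(cur_id, min_id);
	}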
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c index de07fa7d1315..6cb2ce31b5a2 100644 --- a/arch/tile/kernel/single_step.c +++ b/arch/tile/kernel/single_step.c | |||
@@ -740,7 +740,7 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc); | |||
740 | 740 | ||
741 | void gx_singlestep_handle(struct pt_regs *regs, int fault_num) | 741 | void gx_singlestep_handle(struct pt_regs *regs, int fault_num) |
742 | { | 742 | { |
743 | unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc); | 743 | unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc); |
744 | struct thread_info *info = (void *)current_thread_info(); | 744 | struct thread_info *info = (void *)current_thread_info(); |
745 | int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP); | 745 | int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP); |
746 | unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K); | 746 | unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K); |
@@ -766,7 +766,7 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num) | |||
766 | 766 | ||
767 | void single_step_once(struct pt_regs *regs) | 767 | void single_step_once(struct pt_regs *regs) |
768 | { | 768 | { |
769 | unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc); | 769 | unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc); |
770 | unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K); | 770 | unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K); |
771 | 771 | ||
772 | *ss_pc = regs->pc; | 772 | *ss_pc = regs->pc; |
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c index 19eaa62d456a..d3c4ed780ce2 100644 --- a/arch/tile/kernel/smp.c +++ b/arch/tile/kernel/smp.c | |||
@@ -189,7 +189,7 @@ EXPORT_SYMBOL(flush_icache_range); | |||
189 | /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ | 189 | /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ |
190 | static irqreturn_t handle_reschedule_ipi(int irq, void *token) | 190 | static irqreturn_t handle_reschedule_ipi(int irq, void *token) |
191 | { | 191 | { |
192 | __get_cpu_var(irq_stat).irq_resched_count++; | 192 | __this_cpu_inc(irq_stat.irq_resched_count); |
193 | scheduler_ipi(); | 193 | scheduler_ipi(); |
194 | 194 | ||
195 | return IRQ_HANDLED; | 195 | return IRQ_HANDLED; |
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index 732e9d138661..0d59a1b60c74 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c | |||
@@ -41,7 +41,7 @@ void __init smp_prepare_boot_cpu(void) | |||
41 | int cpu = smp_processor_id(); | 41 | int cpu = smp_processor_id(); |
42 | set_cpu_online(cpu, 1); | 42 | set_cpu_online(cpu, 1); |
43 | set_cpu_present(cpu, 1); | 43 | set_cpu_present(cpu, 1); |
44 | __get_cpu_var(cpu_state) = CPU_ONLINE; | 44 | __this_cpu_write(cpu_state, CPU_ONLINE); |
45 | 45 | ||
46 | init_messaging(); | 46 | init_messaging(); |
47 | } | 47 | } |
@@ -158,7 +158,7 @@ static void start_secondary(void) | |||
158 | /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */ | 158 | /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */ |
159 | 159 | ||
160 | /* Initialize the current asid for our first page table. */ | 160 | /* Initialize the current asid for our first page table. */ |
161 | __get_cpu_var(current_asid) = min_asid; | 161 | __this_cpu_write(current_asid, min_asid); |
162 | 162 | ||
163 | /* Set up this thread as another owner of the init_mm */ | 163 | /* Set up this thread as another owner of the init_mm */ |
164 | atomic_inc(&init_mm.mm_count); | 164 | atomic_inc(&init_mm.mm_count); |
@@ -201,7 +201,7 @@ void online_secondary(void) | |||
201 | notify_cpu_starting(smp_processor_id()); | 201 | notify_cpu_starting(smp_processor_id()); |
202 | 202 | ||
203 | set_cpu_online(smp_processor_id(), 1); | 203 | set_cpu_online(smp_processor_id(), 1); |
204 | __get_cpu_var(cpu_state) = CPU_ONLINE; | 204 | __this_cpu_write(cpu_state, CPU_ONLINE); |
205 | 205 | ||
206 | /* Set up tile-specific state for this cpu. */ | 206 | /* Set up tile-specific state for this cpu. */ |
207 | setup_cpu(0); | 207 | setup_cpu(0); |
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index c1b362277fb7..b854a1cd0079 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c | |||
@@ -162,7 +162,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = { | |||
162 | 162 | ||
163 | void setup_tile_timer(void) | 163 | void setup_tile_timer(void) |
164 | { | 164 | { |
165 | struct clock_event_device *evt = &__get_cpu_var(tile_timer); | 165 | struct clock_event_device *evt = this_cpu_ptr(&tile_timer); |
166 | 166 | ||
167 | /* Fill in fields that are speed-specific. */ | 167 | /* Fill in fields that are speed-specific. */ |
168 | clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC); | 168 | clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC); |
@@ -182,7 +182,7 @@ void setup_tile_timer(void) | |||
182 | void do_timer_interrupt(struct pt_regs *regs, int fault_num) | 182 | void do_timer_interrupt(struct pt_regs *regs, int fault_num) |
183 | { | 183 | { |
184 | struct pt_regs *old_regs = set_irq_regs(regs); | 184 | struct pt_regs *old_regs = set_irq_regs(regs); |
185 | struct clock_event_device *evt = &__get_cpu_var(tile_timer); | 185 | struct clock_event_device *evt = this_cpu_ptr(&tile_timer); |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * Mask the timer interrupt here, since we are a oneshot timer | 188 | * Mask the timer interrupt here, since we are a oneshot timer |
@@ -194,7 +194,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num) | |||
194 | irq_enter(); | 194 | irq_enter(); |
195 | 195 | ||
196 | /* Track interrupt count. */ | 196 | /* Track interrupt count. */ |
197 | __get_cpu_var(irq_stat).irq_timer_count++; | 197 | __this_cpu_inc(irq_stat.irq_timer_count); |
198 | 198 | ||
199 | /* Call the generic timer handler */ | 199 | /* Call the generic timer handler */ |
200 | evt->event_handler(evt); | 200 | evt->event_handler(evt); |
@@ -235,7 +235,7 @@ cycles_t ns2cycles(unsigned long nsecs) | |||
235 | * We do not have to disable preemption here as each core has the same | 235 | * We do not have to disable preemption here as each core has the same |
236 | * clock frequency. | 236 | * clock frequency. |
237 | */ | 237 | */ |
238 | struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer); | 238 | struct clock_event_device *dev = raw_cpu_ptr(&tile_timer); |
239 | 239 | ||
240 | /* | 240 | /* |
241 | * as in clocksource.h and x86's timer.h, we split the calculation | 241 | * as in clocksource.h and x86's timer.h, we split the calculation |
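The ns2cycles() hunk above uses raw_cpu_ptr() rather than this_cpu_ptr() because, per its comment, preemption need not be disabled there. A small sketch of that distinction, with illustrative names:

#include <linux/percpu.h>
#include <linux/types.h>

struct example_clock {
	u32 mult;
	u32 shift;
};

static DEFINE_PER_CPU(struct example_clock, example_clock);

static u64 example_scale(u64 delta)
{
	/* raw_cpu_ptr() is the replacement for __raw_get_cpu_var(): it also
	 * returns a pointer to the current CPU's copy, but without the
	 * preemption debug checks. That is acceptable here only because
	 * every CPU's copy is assumed to hold the same values. */
	struct example_clock *clk = raw_cpu_ptr(&example_clock);

	return (delta * clk->mult) >> clk->shift;
}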
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c index 0dc218294770..6aa2f2625447 100644 --- a/arch/tile/mm/highmem.c +++ b/arch/tile/mm/highmem.c | |||
@@ -103,7 +103,7 @@ static void kmap_atomic_register(struct page *page, int type, | |||
103 | spin_lock(&_lock); | 103 | spin_lock(&_lock); |
104 | 104 | ||
105 | /* With interrupts disabled, now fill in the per-cpu info. */ | 105 | /* With interrupts disabled, now fill in the per-cpu info. */ |
106 | amp = &__get_cpu_var(amps).per_type[type]; | 106 | amp = this_cpu_ptr(&amps.per_type[type]); |
107 | amp->page = page; | 107 | amp->page = page; |
108 | amp->cpu = smp_processor_id(); | 108 | amp->cpu = smp_processor_id(); |
109 | amp->va = va; | 109 | amp->va = va; |
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index a092e393bd20..caa270165f86 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c | |||
@@ -593,14 +593,14 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) | |||
593 | interrupt_mask_set_mask(-1ULL); | 593 | interrupt_mask_set_mask(-1ULL); |
594 | rc = flush_and_install_context(__pa(pgtables), | 594 | rc = flush_and_install_context(__pa(pgtables), |
595 | init_pgprot((unsigned long)pgtables), | 595 | init_pgprot((unsigned long)pgtables), |
596 | __get_cpu_var(current_asid), | 596 | __this_cpu_read(current_asid), |
597 | cpumask_bits(my_cpu_mask)); | 597 | cpumask_bits(my_cpu_mask)); |
598 | interrupt_mask_restore_mask(irqmask); | 598 | interrupt_mask_restore_mask(irqmask); |
599 | BUG_ON(rc != 0); | 599 | BUG_ON(rc != 0); |
600 | 600 | ||
601 | /* Copy the page table back to the normal swapper_pg_dir. */ | 601 | /* Copy the page table back to the normal swapper_pg_dir. */ |
602 | memcpy(pgd_base, pgtables, sizeof(pgtables)); | 602 | memcpy(pgd_base, pgtables, sizeof(pgtables)); |
603 | __install_page_table(pgd_base, __get_cpu_var(current_asid), | 603 | __install_page_table(pgd_base, __this_cpu_read(current_asid), |
604 | swapper_pgprot); | 604 | swapper_pgprot); |
605 | 605 | ||
606 | /* | 606 | /* |
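In the mm/init.c hunk the old code read __get_cpu_var(current_asid) only to pass the value along, so the conversion uses __this_cpu_read() instead of taking a pointer. A sketch, with a stand-in for the arch hook:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, example_asid);

static void example_install_context(int asid)
{
	/* Arch-specific work elided in this sketch. */
}

static void example_reinstall(void)
{
	/* __this_cpu_read() fetches this CPU's copy by value; no pointer
	 * to the per-cpu slot is ever formed. */
	example_install_context(__this_cpu_read(example_asid));
}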
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index 4b528a970bd4..61fd18b83b6c 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h | |||
@@ -97,11 +97,11 @@ extern void hw_breakpoint_restore(void); | |||
97 | DECLARE_PER_CPU(int, debug_stack_usage); | 97 | DECLARE_PER_CPU(int, debug_stack_usage); |
98 | static inline void debug_stack_usage_inc(void) | 98 | static inline void debug_stack_usage_inc(void) |
99 | { | 99 | { |
100 | __get_cpu_var(debug_stack_usage)++; | 100 | __this_cpu_inc(debug_stack_usage); |
101 | } | 101 | } |
102 | static inline void debug_stack_usage_dec(void) | 102 | static inline void debug_stack_usage_dec(void) |
103 | { | 103 | { |
104 | __get_cpu_var(debug_stack_usage)--; | 104 | __this_cpu_dec(debug_stack_usage); |
105 | } | 105 | } |
106 | int is_debug_stack(unsigned long addr); | 106 | int is_debug_stack(unsigned long addr); |
107 | void debug_stack_set_zero(void); | 107 | void debug_stack_set_zero(void); |
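debugreg.h converts bare ++/-- on the per-cpu lvalue into __this_cpu_inc()/__this_cpu_dec(), which express the read-modify-write as one per-cpu operation. A minimal illustrative version:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, example_usage);

static inline void example_usage_inc(void)
{
	/* Was: __get_cpu_var(example_usage)++; */
	__this_cpu_inc(example_usage);
}

static inline void example_usage_dec(void)
{
	/* Was: __get_cpu_var(example_usage)--; */
	__this_cpu_dec(example_usage);
}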
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index 85e13ccf15c4..d725382c2ae0 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h | |||
@@ -189,7 +189,7 @@ static inline int p4_ht_thread(int cpu) | |||
189 | { | 189 | { |
190 | #ifdef CONFIG_SMP | 190 | #ifdef CONFIG_SMP |
191 | if (smp_num_siblings == 2) | 191 | if (smp_num_siblings == 2) |
192 | return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map)); | 192 | return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map)); |
193 | #endif | 193 | #endif |
194 | return 0; | 194 | return 0; |
195 | } | 195 | } |
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index c63e925fd6b7..a00ad8f2a657 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -164,7 +164,7 @@ struct uv_hub_info_s { | |||
164 | }; | 164 | }; |
165 | 165 | ||
166 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | 166 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); |
167 | #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) | 167 | #define uv_hub_info this_cpu_ptr(&__uv_hub_info) |
168 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) | 168 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) |
169 | 169 | ||
170 | /* | 170 | /* |
@@ -601,16 +601,16 @@ struct uv_hub_nmi_s { | |||
601 | 601 | ||
602 | struct uv_cpu_nmi_s { | 602 | struct uv_cpu_nmi_s { |
603 | struct uv_hub_nmi_s *hub; | 603 | struct uv_hub_nmi_s *hub; |
604 | atomic_t state; | 604 | int state; |
605 | atomic_t pinging; | 605 | int pinging; |
606 | int queries; | 606 | int queries; |
607 | int pings; | 607 | int pings; |
608 | }; | 608 | }; |
609 | 609 | ||
610 | DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi); | 610 | DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi); |
611 | #define uv_cpu_nmi (__get_cpu_var(__uv_cpu_nmi)) | 611 | |
612 | #define uv_hub_nmi (uv_cpu_nmi.hub) | 612 | #define uv_hub_nmi (uv_cpu_nmi.hub) |
613 | #define uv_cpu_nmi_per(cpu) (per_cpu(__uv_cpu_nmi, cpu)) | 613 | #define uv_cpu_nmi_per(cpu) (per_cpu(uv_cpu_nmi, cpu)) |
614 | #define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub) | 614 | #define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub) |
615 | 615 | ||
616 | /* uv_cpu_nmi_states */ | 616 | /* uv_cpu_nmi_states */ |
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index af5b08ab3b71..5972b108f15a 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c | |||
@@ -146,7 +146,7 @@ static inline int is_apbt_capable(void) | |||
146 | static int __init apbt_clockevent_register(void) | 146 | static int __init apbt_clockevent_register(void) |
147 | { | 147 | { |
148 | struct sfi_timer_table_entry *mtmr; | 148 | struct sfi_timer_table_entry *mtmr; |
149 | struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev); | 149 | struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev); |
150 | 150 | ||
151 | mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); | 151 | mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); |
152 | if (mtmr == NULL) { | 152 | if (mtmr == NULL) { |
@@ -200,7 +200,7 @@ void apbt_setup_secondary_clock(void) | |||
200 | if (!cpu) | 200 | if (!cpu) |
201 | return; | 201 | return; |
202 | 202 | ||
203 | adev = &__get_cpu_var(cpu_apbt_dev); | 203 | adev = this_cpu_ptr(&cpu_apbt_dev); |
204 | if (!adev->timer) { | 204 | if (!adev->timer) { |
205 | adev->timer = dw_apb_clockevent_init(cpu, adev->name, | 205 | adev->timer = dw_apb_clockevent_init(cpu, adev->name, |
206 | APBT_CLOCKEVENT_RATING, adev_virt_addr(adev), | 206 | APBT_CLOCKEVENT_RATING, adev_virt_addr(adev), |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 67760275544b..00853b254ab0 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -561,7 +561,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | |||
561 | */ | 561 | */ |
562 | static void setup_APIC_timer(void) | 562 | static void setup_APIC_timer(void) |
563 | { | 563 | { |
564 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 564 | struct clock_event_device *levt = this_cpu_ptr(&lapic_events); |
565 | 565 | ||
566 | if (this_cpu_has(X86_FEATURE_ARAT)) { | 566 | if (this_cpu_has(X86_FEATURE_ARAT)) { |
567 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; | 567 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; |
@@ -696,7 +696,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc) | |||
696 | 696 | ||
697 | static int __init calibrate_APIC_clock(void) | 697 | static int __init calibrate_APIC_clock(void) |
698 | { | 698 | { |
699 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 699 | struct clock_event_device *levt = this_cpu_ptr(&lapic_events); |
700 | void (*real_handler)(struct clock_event_device *dev); | 700 | void (*real_handler)(struct clock_event_device *dev); |
701 | unsigned long deltaj; | 701 | unsigned long deltaj; |
702 | long delta, deltatsc; | 702 | long delta, deltatsc; |
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 6ce600f9bc78..e658f21681c8 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -42,7 +42,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) | |||
42 | * We are to modify mask, so we need an own copy | 42 | * We are to modify mask, so we need an own copy |
43 | * and be sure it's manipulated with irq off. | 43 | * and be sure it's manipulated with irq off. |
44 | */ | 44 | */ |
45 | ipi_mask_ptr = __raw_get_cpu_var(ipi_mask); | 45 | ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask); |
46 | cpumask_copy(ipi_mask_ptr, mask); | 46 | cpumask_copy(ipi_mask_ptr, mask); |
47 | 47 | ||
48 | /* | 48 | /* |
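The x2apic hunk is the cpumask_var_t special case: depending on CONFIG_CPUMASK_OFFSTACK the type is either an embedded array or a pointer, so neither this_cpu_ptr() nor a plain read fits both builds, and the conversion goes through this_cpu_cpumask_var_ptr(). A sketch under that assumption, with an illustrative mask:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Illustrative per-cpu scratch mask; with CONFIG_CPUMASK_OFFSTACK it
 * would need to be allocated per CPU with zalloc_cpumask_var(). */
static DEFINE_PER_CPU(cpumask_var_t, example_scratch_mask);

static void example_copy_mask(const struct cpumask *src)
{
	/* this_cpu_cpumask_var_ptr() yields a usable struct cpumask pointer
	 * for this CPU in either configuration, replacing the old
	 * __raw_get_cpu_var(example_scratch_mask) expression. */
	struct cpumask *dst = this_cpu_cpumask_var_ptr(example_scratch_mask);

	cpumask_copy(dst, src);
}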
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3eff36f719fb..4b4f78c9ba19 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -1200,9 +1200,9 @@ DEFINE_PER_CPU(int, debug_stack_usage); | |||
1200 | 1200 | ||
1201 | int is_debug_stack(unsigned long addr) | 1201 | int is_debug_stack(unsigned long addr) |
1202 | { | 1202 | { |
1203 | return __get_cpu_var(debug_stack_usage) || | 1203 | return __this_cpu_read(debug_stack_usage) || |
1204 | (addr <= __get_cpu_var(debug_stack_addr) && | 1204 | (addr <= __this_cpu_read(debug_stack_addr) && |
1205 | addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); | 1205 | addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ)); |
1206 | } | 1206 | } |
1207 | NOKPROBE_SYMBOL(is_debug_stack); | 1207 | NOKPROBE_SYMBOL(is_debug_stack); |
1208 | 1208 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 5ac2d1fb28bc..4cfba4371a71 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c | |||
@@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex); | |||
83 | static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) | 83 | static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) |
84 | { | 84 | { |
85 | int cpu = smp_processor_id(); | 85 | int cpu = smp_processor_id(); |
86 | struct mce *m = &__get_cpu_var(injectm); | 86 | struct mce *m = this_cpu_ptr(&injectm); |
87 | if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) | 87 | if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) |
88 | return NMI_DONE; | 88 | return NMI_DONE; |
89 | cpumask_clear_cpu(cpu, mce_inject_cpumask); | 89 | cpumask_clear_cpu(cpu, mce_inject_cpumask); |
@@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) | |||
97 | static void mce_irq_ipi(void *info) | 97 | static void mce_irq_ipi(void *info) |
98 | { | 98 | { |
99 | int cpu = smp_processor_id(); | 99 | int cpu = smp_processor_id(); |
100 | struct mce *m = &__get_cpu_var(injectm); | 100 | struct mce *m = this_cpu_ptr(&injectm); |
101 | 101 | ||
102 | if (cpumask_test_cpu(cpu, mce_inject_cpumask) && | 102 | if (cpumask_test_cpu(cpu, mce_inject_cpumask) && |
103 | m->inject_flags & MCJ_EXCEPTION) { | 103 | m->inject_flags & MCJ_EXCEPTION) { |
@@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info) | |||
109 | /* Inject mce on current CPU */ | 109 | /* Inject mce on current CPU */ |
110 | static int raise_local(void) | 110 | static int raise_local(void) |
111 | { | 111 | { |
112 | struct mce *m = &__get_cpu_var(injectm); | 112 | struct mce *m = this_cpu_ptr(&injectm); |
113 | int context = MCJ_CTX(m->inject_flags); | 113 | int context = MCJ_CTX(m->inject_flags); |
114 | int ret = 0; | 114 | int ret = 0; |
115 | int cpu = m->extcpu; | 115 | int cpu = m->extcpu; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index bd9ccda8087f..61a9668cebfd 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr) | |||
400 | 400 | ||
401 | if (offset < 0) | 401 | if (offset < 0) |
402 | return 0; | 402 | return 0; |
403 | return *(u64 *)((char *)&__get_cpu_var(injectm) + offset); | 403 | return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); |
404 | } | 404 | } |
405 | 405 | ||
406 | if (rdmsrl_safe(msr, &v)) { | 406 | if (rdmsrl_safe(msr, &v)) { |
@@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v) | |||
422 | int offset = msr_to_offset(msr); | 422 | int offset = msr_to_offset(msr); |
423 | 423 | ||
424 | if (offset >= 0) | 424 | if (offset >= 0) |
425 | *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v; | 425 | *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; |
426 | return; | 426 | return; |
427 | } | 427 | } |
428 | wrmsrl(msr, v); | 428 | wrmsrl(msr, v); |
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring); | |||
478 | /* Runs with CPU affinity in workqueue */ | 478 | /* Runs with CPU affinity in workqueue */ |
479 | static int mce_ring_empty(void) | 479 | static int mce_ring_empty(void) |
480 | { | 480 | { |
481 | struct mce_ring *r = &__get_cpu_var(mce_ring); | 481 | struct mce_ring *r = this_cpu_ptr(&mce_ring); |
482 | 482 | ||
483 | return r->start == r->end; | 483 | return r->start == r->end; |
484 | } | 484 | } |
@@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn) | |||
490 | 490 | ||
491 | *pfn = 0; | 491 | *pfn = 0; |
492 | get_cpu(); | 492 | get_cpu(); |
493 | r = &__get_cpu_var(mce_ring); | 493 | r = this_cpu_ptr(&mce_ring); |
494 | if (r->start == r->end) | 494 | if (r->start == r->end) |
495 | goto out; | 495 | goto out; |
496 | *pfn = r->ring[r->start]; | 496 | *pfn = r->ring[r->start]; |
@@ -504,7 +504,7 @@ out: | |||
504 | /* Always runs in MCE context with preempt off */ | 504 | /* Always runs in MCE context with preempt off */ |
505 | static int mce_ring_add(unsigned long pfn) | 505 | static int mce_ring_add(unsigned long pfn) |
506 | { | 506 | { |
507 | struct mce_ring *r = &__get_cpu_var(mce_ring); | 507 | struct mce_ring *r = this_cpu_ptr(&mce_ring); |
508 | unsigned next; | 508 | unsigned next; |
509 | 509 | ||
510 | next = (r->end + 1) % MCE_RING_SIZE; | 510 | next = (r->end + 1) % MCE_RING_SIZE; |
@@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c) | |||
526 | static void mce_schedule_work(void) | 526 | static void mce_schedule_work(void) |
527 | { | 527 | { |
528 | if (!mce_ring_empty()) | 528 | if (!mce_ring_empty()) |
529 | schedule_work(&__get_cpu_var(mce_work)); | 529 | schedule_work(this_cpu_ptr(&mce_work)); |
530 | } | 530 | } |
531 | 531 | ||
532 | DEFINE_PER_CPU(struct irq_work, mce_irq_work); | 532 | DEFINE_PER_CPU(struct irq_work, mce_irq_work); |
@@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs) | |||
551 | return; | 551 | return; |
552 | } | 552 | } |
553 | 553 | ||
554 | irq_work_queue(&__get_cpu_var(mce_irq_work)); | 554 | irq_work_queue(this_cpu_ptr(&mce_irq_work)); |
555 | } | 555 | } |
556 | 556 | ||
557 | /* | 557 | /* |
@@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) | |||
1045 | 1045 | ||
1046 | mce_gather_info(&m, regs); | 1046 | mce_gather_info(&m, regs); |
1047 | 1047 | ||
1048 | final = &__get_cpu_var(mces_seen); | 1048 | final = this_cpu_ptr(&mces_seen); |
1049 | *final = m; | 1049 | *final = m; |
1050 | 1050 | ||
1051 | memset(valid_banks, 0, sizeof(valid_banks)); | 1051 | memset(valid_banks, 0, sizeof(valid_banks)); |
@@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) = | |||
1278 | 1278 | ||
1279 | static int cmc_error_seen(void) | 1279 | static int cmc_error_seen(void) |
1280 | { | 1280 | { |
1281 | unsigned long *v = &__get_cpu_var(mce_polled_error); | 1281 | unsigned long *v = this_cpu_ptr(&mce_polled_error); |
1282 | 1282 | ||
1283 | return test_and_clear_bit(0, v); | 1283 | return test_and_clear_bit(0, v); |
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | static void mce_timer_fn(unsigned long data) | 1286 | static void mce_timer_fn(unsigned long data) |
1287 | { | 1287 | { |
1288 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1288 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1289 | unsigned long iv; | 1289 | unsigned long iv; |
1290 | int notify; | 1290 | int notify; |
1291 | 1291 | ||
1292 | WARN_ON(smp_processor_id() != data); | 1292 | WARN_ON(smp_processor_id() != data); |
1293 | 1293 | ||
1294 | if (mce_available(__this_cpu_ptr(&cpu_info))) { | 1294 | if (mce_available(this_cpu_ptr(&cpu_info))) { |
1295 | machine_check_poll(MCP_TIMESTAMP, | 1295 | machine_check_poll(MCP_TIMESTAMP, |
1296 | &__get_cpu_var(mce_poll_banks)); | 1296 | this_cpu_ptr(&mce_poll_banks)); |
1297 | mce_intel_cmci_poll(); | 1297 | mce_intel_cmci_poll(); |
1298 | } | 1298 | } |
1299 | 1299 | ||
@@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data) | |||
1323 | */ | 1323 | */ |
1324 | void mce_timer_kick(unsigned long interval) | 1324 | void mce_timer_kick(unsigned long interval) |
1325 | { | 1325 | { |
1326 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1326 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1327 | unsigned long when = jiffies + interval; | 1327 | unsigned long when = jiffies + interval; |
1328 | unsigned long iv = __this_cpu_read(mce_next_interval); | 1328 | unsigned long iv = __this_cpu_read(mce_next_interval); |
1329 | 1329 | ||
@@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t) | |||
1659 | 1659 | ||
1660 | static void __mcheck_cpu_init_timer(void) | 1660 | static void __mcheck_cpu_init_timer(void) |
1661 | { | 1661 | { |
1662 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1662 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1663 | unsigned int cpu = smp_processor_id(); | 1663 | unsigned int cpu = smp_processor_id(); |
1664 | 1664 | ||
1665 | setup_timer(t, mce_timer_fn, cpu); | 1665 | setup_timer(t, mce_timer_fn, cpu); |
@@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) | |||
1702 | __mcheck_cpu_init_generic(); | 1702 | __mcheck_cpu_init_generic(); |
1703 | __mcheck_cpu_init_vendor(c); | 1703 | __mcheck_cpu_init_vendor(c); |
1704 | __mcheck_cpu_init_timer(); | 1704 | __mcheck_cpu_init_timer(); |
1705 | INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); | 1705 | INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work); |
1706 | init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb); | 1706 | init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb); |
1707 | } | 1707 | } |
1708 | 1708 | ||
1709 | /* | 1709 | /* |
@@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = { | |||
1955 | static void __mce_disable_bank(void *arg) | 1955 | static void __mce_disable_bank(void *arg) |
1956 | { | 1956 | { |
1957 | int bank = *((int *)arg); | 1957 | int bank = *((int *)arg); |
1958 | __clear_bit(bank, __get_cpu_var(mce_poll_banks)); | 1958 | __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); |
1959 | cmci_disable_bank(bank); | 1959 | cmci_disable_bank(bank); |
1960 | } | 1960 | } |
1961 | 1961 | ||
@@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void) | |||
2065 | static void mce_syscore_resume(void) | 2065 | static void mce_syscore_resume(void) |
2066 | { | 2066 | { |
2067 | __mcheck_cpu_init_generic(); | 2067 | __mcheck_cpu_init_generic(); |
2068 | __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); | 2068 | __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); |
2069 | } | 2069 | } |
2070 | 2070 | ||
2071 | static struct syscore_ops mce_syscore_ops = { | 2071 | static struct syscore_ops mce_syscore_ops = { |
@@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = { | |||
2080 | 2080 | ||
2081 | static void mce_cpu_restart(void *data) | 2081 | static void mce_cpu_restart(void *data) |
2082 | { | 2082 | { |
2083 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2083 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2084 | return; | 2084 | return; |
2085 | __mcheck_cpu_init_generic(); | 2085 | __mcheck_cpu_init_generic(); |
2086 | __mcheck_cpu_init_timer(); | 2086 | __mcheck_cpu_init_timer(); |
@@ -2096,14 +2096,14 @@ static void mce_restart(void) | |||
2096 | /* Toggle features for corrected errors */ | 2096 | /* Toggle features for corrected errors */ |
2097 | static void mce_disable_cmci(void *data) | 2097 | static void mce_disable_cmci(void *data) |
2098 | { | 2098 | { |
2099 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2099 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2100 | return; | 2100 | return; |
2101 | cmci_clear(); | 2101 | cmci_clear(); |
2102 | } | 2102 | } |
2103 | 2103 | ||
2104 | static void mce_enable_ce(void *all) | 2104 | static void mce_enable_ce(void *all) |
2105 | { | 2105 | { |
2106 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2106 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2107 | return; | 2107 | return; |
2108 | cmci_reenable(); | 2108 | cmci_reenable(); |
2109 | cmci_recheck(); | 2109 | cmci_recheck(); |
@@ -2336,7 +2336,7 @@ static void mce_disable_cpu(void *h) | |||
2336 | unsigned long action = *(unsigned long *)h; | 2336 | unsigned long action = *(unsigned long *)h; |
2337 | int i; | 2337 | int i; |
2338 | 2338 | ||
2339 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2339 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2340 | return; | 2340 | return; |
2341 | 2341 | ||
2342 | if (!(action & CPU_TASKS_FROZEN)) | 2342 | if (!(action & CPU_TASKS_FROZEN)) |
@@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h) | |||
2354 | unsigned long action = *(unsigned long *)h; | 2354 | unsigned long action = *(unsigned long *)h; |
2355 | int i; | 2355 | int i; |
2356 | 2356 | ||
2357 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 2357 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
2358 | return; | 2358 | return; |
2359 | 2359 | ||
2360 | if (!(action & CPU_TASKS_FROZEN)) | 2360 | if (!(action & CPU_TASKS_FROZEN)) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 1e49f8f41276..5d4999f95aec 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void) | |||
310 | * event. | 310 | * event. |
311 | */ | 311 | */ |
312 | machine_check_poll(MCP_TIMESTAMP, | 312 | machine_check_poll(MCP_TIMESTAMP, |
313 | &__get_cpu_var(mce_poll_banks)); | 313 | this_cpu_ptr(&mce_poll_banks)); |
314 | 314 | ||
315 | if (high & MASK_OVERFLOW_HI) { | 315 | if (high & MASK_OVERFLOW_HI) { |
316 | rdmsrl(address, m.misc); | 316 | rdmsrl(address, m.misc); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 3bdb95ae8c43..b3c97bafc123 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c | |||
@@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void) | |||
86 | { | 86 | { |
87 | if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) | 87 | if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) |
88 | return; | 88 | return; |
89 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | 89 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
90 | } | 90 | } |
91 | 91 | ||
92 | void mce_intel_hcpu_update(unsigned long cpu) | 92 | void mce_intel_hcpu_update(unsigned long cpu) |
@@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void) | |||
145 | u64 val; | 145 | u64 val; |
146 | 146 | ||
147 | raw_spin_lock_irqsave(&cmci_discover_lock, flags); | 147 | raw_spin_lock_irqsave(&cmci_discover_lock, flags); |
148 | owned = __get_cpu_var(mce_banks_owned); | 148 | owned = this_cpu_ptr(mce_banks_owned); |
149 | for_each_set_bit(bank, owned, MAX_NR_BANKS) { | 149 | for_each_set_bit(bank, owned, MAX_NR_BANKS) { |
150 | rdmsrl(MSR_IA32_MCx_CTL2(bank), val); | 150 | rdmsrl(MSR_IA32_MCx_CTL2(bank), val); |
151 | val &= ~MCI_CTL2_CMCI_EN; | 151 | val &= ~MCI_CTL2_CMCI_EN; |
@@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void) | |||
195 | { | 195 | { |
196 | if (cmci_storm_detect()) | 196 | if (cmci_storm_detect()) |
197 | return; | 197 | return; |
198 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | 198 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
199 | mce_notify_irq(); | 199 | mce_notify_irq(); |
200 | } | 200 | } |
201 | 201 | ||
@@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void) | |||
206 | */ | 206 | */ |
207 | static void cmci_discover(int banks) | 207 | static void cmci_discover(int banks) |
208 | { | 208 | { |
209 | unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned); | 209 | unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned); |
210 | unsigned long flags; | 210 | unsigned long flags; |
211 | int i; | 211 | int i; |
212 | int bios_wrong_thresh = 0; | 212 | int bios_wrong_thresh = 0; |
@@ -228,7 +228,7 @@ static void cmci_discover(int banks) | |||
228 | /* Already owned by someone else? */ | 228 | /* Already owned by someone else? */ |
229 | if (val & MCI_CTL2_CMCI_EN) { | 229 | if (val & MCI_CTL2_CMCI_EN) { |
230 | clear_bit(i, owned); | 230 | clear_bit(i, owned); |
231 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | 231 | __clear_bit(i, this_cpu_ptr(mce_poll_banks)); |
232 | continue; | 232 | continue; |
233 | } | 233 | } |
234 | 234 | ||
@@ -252,7 +252,7 @@ static void cmci_discover(int banks) | |||
252 | /* Did the enable bit stick? -- the bank supports CMCI */ | 252 | /* Did the enable bit stick? -- the bank supports CMCI */ |
253 | if (val & MCI_CTL2_CMCI_EN) { | 253 | if (val & MCI_CTL2_CMCI_EN) { |
254 | set_bit(i, owned); | 254 | set_bit(i, owned); |
255 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | 255 | __clear_bit(i, this_cpu_ptr(mce_poll_banks)); |
256 | /* | 256 | /* |
257 | * We are able to set thresholds for some banks that | 257 | * We are able to set thresholds for some banks that |
258 | * had a threshold of 0. This means the BIOS has not | 258 | * had a threshold of 0. This means the BIOS has not |
@@ -263,7 +263,7 @@ static void cmci_discover(int banks) | |||
263 | (val & MCI_CTL2_CMCI_THRESHOLD_MASK)) | 263 | (val & MCI_CTL2_CMCI_THRESHOLD_MASK)) |
264 | bios_wrong_thresh = 1; | 264 | bios_wrong_thresh = 1; |
265 | } else { | 265 | } else { |
266 | WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); | 266 | WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks))); |
267 | } | 267 | } |
268 | } | 268 | } |
269 | raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); | 269 | raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); |
@@ -284,10 +284,10 @@ void cmci_recheck(void) | |||
284 | unsigned long flags; | 284 | unsigned long flags; |
285 | int banks; | 285 | int banks; |
286 | 286 | ||
287 | if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) | 287 | if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) |
288 | return; | 288 | return; |
289 | local_irq_save(flags); | 289 | local_irq_save(flags); |
290 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | 290 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
291 | local_irq_restore(flags); | 291 | local_irq_restore(flags); |
292 | } | 292 | } |
293 | 293 | ||
@@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank) | |||
296 | { | 296 | { |
297 | u64 val; | 297 | u64 val; |
298 | 298 | ||
299 | if (!test_bit(bank, __get_cpu_var(mce_banks_owned))) | 299 | if (!test_bit(bank, this_cpu_ptr(mce_banks_owned))) |
300 | return; | 300 | return; |
301 | rdmsrl(MSR_IA32_MCx_CTL2(bank), val); | 301 | rdmsrl(MSR_IA32_MCx_CTL2(bank), val); |
302 | val &= ~MCI_CTL2_CMCI_EN; | 302 | val &= ~MCI_CTL2_CMCI_EN; |
303 | wrmsrl(MSR_IA32_MCx_CTL2(bank), val); | 303 | wrmsrl(MSR_IA32_MCx_CTL2(bank), val); |
304 | __clear_bit(bank, __get_cpu_var(mce_banks_owned)); | 304 | __clear_bit(bank, this_cpu_ptr(mce_banks_owned)); |
305 | } | 305 | } |
306 | 306 | ||
307 | /* | 307 | /* |
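Several mce hunks operate on per-cpu bitmaps (mce_poll_banks, mce_banks_owned); there this_cpu_ptr() is handed the array symbol and the result feeds the bit helpers directly. A hedged sketch with an invented bank count:

#include <linux/bitops.h>
#include <linux/percpu.h>

#define EXAMPLE_NR_BANKS 32

/* Illustrative per-cpu bitmap of banks owned by this CPU. */
static DEFINE_PER_CPU(unsigned long,
		      example_banks_owned[BITS_TO_LONGS(EXAMPLE_NR_BANKS)]);

static void example_release_bank(int bank)
{
	/* The per-cpu symbol is an array, so it decays to a pointer and
	 * this_cpu_ptr() returns the start of this CPU's copy of the
	 * bitmap; __clear_bit() then updates only that copy. */
	__clear_bit(bank, this_cpu_ptr(example_banks_owned));
}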
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 16c73022306e..1b8299dd3d91 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -494,7 +494,7 @@ static int __x86_pmu_event_init(struct perf_event *event) | |||
494 | 494 | ||
495 | void x86_pmu_disable_all(void) | 495 | void x86_pmu_disable_all(void) |
496 | { | 496 | { |
497 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 497 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
498 | int idx; | 498 | int idx; |
499 | 499 | ||
500 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 500 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
@@ -512,7 +512,7 @@ void x86_pmu_disable_all(void) | |||
512 | 512 | ||
513 | static void x86_pmu_disable(struct pmu *pmu) | 513 | static void x86_pmu_disable(struct pmu *pmu) |
514 | { | 514 | { |
515 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 515 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
516 | 516 | ||
517 | if (!x86_pmu_initialized()) | 517 | if (!x86_pmu_initialized()) |
518 | return; | 518 | return; |
@@ -529,7 +529,7 @@ static void x86_pmu_disable(struct pmu *pmu) | |||
529 | 529 | ||
530 | void x86_pmu_enable_all(int added) | 530 | void x86_pmu_enable_all(int added) |
531 | { | 531 | { |
532 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 532 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
533 | int idx; | 533 | int idx; |
534 | 534 | ||
535 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 535 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
@@ -876,7 +876,7 @@ static void x86_pmu_start(struct perf_event *event, int flags); | |||
876 | 876 | ||
877 | static void x86_pmu_enable(struct pmu *pmu) | 877 | static void x86_pmu_enable(struct pmu *pmu) |
878 | { | 878 | { |
879 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 879 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
880 | struct perf_event *event; | 880 | struct perf_event *event; |
881 | struct hw_perf_event *hwc; | 881 | struct hw_perf_event *hwc; |
882 | int i, added = cpuc->n_added; | 882 | int i, added = cpuc->n_added; |
@@ -1030,7 +1030,7 @@ void x86_pmu_enable_event(struct perf_event *event) | |||
1030 | */ | 1030 | */ |
1031 | static int x86_pmu_add(struct perf_event *event, int flags) | 1031 | static int x86_pmu_add(struct perf_event *event, int flags) |
1032 | { | 1032 | { |
1033 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1033 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1034 | struct hw_perf_event *hwc; | 1034 | struct hw_perf_event *hwc; |
1035 | int assign[X86_PMC_IDX_MAX]; | 1035 | int assign[X86_PMC_IDX_MAX]; |
1036 | int n, n0, ret; | 1036 | int n, n0, ret; |
@@ -1081,7 +1081,7 @@ out: | |||
1081 | 1081 | ||
1082 | static void x86_pmu_start(struct perf_event *event, int flags) | 1082 | static void x86_pmu_start(struct perf_event *event, int flags) |
1083 | { | 1083 | { |
1084 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1084 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1085 | int idx = event->hw.idx; | 1085 | int idx = event->hw.idx; |
1086 | 1086 | ||
1087 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | 1087 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) |
@@ -1160,7 +1160,7 @@ void perf_event_print_debug(void) | |||
1160 | 1160 | ||
1161 | void x86_pmu_stop(struct perf_event *event, int flags) | 1161 | void x86_pmu_stop(struct perf_event *event, int flags) |
1162 | { | 1162 | { |
1163 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1163 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1164 | struct hw_perf_event *hwc = &event->hw; | 1164 | struct hw_perf_event *hwc = &event->hw; |
1165 | 1165 | ||
1166 | if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { | 1166 | if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { |
@@ -1182,7 +1182,7 @@ void x86_pmu_stop(struct perf_event *event, int flags) | |||
1182 | 1182 | ||
1183 | static void x86_pmu_del(struct perf_event *event, int flags) | 1183 | static void x86_pmu_del(struct perf_event *event, int flags) |
1184 | { | 1184 | { |
1185 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1185 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1186 | int i; | 1186 | int i; |
1187 | 1187 | ||
1188 | /* | 1188 | /* |
@@ -1237,7 +1237,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1237 | int idx, handled = 0; | 1237 | int idx, handled = 0; |
1238 | u64 val; | 1238 | u64 val; |
1239 | 1239 | ||
1240 | cpuc = &__get_cpu_var(cpu_hw_events); | 1240 | cpuc = this_cpu_ptr(&cpu_hw_events); |
1241 | 1241 | ||
1242 | /* | 1242 | /* |
1243 | * Some chipsets need to unmask the LVTPC in a particular spot | 1243 | * Some chipsets need to unmask the LVTPC in a particular spot |
@@ -1646,7 +1646,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) | |||
1646 | */ | 1646 | */ |
1647 | static int x86_pmu_commit_txn(struct pmu *pmu) | 1647 | static int x86_pmu_commit_txn(struct pmu *pmu) |
1648 | { | 1648 | { |
1649 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1649 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1650 | int assign[X86_PMC_IDX_MAX]; | 1650 | int assign[X86_PMC_IDX_MAX]; |
1651 | int n, ret; | 1651 | int n, ret; |
1652 | 1652 | ||
@@ -2005,7 +2005,7 @@ static unsigned long get_segment_base(unsigned int segment) | |||
2005 | if (idx > GDT_ENTRIES) | 2005 | if (idx > GDT_ENTRIES) |
2006 | return 0; | 2006 | return 0; |
2007 | 2007 | ||
2008 | desc = __this_cpu_ptr(&gdt_page.gdt[0]); | 2008 | desc = raw_cpu_ptr(gdt_page.gdt); |
2009 | } | 2009 | } |
2010 | 2010 | ||
2011 | return get_desc_base(desc + idx); | 2011 | return get_desc_base(desc + idx); |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index beeb7cc07044..28926311aac1 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -699,7 +699,7 @@ __init int amd_pmu_init(void) | |||
699 | 699 | ||
700 | void amd_pmu_enable_virt(void) | 700 | void amd_pmu_enable_virt(void) |
701 | { | 701 | { |
702 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 702 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
703 | 703 | ||
704 | cpuc->perf_ctr_virt_mask = 0; | 704 | cpuc->perf_ctr_virt_mask = 0; |
705 | 705 | ||
@@ -711,7 +711,7 @@ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); | |||
711 | 711 | ||
712 | void amd_pmu_disable_virt(void) | 712 | void amd_pmu_disable_virt(void) |
713 | { | 713 | { |
714 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 714 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
715 | 715 | ||
716 | /* | 716 | /* |
717 | * We only mask out the Host-only bit so that host-only counting works | 717 | * We only mask out the Host-only bit so that host-only counting works |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3851def5057c..a73947c53b65 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -1174,7 +1174,7 @@ static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) | |||
1174 | 1174 | ||
1175 | static void intel_pmu_disable_all(void) | 1175 | static void intel_pmu_disable_all(void) |
1176 | { | 1176 | { |
1177 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1177 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1178 | 1178 | ||
1179 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); | 1179 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); |
1180 | 1180 | ||
@@ -1187,7 +1187,7 @@ static void intel_pmu_disable_all(void) | |||
1187 | 1187 | ||
1188 | static void intel_pmu_enable_all(int added) | 1188 | static void intel_pmu_enable_all(int added) |
1189 | { | 1189 | { |
1190 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1190 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1191 | 1191 | ||
1192 | intel_pmu_pebs_enable_all(); | 1192 | intel_pmu_pebs_enable_all(); |
1193 | intel_pmu_lbr_enable_all(); | 1193 | intel_pmu_lbr_enable_all(); |
@@ -1221,7 +1221,7 @@ static void intel_pmu_enable_all(int added) | |||
1221 | */ | 1221 | */ |
1222 | static void intel_pmu_nhm_workaround(void) | 1222 | static void intel_pmu_nhm_workaround(void) |
1223 | { | 1223 | { |
1224 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1224 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1225 | static const unsigned long nhm_magic[4] = { | 1225 | static const unsigned long nhm_magic[4] = { |
1226 | 0x4300B5, | 1226 | 0x4300B5, |
1227 | 0x4300D2, | 1227 | 0x4300D2, |
@@ -1320,7 +1320,7 @@ static inline bool event_is_checkpointed(struct perf_event *event) | |||
1320 | static void intel_pmu_disable_event(struct perf_event *event) | 1320 | static void intel_pmu_disable_event(struct perf_event *event) |
1321 | { | 1321 | { |
1322 | struct hw_perf_event *hwc = &event->hw; | 1322 | struct hw_perf_event *hwc = &event->hw; |
1323 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1323 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1324 | 1324 | ||
1325 | if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { | 1325 | if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { |
1326 | intel_pmu_disable_bts(); | 1326 | intel_pmu_disable_bts(); |
@@ -1384,7 +1384,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) | |||
1384 | static void intel_pmu_enable_event(struct perf_event *event) | 1384 | static void intel_pmu_enable_event(struct perf_event *event) |
1385 | { | 1385 | { |
1386 | struct hw_perf_event *hwc = &event->hw; | 1386 | struct hw_perf_event *hwc = &event->hw; |
1387 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1387 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1388 | 1388 | ||
1389 | if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { | 1389 | if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { |
1390 | if (!__this_cpu_read(cpu_hw_events.enabled)) | 1390 | if (!__this_cpu_read(cpu_hw_events.enabled)) |
@@ -1478,7 +1478,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1478 | u64 status; | 1478 | u64 status; |
1479 | int handled; | 1479 | int handled; |
1480 | 1480 | ||
1481 | cpuc = &__get_cpu_var(cpu_hw_events); | 1481 | cpuc = this_cpu_ptr(&cpu_hw_events); |
1482 | 1482 | ||
1483 | /* | 1483 | /* |
1484 | * No known reason to not always do late ACK, | 1484 | * No known reason to not always do late ACK, |
@@ -1910,7 +1910,7 @@ EXPORT_SYMBOL_GPL(perf_guest_get_msrs); | |||
1910 | 1910 | ||
1911 | static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) | 1911 | static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) |
1912 | { | 1912 | { |
1913 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1913 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1914 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | 1914 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; |
1915 | 1915 | ||
1916 | arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; | 1916 | arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; |
@@ -1931,7 +1931,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) | |||
1931 | 1931 | ||
1932 | static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr) | 1932 | static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr) |
1933 | { | 1933 | { |
1934 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1934 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1935 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | 1935 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; |
1936 | int idx; | 1936 | int idx; |
1937 | 1937 | ||
@@ -1965,7 +1965,7 @@ static void core_pmu_enable_event(struct perf_event *event) | |||
1965 | 1965 | ||
1966 | static void core_pmu_enable_all(int added) | 1966 | static void core_pmu_enable_all(int added) |
1967 | { | 1967 | { |
1968 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1968 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
1969 | int idx; | 1969 | int idx; |
1970 | 1970 | ||
1971 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1971 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index b1553d05a5cb..46211bcc813e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -474,7 +474,7 @@ void intel_pmu_enable_bts(u64 config) | |||
474 | 474 | ||
475 | void intel_pmu_disable_bts(void) | 475 | void intel_pmu_disable_bts(void) |
476 | { | 476 | { |
477 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 477 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
478 | unsigned long debugctlmsr; | 478 | unsigned long debugctlmsr; |
479 | 479 | ||
480 | if (!cpuc->ds) | 480 | if (!cpuc->ds) |
@@ -491,7 +491,7 @@ void intel_pmu_disable_bts(void) | |||
491 | 491 | ||
492 | int intel_pmu_drain_bts_buffer(void) | 492 | int intel_pmu_drain_bts_buffer(void) |
493 | { | 493 | { |
494 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 494 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
495 | struct debug_store *ds = cpuc->ds; | 495 | struct debug_store *ds = cpuc->ds; |
496 | struct bts_record { | 496 | struct bts_record { |
497 | u64 from; | 497 | u64 from; |
@@ -669,7 +669,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event) | |||
669 | 669 | ||
670 | void intel_pmu_pebs_enable(struct perf_event *event) | 670 | void intel_pmu_pebs_enable(struct perf_event *event) |
671 | { | 671 | { |
672 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 672 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
673 | struct hw_perf_event *hwc = &event->hw; | 673 | struct hw_perf_event *hwc = &event->hw; |
674 | 674 | ||
675 | hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; | 675 | hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; |
@@ -684,7 +684,7 @@ void intel_pmu_pebs_enable(struct perf_event *event) | |||
684 | 684 | ||
685 | void intel_pmu_pebs_disable(struct perf_event *event) | 685 | void intel_pmu_pebs_disable(struct perf_event *event) |
686 | { | 686 | { |
687 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 687 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
688 | struct hw_perf_event *hwc = &event->hw; | 688 | struct hw_perf_event *hwc = &event->hw; |
689 | 689 | ||
690 | cpuc->pebs_enabled &= ~(1ULL << hwc->idx); | 690 | cpuc->pebs_enabled &= ~(1ULL << hwc->idx); |
@@ -702,7 +702,7 @@ void intel_pmu_pebs_disable(struct perf_event *event) | |||
702 | 702 | ||
703 | void intel_pmu_pebs_enable_all(void) | 703 | void intel_pmu_pebs_enable_all(void) |
704 | { | 704 | { |
705 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 705 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
706 | 706 | ||
707 | if (cpuc->pebs_enabled) | 707 | if (cpuc->pebs_enabled) |
708 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); | 708 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); |
@@ -710,7 +710,7 @@ void intel_pmu_pebs_enable_all(void) | |||
710 | 710 | ||
711 | void intel_pmu_pebs_disable_all(void) | 711 | void intel_pmu_pebs_disable_all(void) |
712 | { | 712 | { |
713 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 713 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
714 | 714 | ||
715 | if (cpuc->pebs_enabled) | 715 | if (cpuc->pebs_enabled) |
716 | wrmsrl(MSR_IA32_PEBS_ENABLE, 0); | 716 | wrmsrl(MSR_IA32_PEBS_ENABLE, 0); |
@@ -718,7 +718,7 @@ void intel_pmu_pebs_disable_all(void) | |||
718 | 718 | ||
719 | static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) | 719 | static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) |
720 | { | 720 | { |
721 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 721 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
722 | unsigned long from = cpuc->lbr_entries[0].from; | 722 | unsigned long from = cpuc->lbr_entries[0].from; |
723 | unsigned long old_to, to = cpuc->lbr_entries[0].to; | 723 | unsigned long old_to, to = cpuc->lbr_entries[0].to; |
724 | unsigned long ip = regs->ip; | 724 | unsigned long ip = regs->ip; |
@@ -829,7 +829,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
829 | * We cast to the biggest pebs_record but are careful not to | 829 | * We cast to the biggest pebs_record but are careful not to |
830 | * unconditionally access the 'extra' entries. | 830 | * unconditionally access the 'extra' entries. |
831 | */ | 831 | */ |
832 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 832 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
833 | struct pebs_record_hsw *pebs = __pebs; | 833 | struct pebs_record_hsw *pebs = __pebs; |
834 | struct perf_sample_data data; | 834 | struct perf_sample_data data; |
835 | struct pt_regs regs; | 835 | struct pt_regs regs; |
@@ -916,7 +916,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
916 | 916 | ||
917 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | 917 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) |
918 | { | 918 | { |
919 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 919 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
920 | struct debug_store *ds = cpuc->ds; | 920 | struct debug_store *ds = cpuc->ds; |
921 | struct perf_event *event = cpuc->events[0]; /* PMC0 only */ | 921 | struct perf_event *event = cpuc->events[0]; /* PMC0 only */ |
922 | struct pebs_record_core *at, *top; | 922 | struct pebs_record_core *at, *top; |
@@ -957,7 +957,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | |||
957 | 957 | ||
958 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) | 958 | static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) |
959 | { | 959 | { |
960 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 960 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
961 | struct debug_store *ds = cpuc->ds; | 961 | struct debug_store *ds = cpuc->ds; |
962 | struct perf_event *event = NULL; | 962 | struct perf_event *event = NULL; |
963 | void *at, *top; | 963 | void *at, *top; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 4af10617de33..45fa730a5283 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c | |||
@@ -133,7 +133,7 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc); | |||
133 | static void __intel_pmu_lbr_enable(void) | 133 | static void __intel_pmu_lbr_enable(void) |
134 | { | 134 | { |
135 | u64 debugctl; | 135 | u64 debugctl; |
136 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 136 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
137 | 137 | ||
138 | if (cpuc->lbr_sel) | 138 | if (cpuc->lbr_sel) |
139 | wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); | 139 | wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); |
@@ -183,7 +183,7 @@ void intel_pmu_lbr_reset(void) | |||
183 | 183 | ||
184 | void intel_pmu_lbr_enable(struct perf_event *event) | 184 | void intel_pmu_lbr_enable(struct perf_event *event) |
185 | { | 185 | { |
186 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 186 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
187 | 187 | ||
188 | if (!x86_pmu.lbr_nr) | 188 | if (!x86_pmu.lbr_nr) |
189 | return; | 189 | return; |
@@ -203,7 +203,7 @@ void intel_pmu_lbr_enable(struct perf_event *event) | |||
203 | 203 | ||
204 | void intel_pmu_lbr_disable(struct perf_event *event) | 204 | void intel_pmu_lbr_disable(struct perf_event *event) |
205 | { | 205 | { |
206 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 206 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
207 | 207 | ||
208 | if (!x86_pmu.lbr_nr) | 208 | if (!x86_pmu.lbr_nr) |
209 | return; | 209 | return; |
@@ -220,7 +220,7 @@ void intel_pmu_lbr_disable(struct perf_event *event) | |||
220 | 220 | ||
221 | void intel_pmu_lbr_enable_all(void) | 221 | void intel_pmu_lbr_enable_all(void) |
222 | { | 222 | { |
223 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 223 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
224 | 224 | ||
225 | if (cpuc->lbr_users) | 225 | if (cpuc->lbr_users) |
226 | __intel_pmu_lbr_enable(); | 226 | __intel_pmu_lbr_enable(); |
@@ -228,7 +228,7 @@ void intel_pmu_lbr_enable_all(void) | |||
228 | 228 | ||
229 | void intel_pmu_lbr_disable_all(void) | 229 | void intel_pmu_lbr_disable_all(void) |
230 | { | 230 | { |
231 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 231 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
232 | 232 | ||
233 | if (cpuc->lbr_users) | 233 | if (cpuc->lbr_users) |
234 | __intel_pmu_lbr_disable(); | 234 | __intel_pmu_lbr_disable(); |
@@ -332,7 +332,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) | |||
332 | 332 | ||
333 | void intel_pmu_lbr_read(void) | 333 | void intel_pmu_lbr_read(void) |
334 | { | 334 | { |
335 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 335 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
336 | 336 | ||
337 | if (!cpuc->lbr_users) | 337 | if (!cpuc->lbr_users) |
338 | return; | 338 | return; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 619f7699487a..d64f275fe274 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c | |||
@@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v) | |||
135 | * or use ldexp(count, -32). | 135 | * or use ldexp(count, -32). |
136 | * Watts = Joules/Time delta | 136 | * Watts = Joules/Time delta |
137 | */ | 137 | */ |
138 | return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit); | 138 | return v << (32 - __this_cpu_read(rapl_pmu->hw_unit)); |
139 | } | 139 | } |
140 | 140 | ||
141 | static u64 rapl_event_update(struct perf_event *event) | 141 | static u64 rapl_event_update(struct perf_event *event) |
@@ -187,7 +187,7 @@ static void rapl_stop_hrtimer(struct rapl_pmu *pmu) | |||
187 | 187 | ||
188 | static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) | 188 | static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) |
189 | { | 189 | { |
190 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | 190 | struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); |
191 | struct perf_event *event; | 191 | struct perf_event *event; |
192 | unsigned long flags; | 192 | unsigned long flags; |
193 | 193 | ||
@@ -234,7 +234,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu, | |||
234 | 234 | ||
235 | static void rapl_pmu_event_start(struct perf_event *event, int mode) | 235 | static void rapl_pmu_event_start(struct perf_event *event, int mode) |
236 | { | 236 | { |
237 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | 237 | struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); |
238 | unsigned long flags; | 238 | unsigned long flags; |
239 | 239 | ||
240 | spin_lock_irqsave(&pmu->lock, flags); | 240 | spin_lock_irqsave(&pmu->lock, flags); |
@@ -244,7 +244,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode) | |||
244 | 244 | ||
245 | static void rapl_pmu_event_stop(struct perf_event *event, int mode) | 245 | static void rapl_pmu_event_stop(struct perf_event *event, int mode) |
246 | { | 246 | { |
247 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | 247 | struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); |
248 | struct hw_perf_event *hwc = &event->hw; | 248 | struct hw_perf_event *hwc = &event->hw; |
249 | unsigned long flags; | 249 | unsigned long flags; |
250 | 250 | ||
@@ -278,7 +278,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode) | |||
278 | 278 | ||
279 | static int rapl_pmu_event_add(struct perf_event *event, int mode) | 279 | static int rapl_pmu_event_add(struct perf_event *event, int mode) |
280 | { | 280 | { |
281 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | 281 | struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); |
282 | struct hw_perf_event *hwc = &event->hw; | 282 | struct hw_perf_event *hwc = &event->hw; |
283 | unsigned long flags; | 283 | unsigned long flags; |
284 | 284 | ||
@@ -696,7 +696,7 @@ static int __init rapl_pmu_init(void) | |||
696 | return -1; | 696 | return -1; |
697 | } | 697 | } |
698 | 698 | ||
699 | pmu = __get_cpu_var(rapl_pmu); | 699 | pmu = __this_cpu_read(rapl_pmu); |
700 | 700 | ||
701 | pr_info("RAPL PMU detected, hw unit 2^-%d Joules," | 701 | pr_info("RAPL PMU detected, hw unit 2^-%d Joules," |
702 | " API unit is 2^-32 Joules," | 702 | " API unit is 2^-32 Joules," |
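The RAPL hunks above show the two shapes this conversion takes most often: taking the address of a per-cpu object (&__get_cpu_var(v) becomes this_cpu_ptr(&v)) and fetching the value stored in a per-cpu pointer (__get_cpu_var(p) becomes __this_cpu_read(p)). A minimal sketch of both idioms; unit_state, unit_ptr and read_units() are hypothetical names, not part of this patch:

#include <linux/percpu.h>

struct unit_state {
        int hw_unit;
};

static DEFINE_PER_CPU(struct unit_state, unit_state);   /* per-cpu object  */
static DEFINE_PER_CPU(struct unit_state *, unit_ptr);   /* per-cpu pointer */

static int read_units(void)
{
        /* old: &__get_cpu_var(unit_state)  ->  new: this_cpu_ptr(&unit_state) */
        struct unit_state *s = this_cpu_ptr(&unit_state);

        /* old: __get_cpu_var(unit_ptr)  ->  new: __this_cpu_read(unit_ptr) */
        struct unit_state *p = __this_cpu_read(unit_ptr);

        return s->hw_unit + (p ? p->hw_unit : 0);
}

Both forms assume the caller already runs with preemption disabled, which holds in the hrtimer and PMU callback paths converted above.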
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c index 838fa8772c62..5b0c232d1ee6 100644 --- a/arch/x86/kernel/cpu/perf_event_knc.c +++ b/arch/x86/kernel/cpu/perf_event_knc.c | |||
@@ -217,7 +217,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs) | |||
217 | int bit, loops; | 217 | int bit, loops; |
218 | u64 status; | 218 | u64 status; |
219 | 219 | ||
220 | cpuc = &__get_cpu_var(cpu_hw_events); | 220 | cpuc = this_cpu_ptr(&cpu_hw_events); |
221 | 221 | ||
222 | knc_pmu_disable_all(); | 222 | knc_pmu_disable_all(); |
223 | 223 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 5d466b7d8609..f2e56783af3d 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -915,7 +915,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event) | |||
915 | 915 | ||
916 | static void p4_pmu_disable_all(void) | 916 | static void p4_pmu_disable_all(void) |
917 | { | 917 | { |
918 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 918 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
919 | int idx; | 919 | int idx; |
920 | 920 | ||
921 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 921 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
@@ -984,7 +984,7 @@ static void p4_pmu_enable_event(struct perf_event *event) | |||
984 | 984 | ||
985 | static void p4_pmu_enable_all(int added) | 985 | static void p4_pmu_enable_all(int added) |
986 | { | 986 | { |
987 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 987 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
988 | int idx; | 988 | int idx; |
989 | 989 | ||
990 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 990 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
@@ -1004,7 +1004,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
1004 | int idx, handled = 0; | 1004 | int idx, handled = 0; |
1005 | u64 val; | 1005 | u64 val; |
1006 | 1006 | ||
1007 | cpuc = &__get_cpu_var(cpu_hw_events); | 1007 | cpuc = this_cpu_ptr(&cpu_hw_events); |
1008 | 1008 | ||
1009 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1009 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
1010 | int overflow; | 1010 | int overflow; |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 5f9cf20cdb68..3d5fb509bdeb 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -108,7 +108,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
108 | int i; | 108 | int i; |
109 | 109 | ||
110 | for (i = 0; i < HBP_NUM; i++) { | 110 | for (i = 0; i < HBP_NUM; i++) { |
111 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | 111 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); |
112 | 112 | ||
113 | if (!*slot) { | 113 | if (!*slot) { |
114 | *slot = bp; | 114 | *slot = bp; |
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
122 | set_debugreg(info->address, i); | 122 | set_debugreg(info->address, i); |
123 | __this_cpu_write(cpu_debugreg[i], info->address); | 123 | __this_cpu_write(cpu_debugreg[i], info->address); |
124 | 124 | ||
125 | dr7 = &__get_cpu_var(cpu_dr7); | 125 | dr7 = this_cpu_ptr(&cpu_dr7); |
126 | *dr7 |= encode_dr7(i, info->len, info->type); | 126 | *dr7 |= encode_dr7(i, info->len, info->type); |
127 | 127 | ||
128 | set_debugreg(*dr7, 7); | 128 | set_debugreg(*dr7, 7); |
@@ -146,7 +146,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
146 | int i; | 146 | int i; |
147 | 147 | ||
148 | for (i = 0; i < HBP_NUM; i++) { | 148 | for (i = 0; i < HBP_NUM; i++) { |
149 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | 149 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); |
150 | 150 | ||
151 | if (*slot == bp) { | 151 | if (*slot == bp) { |
152 | *slot = NULL; | 152 | *slot = NULL; |
@@ -157,7 +157,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
157 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) | 157 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) |
158 | return; | 158 | return; |
159 | 159 | ||
160 | dr7 = &__get_cpu_var(cpu_dr7); | 160 | dr7 = this_cpu_ptr(&cpu_dr7); |
161 | *dr7 &= ~__encode_dr7(i, info->len, info->type); | 161 | *dr7 &= ~__encode_dr7(i, info->len, info->type); |
162 | 162 | ||
163 | set_debugreg(*dr7, 7); | 163 | set_debugreg(*dr7, 7); |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 4d1c746892eb..e4b503d5558c 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -52,13 +52,13 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
52 | regs->sp <= curbase + THREAD_SIZE) | 52 | regs->sp <= curbase + THREAD_SIZE) |
53 | return; | 53 | return; |
54 | 54 | ||
55 | irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) + | 55 | irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) + |
56 | STACK_TOP_MARGIN; | 56 | STACK_TOP_MARGIN; |
57 | irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr); | 57 | irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr); |
58 | if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) | 58 | if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) |
59 | return; | 59 | return; |
60 | 60 | ||
61 | oist = &__get_cpu_var(orig_ist); | 61 | oist = this_cpu_ptr(&orig_ist); |
62 | estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; | 62 | estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; |
63 | estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; | 63 | estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; |
64 | if (regs->sp >= estack_top && regs->sp <= estack_bottom) | 64 | if (regs->sp >= estack_top && regs->sp <= estack_bottom) |
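Two details in the stack_overflow_check() hunk above are worth noting: the per-cpu irq stack is an array member, so it can be passed to this_cpu_ptr() without an explicit & (the array decays to an address inside the per-cpu section), while irq_stack_ptr is an ordinary per-cpu pointer whose value is read with __this_cpu_read(). A compact sketch with made-up names (stack_area, stack_top, sp_in_range()):

#include <linux/percpu.h>
#include <linux/types.h>

struct stack_area {
        char stack[4096];
};

static DEFINE_PER_CPU(struct stack_area, stack_area);
static DEFINE_PER_CPU(char *, stack_top);

static bool sp_in_range(unsigned long sp)
{
        /* array member: decays to a per-cpu address, no & needed */
        unsigned long base = (unsigned long)this_cpu_ptr(stack_area.stack);
        /* plain per-cpu pointer: read its current value */
        unsigned long top  = (unsigned long)__this_cpu_read(stack_top);

        return sp >= base && sp <= top;
}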
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 95c3cb16af3e..f6945bef2cd1 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -244,9 +244,9 @@ u32 kvm_read_and_reset_pf_reason(void) | |||
244 | { | 244 | { |
245 | u32 reason = 0; | 245 | u32 reason = 0; |
246 | 246 | ||
247 | if (__get_cpu_var(apf_reason).enabled) { | 247 | if (__this_cpu_read(apf_reason.enabled)) { |
248 | reason = __get_cpu_var(apf_reason).reason; | 248 | reason = __this_cpu_read(apf_reason.reason); |
249 | __get_cpu_var(apf_reason).reason = 0; | 249 | __this_cpu_write(apf_reason.reason, 0); |
250 | } | 250 | } |
251 | 251 | ||
252 | return reason; | 252 | return reason; |
@@ -319,7 +319,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val) | |||
319 | * there's no need for lock or memory barriers. | 319 | * there's no need for lock or memory barriers. |
320 | * An optimization barrier is implied in apic write. | 320 | * An optimization barrier is implied in apic write. |
321 | */ | 321 | */ |
322 | if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi))) | 322 | if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi))) |
323 | return; | 323 | return; |
324 | apic_write(APIC_EOI, APIC_EOI_ACK); | 324 | apic_write(APIC_EOI, APIC_EOI_ACK); |
325 | } | 325 | } |
@@ -330,13 +330,13 @@ void kvm_guest_cpu_init(void) | |||
330 | return; | 330 | return; |
331 | 331 | ||
332 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { | 332 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { |
333 | u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason)); | 333 | u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); |
334 | 334 | ||
335 | #ifdef CONFIG_PREEMPT | 335 | #ifdef CONFIG_PREEMPT |
336 | pa |= KVM_ASYNC_PF_SEND_ALWAYS; | 336 | pa |= KVM_ASYNC_PF_SEND_ALWAYS; |
337 | #endif | 337 | #endif |
338 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); | 338 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); |
339 | __get_cpu_var(apf_reason).enabled = 1; | 339 | __this_cpu_write(apf_reason.enabled, 1); |
340 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", | 340 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", |
341 | smp_processor_id()); | 341 | smp_processor_id()); |
342 | } | 342 | } |
@@ -345,8 +345,8 @@ void kvm_guest_cpu_init(void) | |||
345 | unsigned long pa; | 345 | unsigned long pa; |
346 | /* Size alignment is implied but just to make it explicit. */ | 346 | /* Size alignment is implied but just to make it explicit. */ |
347 | BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); | 347 | BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); |
348 | __get_cpu_var(kvm_apic_eoi) = 0; | 348 | __this_cpu_write(kvm_apic_eoi, 0); |
349 | pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi)) | 349 | pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) |
350 | | KVM_MSR_ENABLED; | 350 | | KVM_MSR_ENABLED; |
351 | wrmsrl(MSR_KVM_PV_EOI_EN, pa); | 351 | wrmsrl(MSR_KVM_PV_EOI_EN, pa); |
352 | } | 352 | } |
@@ -357,11 +357,11 @@ void kvm_guest_cpu_init(void) | |||
357 | 357 | ||
358 | static void kvm_pv_disable_apf(void) | 358 | static void kvm_pv_disable_apf(void) |
359 | { | 359 | { |
360 | if (!__get_cpu_var(apf_reason).enabled) | 360 | if (!__this_cpu_read(apf_reason.enabled)) |
361 | return; | 361 | return; |
362 | 362 | ||
363 | wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); | 363 | wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); |
364 | __get_cpu_var(apf_reason).enabled = 0; | 364 | __this_cpu_write(apf_reason.enabled, 0); |
365 | 365 | ||
366 | printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", | 366 | printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", |
367 | smp_processor_id()); | 367 | smp_processor_id()); |
@@ -724,7 +724,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) | |||
724 | if (in_nmi()) | 724 | if (in_nmi()) |
725 | return; | 725 | return; |
726 | 726 | ||
727 | w = &__get_cpu_var(klock_waiting); | 727 | w = this_cpu_ptr(&klock_waiting); |
728 | cpu = smp_processor_id(); | 728 | cpu = smp_processor_id(); |
729 | start = spin_time_start(); | 729 | start = spin_time_start(); |
730 | 730 | ||
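The kvm.c changes above split lvalue-style accesses to a per-cpu struct field into explicit __this_cpu_read() and __this_cpu_write() calls; on x86 each of those compiles to a single segment-relative access, with no need to first form the per-cpu address. A small sketch of the read-and-reset pattern; pv_state and read_and_reset_reason() are stand-in names, not the real apf_reason machinery:

#include <linux/percpu.h>
#include <linux/types.h>

struct pv_state {
        u32 enabled;
        u32 reason;
};

static DEFINE_PER_CPU(struct pv_state, pv_state);

static u32 read_and_reset_reason(void)
{
        u32 reason = 0;

        /* old: if (__get_cpu_var(pv_state).enabled) ... */
        if (__this_cpu_read(pv_state.enabled)) {
                reason = __this_cpu_read(pv_state.reason);
                /* old: __get_cpu_var(pv_state).reason = 0; */
                __this_cpu_write(pv_state.reason, 0);
        }

        return reason;
}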
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f7f6a4a157a6..65510f624dfe 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -670,7 +670,7 @@ static int svm_hardware_enable(void) | |||
670 | 670 | ||
671 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { | 671 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
672 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); | 672 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); |
673 | __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT; | 673 | __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); |
674 | } | 674 | } |
675 | 675 | ||
676 | 676 | ||
@@ -1313,8 +1313,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
1313 | rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); | 1313 | rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); |
1314 | 1314 | ||
1315 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR) && | 1315 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR) && |
1316 | svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) { | 1316 | svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) { |
1317 | __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio; | 1317 | __this_cpu_write(current_tsc_ratio, svm->tsc_ratio); |
1318 | wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio); | 1318 | wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio); |
1319 | } | 1319 | } |
1320 | } | 1320 | } |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 04fa1b8298c8..d9dcfa27aa84 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -1626,7 +1626,7 @@ static void reload_tss(void) | |||
1626 | /* | 1626 | /* |
1627 | * VT restores TR but not its size. Useless. | 1627 | * VT restores TR but not its size. Useless. |
1628 | */ | 1628 | */ |
1629 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); | 1629 | struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); |
1630 | struct desc_struct *descs; | 1630 | struct desc_struct *descs; |
1631 | 1631 | ||
1632 | descs = (void *)gdt->address; | 1632 | descs = (void *)gdt->address; |
@@ -1672,7 +1672,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) | |||
1672 | 1672 | ||
1673 | static unsigned long segment_base(u16 selector) | 1673 | static unsigned long segment_base(u16 selector) |
1674 | { | 1674 | { |
1675 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); | 1675 | struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); |
1676 | struct desc_struct *d; | 1676 | struct desc_struct *d; |
1677 | unsigned long table_base; | 1677 | unsigned long table_base; |
1678 | unsigned long v; | 1678 | unsigned long v; |
@@ -1802,7 +1802,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
1802 | */ | 1802 | */ |
1803 | if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) | 1803 | if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) |
1804 | stts(); | 1804 | stts(); |
1805 | load_gdt(&__get_cpu_var(host_gdt)); | 1805 | load_gdt(this_cpu_ptr(&host_gdt)); |
1806 | } | 1806 | } |
1807 | 1807 | ||
1808 | static void vmx_load_host_state(struct vcpu_vmx *vmx) | 1808 | static void vmx_load_host_state(struct vcpu_vmx *vmx) |
@@ -1832,7 +1832,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
1832 | } | 1832 | } |
1833 | 1833 | ||
1834 | if (vmx->loaded_vmcs->cpu != cpu) { | 1834 | if (vmx->loaded_vmcs->cpu != cpu) { |
1835 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); | 1835 | struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); |
1836 | unsigned long sysenter_esp; | 1836 | unsigned long sysenter_esp; |
1837 | 1837 | ||
1838 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); | 1838 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
@@ -2771,7 +2771,7 @@ static int hardware_enable(void) | |||
2771 | ept_sync_global(); | 2771 | ept_sync_global(); |
2772 | } | 2772 | } |
2773 | 2773 | ||
2774 | native_store_gdt(&__get_cpu_var(host_gdt)); | 2774 | native_store_gdt(this_cpu_ptr(&host_gdt)); |
2775 | 2775 | ||
2776 | return 0; | 2776 | return 0; |
2777 | } | 2777 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5430e4b0af29..34c8f94331f8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1559,7 +1559,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1559 | 1559 | ||
1560 | /* Keep irq disabled to prevent changes to the clock */ | 1560 | /* Keep irq disabled to prevent changes to the clock */ |
1561 | local_irq_save(flags); | 1561 | local_irq_save(flags); |
1562 | this_tsc_khz = __get_cpu_var(cpu_tsc_khz); | 1562 | this_tsc_khz = __this_cpu_read(cpu_tsc_khz); |
1563 | if (unlikely(this_tsc_khz == 0)) { | 1563 | if (unlikely(this_tsc_khz == 0)) { |
1564 | local_irq_restore(flags); | 1564 | local_irq_restore(flags); |
1565 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); | 1565 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); |
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index dd89a13f1051..b4f2e7e9e907 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c | |||
@@ -140,7 +140,7 @@ static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); | |||
140 | 140 | ||
141 | bool kmemcheck_active(struct pt_regs *regs) | 141 | bool kmemcheck_active(struct pt_regs *regs) |
142 | { | 142 | { |
143 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 143 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
144 | 144 | ||
145 | return data->balance > 0; | 145 | return data->balance > 0; |
146 | } | 146 | } |
@@ -148,7 +148,7 @@ bool kmemcheck_active(struct pt_regs *regs) | |||
148 | /* Save an address that needs to be shown/hidden */ | 148 | /* Save an address that needs to be shown/hidden */ |
149 | static void kmemcheck_save_addr(unsigned long addr) | 149 | static void kmemcheck_save_addr(unsigned long addr) |
150 | { | 150 | { |
151 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 151 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
152 | 152 | ||
153 | BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); | 153 | BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); |
154 | data->addr[data->n_addrs++] = addr; | 154 | data->addr[data->n_addrs++] = addr; |
@@ -156,7 +156,7 @@ static void kmemcheck_save_addr(unsigned long addr) | |||
156 | 156 | ||
157 | static unsigned int kmemcheck_show_all(void) | 157 | static unsigned int kmemcheck_show_all(void) |
158 | { | 158 | { |
159 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 159 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
160 | unsigned int i; | 160 | unsigned int i; |
161 | unsigned int n; | 161 | unsigned int n; |
162 | 162 | ||
@@ -169,7 +169,7 @@ static unsigned int kmemcheck_show_all(void) | |||
169 | 169 | ||
170 | static unsigned int kmemcheck_hide_all(void) | 170 | static unsigned int kmemcheck_hide_all(void) |
171 | { | 171 | { |
172 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 172 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
173 | unsigned int i; | 173 | unsigned int i; |
174 | unsigned int n; | 174 | unsigned int n; |
175 | 175 | ||
@@ -185,7 +185,7 @@ static unsigned int kmemcheck_hide_all(void) | |||
185 | */ | 185 | */ |
186 | void kmemcheck_show(struct pt_regs *regs) | 186 | void kmemcheck_show(struct pt_regs *regs) |
187 | { | 187 | { |
188 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 188 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
189 | 189 | ||
190 | BUG_ON(!irqs_disabled()); | 190 | BUG_ON(!irqs_disabled()); |
191 | 191 | ||
@@ -226,7 +226,7 @@ void kmemcheck_show(struct pt_regs *regs) | |||
226 | */ | 226 | */ |
227 | void kmemcheck_hide(struct pt_regs *regs) | 227 | void kmemcheck_hide(struct pt_regs *regs) |
228 | { | 228 | { |
229 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 229 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
230 | int n; | 230 | int n; |
231 | 231 | ||
232 | BUG_ON(!irqs_disabled()); | 232 | BUG_ON(!irqs_disabled()); |
@@ -528,7 +528,7 @@ static void kmemcheck_access(struct pt_regs *regs, | |||
528 | const uint8_t *insn_primary; | 528 | const uint8_t *insn_primary; |
529 | unsigned int size; | 529 | unsigned int size; |
530 | 530 | ||
531 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 531 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
532 | 532 | ||
533 | /* Recursive fault -- ouch. */ | 533 | /* Recursive fault -- ouch. */ |
534 | if (data->busy) { | 534 | if (data->busy) { |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 379e8bd0deea..1d2e6392f5fa 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -64,11 +64,11 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | |||
64 | static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) | 64 | static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) |
65 | { | 65 | { |
66 | if (ctr_running) | 66 | if (ctr_running) |
67 | model->check_ctrs(regs, &__get_cpu_var(cpu_msrs)); | 67 | model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs)); |
68 | else if (!nmi_enabled) | 68 | else if (!nmi_enabled) |
69 | return NMI_DONE; | 69 | return NMI_DONE; |
70 | else | 70 | else |
71 | model->stop(&__get_cpu_var(cpu_msrs)); | 71 | model->stop(this_cpu_ptr(&cpu_msrs)); |
72 | return NMI_HANDLED; | 72 | return NMI_HANDLED; |
73 | } | 73 | } |
74 | 74 | ||
@@ -91,7 +91,7 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs) | |||
91 | 91 | ||
92 | static void nmi_cpu_start(void *dummy) | 92 | static void nmi_cpu_start(void *dummy) |
93 | { | 93 | { |
94 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | 94 | struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs); |
95 | if (!msrs->controls) | 95 | if (!msrs->controls) |
96 | WARN_ON_ONCE(1); | 96 | WARN_ON_ONCE(1); |
97 | else | 97 | else |
@@ -111,7 +111,7 @@ static int nmi_start(void) | |||
111 | 111 | ||
112 | static void nmi_cpu_stop(void *dummy) | 112 | static void nmi_cpu_stop(void *dummy) |
113 | { | 113 | { |
114 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | 114 | struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs); |
115 | if (!msrs->controls) | 115 | if (!msrs->controls) |
116 | WARN_ON_ONCE(1); | 116 | WARN_ON_ONCE(1); |
117 | else | 117 | else |
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 98ab13058f89..ad1d91f475ab 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c | |||
@@ -372,7 +372,7 @@ static unsigned int get_stagger(void) | |||
372 | { | 372 | { |
373 | #ifdef CONFIG_SMP | 373 | #ifdef CONFIG_SMP |
374 | int cpu = smp_processor_id(); | 374 | int cpu = smp_processor_id(); |
375 | return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map)); | 375 | return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map)); |
376 | #endif | 376 | #endif |
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
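The cpu_sibling_map line above is the cpumask_var_t case: cpumask_var_t is a pointer when CONFIG_CPUMASK_OFFSTACK=y and an embedded bitmap otherwise, so the correct per-cpu accessor (a value read versus an address-of) differs by configuration; this_cpu_cpumask_var_ptr() hides that distinction and always hands back a usable struct cpumask pointer for the current CPU. A sketch with a hypothetical per-cpu mask (sibling_mask, is_first_sibling()):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(cpumask_var_t, sibling_mask);

static int is_first_sibling(void)
{
        /* resolves to this_cpu_read() or this_cpu_ptr() depending on
         * CONFIG_CPUMASK_OFFSTACK, always yielding a struct cpumask * */
        return smp_processor_id() ==
               cpumask_first(this_cpu_cpumask_var_ptr(sibling_mask));
}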
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index c89c93320c12..c6b146e67116 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c | |||
@@ -63,8 +63,8 @@ | |||
63 | 63 | ||
64 | static struct uv_hub_nmi_s **uv_hub_nmi_list; | 64 | static struct uv_hub_nmi_s **uv_hub_nmi_list; |
65 | 65 | ||
66 | DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi); | 66 | DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi); |
67 | EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi); | 67 | EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi); |
68 | 68 | ||
69 | static unsigned long nmi_mmr; | 69 | static unsigned long nmi_mmr; |
70 | static unsigned long nmi_mmr_clear; | 70 | static unsigned long nmi_mmr_clear; |
@@ -215,7 +215,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi) | |||
215 | int nmi = 0; | 215 | int nmi = 0; |
216 | 216 | ||
217 | local64_inc(&uv_nmi_count); | 217 | local64_inc(&uv_nmi_count); |
218 | uv_cpu_nmi.queries++; | 218 | this_cpu_inc(uv_cpu_nmi.queries); |
219 | 219 | ||
220 | do { | 220 | do { |
221 | nmi = atomic_read(&hub_nmi->in_nmi); | 221 | nmi = atomic_read(&hub_nmi->in_nmi); |
@@ -293,7 +293,7 @@ static void uv_nmi_nr_cpus_ping(void) | |||
293 | int cpu; | 293 | int cpu; |
294 | 294 | ||
295 | for_each_cpu(cpu, uv_nmi_cpu_mask) | 295 | for_each_cpu(cpu, uv_nmi_cpu_mask) |
296 | atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1); | 296 | uv_cpu_nmi_per(cpu).pinging = 1; |
297 | 297 | ||
298 | apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI); | 298 | apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI); |
299 | } | 299 | } |
@@ -304,8 +304,8 @@ static void uv_nmi_cleanup_mask(void) | |||
304 | int cpu; | 304 | int cpu; |
305 | 305 | ||
306 | for_each_cpu(cpu, uv_nmi_cpu_mask) { | 306 | for_each_cpu(cpu, uv_nmi_cpu_mask) { |
307 | atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0); | 307 | uv_cpu_nmi_per(cpu).pinging = 0; |
308 | atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT); | 308 | uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT; |
309 | cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); | 309 | cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); |
310 | } | 310 | } |
311 | } | 311 | } |
@@ -328,7 +328,7 @@ static int uv_nmi_wait_cpus(int first) | |||
328 | int loop_delay = uv_nmi_loop_delay; | 328 | int loop_delay = uv_nmi_loop_delay; |
329 | 329 | ||
330 | for_each_cpu(j, uv_nmi_cpu_mask) { | 330 | for_each_cpu(j, uv_nmi_cpu_mask) { |
331 | if (atomic_read(&uv_cpu_nmi_per(j).state)) { | 331 | if (uv_cpu_nmi_per(j).state) { |
332 | cpumask_clear_cpu(j, uv_nmi_cpu_mask); | 332 | cpumask_clear_cpu(j, uv_nmi_cpu_mask); |
333 | if (++k >= n) | 333 | if (++k >= n) |
334 | break; | 334 | break; |
@@ -359,7 +359,7 @@ static int uv_nmi_wait_cpus(int first) | |||
359 | static void uv_nmi_wait(int master) | 359 | static void uv_nmi_wait(int master) |
360 | { | 360 | { |
361 | /* indicate this cpu is in */ | 361 | /* indicate this cpu is in */ |
362 | atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN); | 362 | this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN); |
363 | 363 | ||
364 | /* if not the first cpu in (the master), then we are a slave cpu */ | 364 | /* if not the first cpu in (the master), then we are a slave cpu */ |
365 | if (!master) | 365 | if (!master) |
@@ -419,7 +419,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs) | |||
419 | "UV:%sNMI process trace for CPU %d\n", dots, cpu); | 419 | "UV:%sNMI process trace for CPU %d\n", dots, cpu); |
420 | show_regs(regs); | 420 | show_regs(regs); |
421 | } | 421 | } |
422 | atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE); | 422 | this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE); |
423 | } | 423 | } |
424 | 424 | ||
425 | /* Trigger a slave cpu to dump it's state */ | 425 | /* Trigger a slave cpu to dump it's state */ |
@@ -427,20 +427,20 @@ static void uv_nmi_trigger_dump(int cpu) | |||
427 | { | 427 | { |
428 | int retry = uv_nmi_trigger_delay; | 428 | int retry = uv_nmi_trigger_delay; |
429 | 429 | ||
430 | if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN) | 430 | if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN) |
431 | return; | 431 | return; |
432 | 432 | ||
433 | atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP); | 433 | uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP; |
434 | do { | 434 | do { |
435 | cpu_relax(); | 435 | cpu_relax(); |
436 | udelay(10); | 436 | udelay(10); |
437 | if (atomic_read(&uv_cpu_nmi_per(cpu).state) | 437 | if (uv_cpu_nmi_per(cpu).state |
438 | != UV_NMI_STATE_DUMP) | 438 | != UV_NMI_STATE_DUMP) |
439 | return; | 439 | return; |
440 | } while (--retry > 0); | 440 | } while (--retry > 0); |
441 | 441 | ||
442 | pr_crit("UV: CPU %d stuck in process dump function\n", cpu); | 442 | pr_crit("UV: CPU %d stuck in process dump function\n", cpu); |
443 | atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE); | 443 | uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE; |
444 | } | 444 | } |
445 | 445 | ||
446 | /* Wait until all cpus ready to exit */ | 446 | /* Wait until all cpus ready to exit */ |
@@ -488,7 +488,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master) | |||
488 | } else { | 488 | } else { |
489 | while (!atomic_read(&uv_nmi_slave_continue)) | 489 | while (!atomic_read(&uv_nmi_slave_continue)) |
490 | cpu_relax(); | 490 | cpu_relax(); |
491 | while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP) | 491 | while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP) |
492 | cpu_relax(); | 492 | cpu_relax(); |
493 | uv_nmi_dump_state_cpu(cpu, regs); | 493 | uv_nmi_dump_state_cpu(cpu, regs); |
494 | } | 494 | } |
@@ -615,7 +615,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) | |||
615 | local_irq_save(flags); | 615 | local_irq_save(flags); |
616 | 616 | ||
617 | /* If not a UV System NMI, ignore */ | 617 | /* If not a UV System NMI, ignore */ |
618 | if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) { | 618 | if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) { |
619 | local_irq_restore(flags); | 619 | local_irq_restore(flags); |
620 | return NMI_DONE; | 620 | return NMI_DONE; |
621 | } | 621 | } |
@@ -639,7 +639,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) | |||
639 | uv_call_kgdb_kdb(cpu, regs, master); | 639 | uv_call_kgdb_kdb(cpu, regs, master); |
640 | 640 | ||
641 | /* Clear per_cpu "in nmi" flag */ | 641 | /* Clear per_cpu "in nmi" flag */ |
642 | atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT); | 642 | this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT); |
643 | 643 | ||
644 | /* Clear MMR NMI flag on each hub */ | 644 | /* Clear MMR NMI flag on each hub */ |
645 | uv_clear_nmi(cpu); | 645 | uv_clear_nmi(cpu); |
@@ -666,16 +666,16 @@ static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs) | |||
666 | { | 666 | { |
667 | int ret; | 667 | int ret; |
668 | 668 | ||
669 | uv_cpu_nmi.queries++; | 669 | this_cpu_inc(uv_cpu_nmi.queries); |
670 | if (!atomic_read(&uv_cpu_nmi.pinging)) { | 670 | if (!this_cpu_read(uv_cpu_nmi.pinging)) { |
671 | local64_inc(&uv_nmi_ping_misses); | 671 | local64_inc(&uv_nmi_ping_misses); |
672 | return NMI_DONE; | 672 | return NMI_DONE; |
673 | } | 673 | } |
674 | 674 | ||
675 | uv_cpu_nmi.pings++; | 675 | this_cpu_inc(uv_cpu_nmi.pings); |
676 | local64_inc(&uv_nmi_ping_count); | 676 | local64_inc(&uv_nmi_ping_count); |
677 | ret = uv_handle_nmi(reason, regs); | 677 | ret = uv_handle_nmi(reason, regs); |
678 | atomic_set(&uv_cpu_nmi.pinging, 0); | 678 | this_cpu_write(uv_cpu_nmi.pinging, 0); |
679 | return ret; | 679 | return ret; |
680 | } | 680 | } |
681 | 681 | ||
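In uv_nmi.c the per-cpu NMI bookkeeping variable is renamed from __uv_cpu_nmi to uv_cpu_nmi and its fields are now touched through this_cpu_inc()/this_cpu_read()/this_cpu_write(), while the cross-CPU uv_cpu_nmi_per() accesses become plain loads and stores, which presumes the state/pinging fields are ordinary integers rather than atomic_t after the accompanying header change (not shown here). A rough sketch of the local-CPU side, using a hypothetical nmi_stat structure:

#include <linux/percpu.h>

struct nmi_stat {
        int state;
        int pinging;
        int queries;
        int pings;
};

static DEFINE_PER_CPU(struct nmi_stat, nmi_stat);

static void note_nmi_ping(void)
{
        /* old: uv_cpu_nmi.queries++ through a __get_cpu_var-style lvalue */
        this_cpu_inc(nmi_stat.queries);

        if (!this_cpu_read(nmi_stat.pinging))
                return;

        this_cpu_inc(nmi_stat.pings);
        this_cpu_write(nmi_stat.pinging, 0);
}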
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index 5c86786bbfd2..a244237f3cfa 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c | |||
@@ -365,7 +365,7 @@ __setup("uvrtcevt", uv_enable_evt_rtc); | |||
365 | 365 | ||
366 | static __init void uv_rtc_register_clockevents(struct work_struct *dummy) | 366 | static __init void uv_rtc_register_clockevents(struct work_struct *dummy) |
367 | { | 367 | { |
368 | struct clock_event_device *ced = &__get_cpu_var(cpu_ced); | 368 | struct clock_event_device *ced = this_cpu_ptr(&cpu_ced); |
369 | 369 | ||
370 | *ced = clock_event_device_uv; | 370 | *ced = clock_event_device_uv; |
371 | ced->cpumask = cpumask_of(smp_processor_id()); | 371 | ced->cpumask = cpumask_of(smp_processor_id()); |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index acb0effd8077..1a3f0445432a 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -821,7 +821,7 @@ static void xen_convert_trap_info(const struct desc_ptr *desc, | |||
821 | 821 | ||
822 | void xen_copy_trap_info(struct trap_info *traps) | 822 | void xen_copy_trap_info(struct trap_info *traps) |
823 | { | 823 | { |
824 | const struct desc_ptr *desc = &__get_cpu_var(idt_desc); | 824 | const struct desc_ptr *desc = this_cpu_ptr(&idt_desc); |
825 | 825 | ||
826 | xen_convert_trap_info(desc, traps); | 826 | xen_convert_trap_info(desc, traps); |
827 | } | 827 | } |
@@ -838,7 +838,7 @@ static void xen_load_idt(const struct desc_ptr *desc) | |||
838 | 838 | ||
839 | spin_lock(&lock); | 839 | spin_lock(&lock); |
840 | 840 | ||
841 | __get_cpu_var(idt_desc) = *desc; | 841 | memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); |
842 | 842 | ||
843 | xen_convert_trap_info(desc, traps); | 843 | xen_convert_trap_info(desc, traps); |
844 | 844 | ||
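xen_load_idt() above is the lvalue case: the old code assigned a whole struct to __get_cpu_var(idt_desc), and the pointer-based accessors have no write operation for an arbitrary-sized aggregate, so the conversion copies through this_cpu_ptr() with memcpy(). A sketch of the same move on a hypothetical descriptor type (table_desc, save_table_desc()):

#include <linux/percpu.h>
#include <linux/string.h>

struct table_desc {
        unsigned short size;
        unsigned long address;
};

static DEFINE_PER_CPU(struct table_desc, table_desc_copy);

static void save_table_desc(const struct table_desc *desc)
{
        /* old: __get_cpu_var(table_desc_copy) = *desc;
         * this_cpu_write() only covers scalar sizes, so the aggregate is
         * copied through the per-cpu pointer instead. */
        memcpy(this_cpu_ptr(&table_desc_copy), desc, sizeof(*desc));
}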
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 0d82003e76ad..ea54a08d8301 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c | |||
@@ -54,7 +54,7 @@ DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags); | |||
54 | 54 | ||
55 | void xen_mc_flush(void) | 55 | void xen_mc_flush(void) |
56 | { | 56 | { |
57 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 57 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
58 | struct multicall_entry *mc; | 58 | struct multicall_entry *mc; |
59 | int ret = 0; | 59 | int ret = 0; |
60 | unsigned long flags; | 60 | unsigned long flags; |
@@ -131,7 +131,7 @@ void xen_mc_flush(void) | |||
131 | 131 | ||
132 | struct multicall_space __xen_mc_entry(size_t args) | 132 | struct multicall_space __xen_mc_entry(size_t args) |
133 | { | 133 | { |
134 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 134 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
135 | struct multicall_space ret; | 135 | struct multicall_space ret; |
136 | unsigned argidx = roundup(b->argidx, sizeof(u64)); | 136 | unsigned argidx = roundup(b->argidx, sizeof(u64)); |
137 | 137 | ||
@@ -162,7 +162,7 @@ struct multicall_space __xen_mc_entry(size_t args) | |||
162 | 162 | ||
163 | struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) | 163 | struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) |
164 | { | 164 | { |
165 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 165 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
166 | struct multicall_space ret = { NULL, NULL }; | 166 | struct multicall_space ret = { NULL, NULL }; |
167 | 167 | ||
168 | BUG_ON(preemptible()); | 168 | BUG_ON(preemptible()); |
@@ -192,7 +192,7 @@ out: | |||
192 | 192 | ||
193 | void xen_mc_callback(void (*fn)(void *), void *data) | 193 | void xen_mc_callback(void (*fn)(void *), void *data) |
194 | { | 194 | { |
195 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 195 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
196 | struct callback *cb; | 196 | struct callback *cb; |
197 | 197 | ||
198 | if (b->cbidx == MC_BATCH) { | 198 | if (b->cbidx == MC_BATCH) { |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 0ba5f3b967f0..23b45eb9a89c 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -109,7 +109,7 @@ static bool xen_pvspin = true; | |||
109 | __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) | 109 | __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) |
110 | { | 110 | { |
111 | int irq = __this_cpu_read(lock_kicker_irq); | 111 | int irq = __this_cpu_read(lock_kicker_irq); |
112 | struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting); | 112 | struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting); |
113 | int cpu = smp_processor_id(); | 113 | int cpu = smp_processor_id(); |
114 | u64 start; | 114 | u64 start; |
115 | unsigned long flags; | 115 | unsigned long flags; |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 5718b0b58b60..a1d430b112b3 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -80,7 +80,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res) | |||
80 | 80 | ||
81 | BUG_ON(preemptible()); | 81 | BUG_ON(preemptible()); |
82 | 82 | ||
83 | state = &__get_cpu_var(xen_runstate); | 83 | state = this_cpu_ptr(&xen_runstate); |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * The runstate info is always updated by the hypervisor on | 86 | * The runstate info is always updated by the hypervisor on |
@@ -123,7 +123,7 @@ static void do_stolen_accounting(void) | |||
123 | 123 | ||
124 | WARN_ON(state.state != RUNSTATE_running); | 124 | WARN_ON(state.state != RUNSTATE_running); |
125 | 125 | ||
126 | snap = &__get_cpu_var(xen_runstate_snapshot); | 126 | snap = this_cpu_ptr(&xen_runstate_snapshot); |
127 | 127 | ||
128 | /* work out how much time the VCPU has not been runn*ing* */ | 128 | /* work out how much time the VCPU has not been runn*ing* */ |
129 | runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; | 129 | runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; |
@@ -158,7 +158,7 @@ cycle_t xen_clocksource_read(void) | |||
158 | cycle_t ret; | 158 | cycle_t ret; |
159 | 159 | ||
160 | preempt_disable_notrace(); | 160 | preempt_disable_notrace(); |
161 | src = &__get_cpu_var(xen_vcpu)->time; | 161 | src = this_cpu_ptr(&xen_vcpu->time); |
162 | ret = pvclock_clocksource_read(src); | 162 | ret = pvclock_clocksource_read(src); |
163 | preempt_enable_notrace(); | 163 | preempt_enable_notrace(); |
164 | return ret; | 164 | return ret; |
@@ -397,7 +397,7 @@ static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt. | |||
397 | 397 | ||
398 | static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) | 398 | static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) |
399 | { | 399 | { |
400 | struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt; | 400 | struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt); |
401 | irqreturn_t ret; | 401 | irqreturn_t ret; |
402 | 402 | ||
403 | ret = IRQ_NONE; | 403 | ret = IRQ_NONE; |
@@ -460,7 +460,7 @@ void xen_setup_cpu_clockevents(void) | |||
460 | { | 460 | { |
461 | BUG_ON(preemptible()); | 461 | BUG_ON(preemptible()); |
462 | 462 | ||
463 | clockevents_register_device(&__get_cpu_var(xen_clock_events).evt); | 463 | clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); |
464 | } | 464 | } |
465 | 465 | ||
466 | void xen_timer_resume(void) | 466 | void xen_timer_resume(void) |
diff --git a/drivers/char/random.c b/drivers/char/random.c index c18d41db83d8..82759cef9043 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -874,7 +874,7 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) | |||
874 | void add_interrupt_randomness(int irq, int irq_flags) | 874 | void add_interrupt_randomness(int irq, int irq_flags) |
875 | { | 875 | { |
876 | struct entropy_store *r; | 876 | struct entropy_store *r; |
877 | struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); | 877 | struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); |
878 | struct pt_regs *regs = get_irq_regs(); | 878 | struct pt_regs *regs = get_irq_regs(); |
879 | unsigned long now = jiffies; | 879 | unsigned long now = jiffies; |
880 | cycles_t cycles = random_get_entropy(); | 880 | cycles_t cycles = random_get_entropy(); |
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c index ad3572541728..31990600fcff 100644 --- a/drivers/clocksource/dummy_timer.c +++ b/drivers/clocksource/dummy_timer.c | |||
@@ -28,7 +28,7 @@ static void dummy_timer_set_mode(enum clock_event_mode mode, | |||
28 | static void dummy_timer_setup(void) | 28 | static void dummy_timer_setup(void) |
29 | { | 29 | { |
30 | int cpu = smp_processor_id(); | 30 | int cpu = smp_processor_id(); |
31 | struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt); | 31 | struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt); |
32 | 32 | ||
33 | evt->name = "dummy_timer"; | 33 | evt->name = "dummy_timer"; |
34 | evt->features = CLOCK_EVT_FEAT_PERIODIC | | 34 | evt->features = CLOCK_EVT_FEAT_PERIODIC | |
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c index 9e4db41abe3c..b7384b853e5a 100644 --- a/drivers/clocksource/metag_generic.c +++ b/drivers/clocksource/metag_generic.c | |||
@@ -90,7 +90,7 @@ static struct clocksource clocksource_metag = { | |||
90 | 90 | ||
91 | static irqreturn_t metag_timer_interrupt(int irq, void *dummy) | 91 | static irqreturn_t metag_timer_interrupt(int irq, void *dummy) |
92 | { | 92 | { |
93 | struct clock_event_device *evt = &__get_cpu_var(local_clockevent); | 93 | struct clock_event_device *evt = this_cpu_ptr(&local_clockevent); |
94 | 94 | ||
95 | evt->event_handler(evt); | 95 | evt->event_handler(evt); |
96 | 96 | ||
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c index 8d115db1e651..098c542e5c53 100644 --- a/drivers/clocksource/qcom-timer.c +++ b/drivers/clocksource/qcom-timer.c | |||
@@ -219,7 +219,7 @@ static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, | |||
219 | } | 219 | } |
220 | 220 | ||
221 | /* Immediately configure the timer on the boot CPU */ | 221 | /* Immediately configure the timer on the boot CPU */ |
222 | msm_local_timer_setup(__this_cpu_ptr(msm_evt)); | 222 | msm_local_timer_setup(raw_cpu_ptr(msm_evt)); |
223 | } | 223 | } |
224 | 224 | ||
225 | err: | 225 | err: |
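The dummy_timer and qcom-timer changes just above are renames rather than behavioural changes: __this_cpu_ptr() becomes raw_cpu_ptr(), the unchecked variant that performs the same address arithmetic but skips the preemption-safety debug check, which is appropriate for setup paths already running on the CPU whose data they touch. A sketch, assuming a hypothetical per-cpu clock event device named sketch_evt:

#include <linux/clockchips.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct clock_event_device, sketch_evt);

static void setup_local_timer(void)
{
        /* old: __this_cpu_ptr(&sketch_evt), new: raw_cpu_ptr(&sketch_evt);
         * same pointer, no preemption check */
        struct clock_event_device *evt = raw_cpu_ptr(&sketch_evt);

        evt->name = "sketch_timer";
        evt->rating = 100;
}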
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 044ee0df5871..06b57c4c4d80 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c | |||
@@ -66,7 +66,7 @@ static inline void ladder_do_selection(struct ladder_device *ldev, | |||
66 | static int ladder_select_state(struct cpuidle_driver *drv, | 66 | static int ladder_select_state(struct cpuidle_driver *drv, |
67 | struct cpuidle_device *dev) | 67 | struct cpuidle_device *dev) |
68 | { | 68 | { |
69 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | 69 | struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); |
70 | struct ladder_device_state *last_state; | 70 | struct ladder_device_state *last_state; |
71 | int last_residency, last_idx = ldev->last_state_idx; | 71 | int last_residency, last_idx = ldev->last_state_idx; |
72 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); | 72 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); |
@@ -170,7 +170,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv, | |||
170 | */ | 170 | */ |
171 | static void ladder_reflect(struct cpuidle_device *dev, int index) | 171 | static void ladder_reflect(struct cpuidle_device *dev, int index) |
172 | { | 172 | { |
173 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | 173 | struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); |
174 | if (index > 0) | 174 | if (index > 0) |
175 | ldev->last_state_idx = index; | 175 | ldev->last_state_idx = index; |
176 | } | 176 | } |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 34db2fb3ef1e..710a233b9b0d 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -289,7 +289,7 @@ again: | |||
289 | */ | 289 | */ |
290 | static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | 290 | static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
291 | { | 291 | { |
292 | struct menu_device *data = &__get_cpu_var(menu_devices); | 292 | struct menu_device *data = this_cpu_ptr(&menu_devices); |
293 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); | 293 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); |
294 | int i; | 294 | int i; |
295 | unsigned int interactivity_req; | 295 | unsigned int interactivity_req; |
@@ -372,7 +372,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
372 | */ | 372 | */ |
373 | static void menu_reflect(struct cpuidle_device *dev, int index) | 373 | static void menu_reflect(struct cpuidle_device *dev, int index) |
374 | { | 374 | { |
375 | struct menu_device *data = &__get_cpu_var(menu_devices); | 375 | struct menu_device *data = this_cpu_ptr(&menu_devices); |
376 | data->last_state_idx = index; | 376 | data->last_state_idx = index; |
377 | if (index >= 0) | 377 | if (index >= 0) |
378 | data->needs_update = 1; | 378 | data->needs_update = 1; |
@@ -385,7 +385,7 @@ static void menu_reflect(struct cpuidle_device *dev, int index) | |||
385 | */ | 385 | */ |
386 | static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) | 386 | static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
387 | { | 387 | { |
388 | struct menu_device *data = &__get_cpu_var(menu_devices); | 388 | struct menu_device *data = this_cpu_ptr(&menu_devices); |
389 | int last_idx = data->last_state_idx; | 389 | int last_idx = data->last_state_idx; |
390 | struct cpuidle_state *target = &drv->states[last_idx]; | 390 | struct cpuidle_state *target = &drv->states[last_idx]; |
391 | unsigned int measured_us; | 391 | unsigned int measured_us; |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index f0a4800a15b0..38493ff28fa5 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -102,7 +102,7 @@ static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly; | |||
102 | #ifdef CONFIG_GIC_NON_BANKED | 102 | #ifdef CONFIG_GIC_NON_BANKED |
103 | static void __iomem *gic_get_percpu_base(union gic_base *base) | 103 | static void __iomem *gic_get_percpu_base(union gic_base *base) |
104 | { | 104 | { |
105 | return *__this_cpu_ptr(base->percpu_base); | 105 | return raw_cpu_read(*base->percpu_base); |
106 | } | 106 | } |
107 | 107 | ||
108 | static void __iomem *gic_get_common_base(union gic_base *base) | 108 | static void __iomem *gic_get_common_base(union gic_base *base) |
@@ -522,11 +522,11 @@ static void gic_cpu_save(unsigned int gic_nr) | |||
522 | if (!dist_base || !cpu_base) | 522 | if (!dist_base || !cpu_base) |
523 | return; | 523 | return; |
524 | 524 | ||
525 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | 525 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); |
526 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | 526 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) |
527 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | 527 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); |
528 | 528 | ||
529 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | 529 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); |
530 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | 530 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) |
531 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | 531 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); |
532 | 532 | ||
@@ -548,11 +548,11 @@ static void gic_cpu_restore(unsigned int gic_nr) | |||
548 | if (!dist_base || !cpu_base) | 548 | if (!dist_base || !cpu_base) |
549 | return; | 549 | return; |
550 | 550 | ||
551 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | 551 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); |
552 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | 552 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) |
553 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); | 553 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); |
554 | 554 | ||
555 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | 555 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); |
556 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | 556 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) |
557 | writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); | 557 | writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); |
558 | 558 | ||
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 28a90122a5a8..87f86c77b094 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c | |||
@@ -548,7 +548,7 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, | |||
548 | * A race condition can at worst result in the merged flag being | 548 | * A race condition can at worst result in the merged flag being |
549 | * misrepresented, so we don't have to disable preemption here. | 549 | * misrepresented, so we don't have to disable preemption here. |
550 | */ | 550 | */ |
551 | last = __this_cpu_ptr(stats->last); | 551 | last = raw_cpu_ptr(stats->last); |
552 | stats_aux->merged = | 552 | stats_aux->merged = |
553 | (bi_sector == (ACCESS_ONCE(last->last_sector) && | 553 | (bi_sector == (ACCESS_ONCE(last->last_sector) && |
554 | ((bi_rw & (REQ_WRITE | REQ_DISCARD)) == | 554 | ((bi_rw & (REQ_WRITE | REQ_DISCARD)) == |
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 69557a26f749..049747f558c9 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -423,7 +423,7 @@ static void tile_net_pop_all_buffers(int instance, int stack) | |||
423 | /* Provide linux buffers to mPIPE. */ | 423 | /* Provide linux buffers to mPIPE. */ |
424 | static void tile_net_provide_needed_buffers(void) | 424 | static void tile_net_provide_needed_buffers(void) |
425 | { | 425 | { |
426 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 426 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
427 | int instance, kind; | 427 | int instance, kind; |
428 | for (instance = 0; instance < NR_MPIPE_MAX && | 428 | for (instance = 0; instance < NR_MPIPE_MAX && |
429 | info->mpipe[instance].has_iqueue; instance++) { | 429 | info->mpipe[instance].has_iqueue; instance++) { |
@@ -551,7 +551,7 @@ static inline bool filter_packet(struct net_device *dev, void *buf) | |||
551 | static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, | 551 | static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, |
552 | gxio_mpipe_idesc_t *idesc, unsigned long len) | 552 | gxio_mpipe_idesc_t *idesc, unsigned long len) |
553 | { | 553 | { |
554 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 554 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
555 | struct tile_net_priv *priv = netdev_priv(dev); | 555 | struct tile_net_priv *priv = netdev_priv(dev); |
556 | int instance = priv->instance; | 556 | int instance = priv->instance; |
557 | 557 | ||
@@ -585,7 +585,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, | |||
585 | /* Handle a packet. Return true if "processed", false if "filtered". */ | 585 | /* Handle a packet. Return true if "processed", false if "filtered". */ |
586 | static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc) | 586 | static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc) |
587 | { | 587 | { |
588 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 588 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
589 | struct mpipe_data *md = &mpipe_data[instance]; | 589 | struct mpipe_data *md = &mpipe_data[instance]; |
590 | struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel]; | 590 | struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel]; |
591 | uint8_t l2_offset; | 591 | uint8_t l2_offset; |
@@ -651,7 +651,7 @@ drop: | |||
651 | */ | 651 | */ |
652 | static int tile_net_poll(struct napi_struct *napi, int budget) | 652 | static int tile_net_poll(struct napi_struct *napi, int budget) |
653 | { | 653 | { |
654 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 654 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
655 | unsigned int work = 0; | 655 | unsigned int work = 0; |
656 | gxio_mpipe_idesc_t *idesc; | 656 | gxio_mpipe_idesc_t *idesc; |
657 | int instance, i, n; | 657 | int instance, i, n; |
@@ -700,7 +700,7 @@ done: | |||
700 | /* Handle an ingress interrupt from an instance on the current cpu. */ | 700 | /* Handle an ingress interrupt from an instance on the current cpu. */ |
701 | static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id) | 701 | static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id) |
702 | { | 702 | { |
703 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 703 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
704 | napi_schedule(&info->mpipe[(uint64_t)id].napi); | 704 | napi_schedule(&info->mpipe[(uint64_t)id].napi); |
705 | return IRQ_HANDLED; | 705 | return IRQ_HANDLED; |
706 | } | 706 | } |
@@ -763,7 +763,7 @@ static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) | |||
763 | /* Make sure the egress timer is scheduled. */ | 763 | /* Make sure the egress timer is scheduled. */ |
764 | static void tile_net_schedule_egress_timer(void) | 764 | static void tile_net_schedule_egress_timer(void) |
765 | { | 765 | { |
766 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 766 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
767 | 767 | ||
768 | if (!info->egress_timer_scheduled) { | 768 | if (!info->egress_timer_scheduled) { |
769 | hrtimer_start(&info->egress_timer, | 769 | hrtimer_start(&info->egress_timer, |
@@ -780,7 +780,7 @@ static void tile_net_schedule_egress_timer(void) | |||
780 | */ | 780 | */ |
781 | static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) | 781 | static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) |
782 | { | 782 | { |
783 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 783 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
784 | unsigned long irqflags; | 784 | unsigned long irqflags; |
785 | bool pending = false; | 785 | bool pending = false; |
786 | int i, instance; | 786 | int i, instance; |
@@ -1927,7 +1927,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | |||
1927 | */ | 1927 | */ |
1928 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | 1928 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) |
1929 | { | 1929 | { |
1930 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 1930 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
1931 | struct tile_net_priv *priv = netdev_priv(dev); | 1931 | struct tile_net_priv *priv = netdev_priv(dev); |
1932 | int channel = priv->echannel; | 1932 | int channel = priv->echannel; |
1933 | int instance = priv->instance; | 1933 | int instance = priv->instance; |
@@ -1996,7 +1996,7 @@ static unsigned int tile_net_tx_frags(struct frag *frags, | |||
1996 | /* Help the kernel transmit a packet. */ | 1996 | /* Help the kernel transmit a packet. */ |
1997 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | 1997 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) |
1998 | { | 1998 | { |
1999 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 1999 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
2000 | struct tile_net_priv *priv = netdev_priv(dev); | 2000 | struct tile_net_priv *priv = netdev_priv(dev); |
2001 | int instance = priv->instance; | 2001 | int instance = priv->instance; |
2002 | struct mpipe_data *md = &mpipe_data[instance]; | 2002 | struct mpipe_data *md = &mpipe_data[instance]; |
@@ -2138,7 +2138,7 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p) | |||
2138 | static void tile_net_netpoll(struct net_device *dev) | 2138 | static void tile_net_netpoll(struct net_device *dev) |
2139 | { | 2139 | { |
2140 | int instance = mpipe_instance(dev); | 2140 | int instance = mpipe_instance(dev); |
2141 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 2141 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
2142 | struct mpipe_data *md = &mpipe_data[instance]; | 2142 | struct mpipe_data *md = &mpipe_data[instance]; |
2143 | 2143 | ||
2144 | disable_percpu_irq(md->ingress_irq); | 2144 | disable_percpu_irq(md->ingress_irq); |
@@ -2237,7 +2237,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac) | |||
2237 | /* Per-cpu module initialization. */ | 2237 | /* Per-cpu module initialization. */ |
2238 | static void tile_net_init_module_percpu(void *unused) | 2238 | static void tile_net_init_module_percpu(void *unused) |
2239 | { | 2239 | { |
2240 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 2240 | struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); |
2241 | int my_cpu = smp_processor_id(); | 2241 | int my_cpu = smp_processor_id(); |
2242 | int instance; | 2242 | int instance; |
2243 | 2243 | ||
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 88818d5054ab..fb12d31cfcf6 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c | |||
@@ -996,13 +996,13 @@ static void tile_net_register(void *dev_ptr) | |||
996 | PDEBUG("tile_net_register(queue_id %d)\n", queue_id); | 996 | PDEBUG("tile_net_register(queue_id %d)\n", queue_id); |
997 | 997 | ||
998 | if (!strcmp(dev->name, "xgbe0")) | 998 | if (!strcmp(dev->name, "xgbe0")) |
999 | info = &__get_cpu_var(hv_xgbe0); | 999 | info = this_cpu_ptr(&hv_xgbe0); |
1000 | else if (!strcmp(dev->name, "xgbe1")) | 1000 | else if (!strcmp(dev->name, "xgbe1")) |
1001 | info = &__get_cpu_var(hv_xgbe1); | 1001 | info = this_cpu_ptr(&hv_xgbe1); |
1002 | else if (!strcmp(dev->name, "gbe0")) | 1002 | else if (!strcmp(dev->name, "gbe0")) |
1003 | info = &__get_cpu_var(hv_gbe0); | 1003 | info = this_cpu_ptr(&hv_gbe0); |
1004 | else if (!strcmp(dev->name, "gbe1")) | 1004 | else if (!strcmp(dev->name, "gbe1")) |
1005 | info = &__get_cpu_var(hv_gbe1); | 1005 | info = this_cpu_ptr(&hv_gbe1); |
1006 | else | 1006 | else |
1007 | BUG(); | 1007 | BUG(); |
1008 | 1008 | ||
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 8aa73fac6ad4..0581461c3a67 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -45,7 +45,7 @@ unsigned long oprofile_get_cpu_buffer_size(void) | |||
45 | 45 | ||
46 | void oprofile_cpu_buffer_inc_smpl_lost(void) | 46 | void oprofile_cpu_buffer_inc_smpl_lost(void) |
47 | { | 47 | { |
48 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | 48 | struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer); |
49 | 49 | ||
50 | cpu_buf->sample_lost_overflow++; | 50 | cpu_buf->sample_lost_overflow++; |
51 | } | 51 | } |
@@ -297,7 +297,7 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, | |||
297 | unsigned long event, int is_kernel, | 297 | unsigned long event, int is_kernel, |
298 | struct task_struct *task) | 298 | struct task_struct *task) |
299 | { | 299 | { |
300 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | 300 | struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer); |
301 | unsigned long backtrace = oprofile_backtrace_depth; | 301 | unsigned long backtrace = oprofile_backtrace_depth; |
302 | 302 | ||
303 | /* | 303 | /* |
@@ -357,7 +357,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, | |||
357 | { | 357 | { |
358 | struct op_sample *sample; | 358 | struct op_sample *sample; |
359 | int is_kernel = !user_mode(regs); | 359 | int is_kernel = !user_mode(regs); |
360 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | 360 | struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer); |
361 | 361 | ||
362 | cpu_buf->sample_received++; | 362 | cpu_buf->sample_received++; |
363 | 363 | ||
@@ -412,13 +412,13 @@ int oprofile_write_commit(struct op_entry *entry) | |||
412 | 412 | ||
413 | void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) | 413 | void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) |
414 | { | 414 | { |
415 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | 415 | struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer); |
416 | log_sample(cpu_buf, pc, 0, is_kernel, event, NULL); | 416 | log_sample(cpu_buf, pc, 0, is_kernel, event, NULL); |
417 | } | 417 | } |
418 | 418 | ||
419 | void oprofile_add_trace(unsigned long pc) | 419 | void oprofile_add_trace(unsigned long pc) |
420 | { | 420 | { |
421 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | 421 | struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer); |
422 | 422 | ||
423 | if (!cpu_buf->tracing) | 423 | if (!cpu_buf->tracing) |
424 | return; | 424 | return; |
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c index 61be1d9c16c8..bdef916e5dda 100644 --- a/drivers/oprofile/timer_int.c +++ b/drivers/oprofile/timer_int.c | |||
@@ -32,7 +32,7 @@ static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer) | |||
32 | 32 | ||
33 | static void __oprofile_hrtimer_start(void *unused) | 33 | static void __oprofile_hrtimer_start(void *unused) |
34 | { | 34 | { |
35 | struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer); | 35 | struct hrtimer *hrtimer = this_cpu_ptr(&oprofile_hrtimer); |
36 | 36 | ||
37 | if (!ctr_running) | 37 | if (!ctr_running) |
38 | return; | 38 | return; |
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c index 07676c22d514..79f59915f71b 100644 --- a/drivers/s390/cio/ccwreq.c +++ b/drivers/s390/cio/ccwreq.c | |||
@@ -252,7 +252,7 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status) | |||
252 | */ | 252 | */ |
253 | void ccw_request_handler(struct ccw_device *cdev) | 253 | void ccw_request_handler(struct ccw_device *cdev) |
254 | { | 254 | { |
255 | struct irb *irb = &__get_cpu_var(cio_irb); | 255 | struct irb *irb = this_cpu_ptr(&cio_irb); |
256 | struct ccw_request *req = &cdev->private->req; | 256 | struct ccw_request *req = &cdev->private->req; |
257 | enum io_status status; | 257 | enum io_status status; |
258 | int rc = -EOPNOTSUPP; | 258 | int rc = -EOPNOTSUPP; |
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 3d22d2a4ce14..213159dec89e 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -58,7 +58,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) | |||
58 | { | 58 | { |
59 | struct chsc_private *private = dev_get_drvdata(&sch->dev); | 59 | struct chsc_private *private = dev_get_drvdata(&sch->dev); |
60 | struct chsc_request *request = private->request; | 60 | struct chsc_request *request = private->request; |
61 | struct irb *irb = &__get_cpu_var(cio_irb); | 61 | struct irb *irb = this_cpu_ptr(&cio_irb); |
62 | 62 | ||
63 | CHSC_LOG(4, "irb"); | 63 | CHSC_LOG(4, "irb"); |
64 | CHSC_LOG_HEX(4, irb, sizeof(*irb)); | 64 | CHSC_LOG_HEX(4, irb, sizeof(*irb)); |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index d5a6f287d2fe..10eb738fc81a 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -563,7 +563,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) | |||
563 | 563 | ||
564 | set_cpu_flag(CIF_NOHZ_DELAY); | 564 | set_cpu_flag(CIF_NOHZ_DELAY); |
565 | tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; | 565 | tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; |
566 | irb = &__get_cpu_var(cio_irb); | 566 | irb = this_cpu_ptr(&cio_irb); |
567 | sch = (struct subchannel *)(unsigned long) tpi_info->intparm; | 567 | sch = (struct subchannel *)(unsigned long) tpi_info->intparm; |
568 | if (!sch) { | 568 | if (!sch) { |
569 | /* Clear pending interrupt condition. */ | 569 | /* Clear pending interrupt condition. */ |
@@ -613,7 +613,7 @@ void cio_tsch(struct subchannel *sch) | |||
613 | struct irb *irb; | 613 | struct irb *irb; |
614 | int irq_context; | 614 | int irq_context; |
615 | 615 | ||
616 | irb = &__get_cpu_var(cio_irb); | 616 | irb = this_cpu_ptr(&cio_irb); |
617 | /* Store interrupt response block to lowcore. */ | 617 | /* Store interrupt response block to lowcore. */ |
618 | if (tsch(sch->schid, irb) != 0) | 618 | if (tsch(sch->schid, irb) != 0) |
619 | /* Not status pending or not operational. */ | 619 | /* Not status pending or not operational. */ |
@@ -751,7 +751,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid) | |||
751 | struct tpi_info ti; | 751 | struct tpi_info ti; |
752 | 752 | ||
753 | if (tpi(&ti)) { | 753 | if (tpi(&ti)) { |
754 | tsch(ti.schid, &__get_cpu_var(cio_irb)); | 754 | tsch(ti.schid, this_cpu_ptr(&cio_irb)); |
755 | if (schid_equal(&ti.schid, &schid)) | 755 | if (schid_equal(&ti.schid, &schid)) |
756 | return 0; | 756 | return 0; |
757 | } | 757 | } |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 0bc902b3cd84..83da53c8e54c 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -739,7 +739,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
739 | struct irb *irb; | 739 | struct irb *irb; |
740 | int is_cmd; | 740 | int is_cmd; |
741 | 741 | ||
742 | irb = &__get_cpu_var(cio_irb); | 742 | irb = this_cpu_ptr(&cio_irb); |
743 | is_cmd = !scsw_is_tm(&irb->scsw); | 743 | is_cmd = !scsw_is_tm(&irb->scsw); |
744 | /* Check for unsolicited interrupt. */ | 744 | /* Check for unsolicited interrupt. */ |
745 | if (!scsw_is_solicited(&irb->scsw)) { | 745 | if (!scsw_is_solicited(&irb->scsw)) { |
@@ -805,7 +805,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
805 | { | 805 | { |
806 | struct irb *irb; | 806 | struct irb *irb; |
807 | 807 | ||
808 | irb = &__get_cpu_var(cio_irb); | 808 | irb = this_cpu_ptr(&cio_irb); |
809 | /* Check for unsolicited interrupt. */ | 809 | /* Check for unsolicited interrupt. */ |
810 | if (scsw_stctl(&irb->scsw) == | 810 | if (scsw_stctl(&irb->scsw) == |
811 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 811 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index c4f7bf3e24c2..37f0834300ea 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c | |||
@@ -134,7 +134,7 @@ static void eadm_subchannel_irq(struct subchannel *sch) | |||
134 | { | 134 | { |
135 | struct eadm_private *private = get_eadm_private(sch); | 135 | struct eadm_private *private = get_eadm_private(sch); |
136 | struct eadm_scsw *scsw = &sch->schib.scsw.eadm; | 136 | struct eadm_scsw *scsw = &sch->schib.scsw.eadm; |
137 | struct irb *irb = &__get_cpu_var(cio_irb); | 137 | struct irb *irb = this_cpu_ptr(&cio_irb); |
138 | int error = 0; | 138 | int error = 0; |
139 | 139 | ||
140 | EADM_LOG(6, "irq"); | 140 | EADM_LOG(6, "irq"); |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 8b0f9ef517d6..748c9136a60a 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -4134,7 +4134,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) | |||
4134 | * per cpu locality group is to reduce the contention between block | 4134 | * per cpu locality group is to reduce the contention between block |
4135 | * request from multiple CPUs. | 4135 | * request from multiple CPUs. |
4136 | */ | 4136 | */ |
4137 | ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups); | 4137 | ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); |
4138 | 4138 | ||
4139 | /* we're going to use group allocation */ | 4139 | /* we're going to use group allocation */ |
4140 | ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; | 4140 | ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 2997af6d2ccd..0a9a6da21e74 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -666,10 +666,19 @@ static inline size_t cpumask_size(void) | |||
666 | * | 666 | * |
667 | * This code makes NR_CPUS length memcopy and brings to a memory corruption. | 667 | * This code makes NR_CPUS length memcopy and brings to a memory corruption. |
668 | * cpumask_copy() provide safe copy functionality. | 668 | * cpumask_copy() provide safe copy functionality. |
669 | * | ||
670 | * Note that there is another evil here: If you define a cpumask_var_t | ||
671 | * as a percpu variable then the way to obtain the address of the cpumask | ||
672 | * structure differently influences what this_cpu_* operation needs to be | ||
673 | * used. Please use this_cpu_cpumask_var_t in those cases. The direct use | ||
674 | * of this_cpu_ptr() or this_cpu_read() will lead to failures when the | ||
675 | * other type of cpumask_var_t implementation is configured. | ||
669 | */ | 676 | */ |
670 | #ifdef CONFIG_CPUMASK_OFFSTACK | 677 | #ifdef CONFIG_CPUMASK_OFFSTACK |
671 | typedef struct cpumask *cpumask_var_t; | 678 | typedef struct cpumask *cpumask_var_t; |
672 | 679 | ||
680 | #define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) | ||
681 | |||
673 | bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); | 682 | bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); |
674 | bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); | 683 | bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); |
675 | bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); | 684 | bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); |
@@ -681,6 +690,8 @@ void free_bootmem_cpumask_var(cpumask_var_t mask); | |||
681 | #else | 690 | #else |
682 | typedef struct cpumask cpumask_var_t[1]; | 691 | typedef struct cpumask cpumask_var_t[1]; |
683 | 692 | ||
693 | #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) | ||
694 | |||
684 | static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) | 695 | static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) |
685 | { | 696 | { |
686 | return true; | 697 | return true; |
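The new this_cpu_cpumask_var_ptr() helper added above exists because the two cpumask_var_t layouts need different accessors: with CONFIG_CPUMASK_OFFSTACK the per-cpu variable holds a pointer (its value must be read), while the on-stack variant is a one-element array (its address must be taken). A minimal sketch of the intended call pattern, using a hypothetical per-cpu mask named demo_mask that is not part of the patch:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Hypothetical per-cpu scratch mask; with CONFIG_CPUMASK_OFFSTACK it is
 * assumed to have been allocated for every CPU at init time. */
static DEFINE_PER_CPU(cpumask_var_t, demo_mask);

static int demo_pick_cpu(const struct cpumask *allowed)
{
	/* Correct for both cpumask_var_t layouts; a bare this_cpu_ptr()
	 * or this_cpu_read() would only work for one of them. */
	struct cpumask *mask = this_cpu_cpumask_var_ptr(demo_mask);

	cpumask_and(mask, allowed, cpu_online_mask);
	return cpumask_first(mask);
}

The scheduler hunks further down (find_later_rq(), find_lowest_rq(), load_balance()) are the in-tree users converted to exactly this pattern.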
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index ecbc52f9ff77..8422b4ed6882 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
@@ -44,8 +44,8 @@ DECLARE_PER_CPU(struct kernel_stat, kstat); | |||
44 | DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); | 44 | DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); |
45 | 45 | ||
46 | /* Must have preemption disabled for this to be meaningful. */ | 46 | /* Must have preemption disabled for this to be meaningful. */ |
47 | #define kstat_this_cpu (&__get_cpu_var(kstat)) | 47 | #define kstat_this_cpu this_cpu_ptr(&kstat) |
48 | #define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat)) | 48 | #define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat) |
49 | #define kstat_cpu(cpu) per_cpu(kstat, cpu) | 49 | #define kstat_cpu(cpu) per_cpu(kstat, cpu) |
50 | #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) | 50 | #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) |
51 | 51 | ||
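After the change above, kstat_this_cpu and kcpustat_this_cpu expand to this_cpu_ptr() expressions, so callers keep dereferencing them as pointers; only the underlying accessor changes. A hedged sketch (cpustat[] and CPUTIME_USER follow the 3.18-era definitions; the helper name is made up for illustration):

#include <linux/kernel_stat.h>

/* Illustration only: account some user time on the local CPU.
 * As the comment above notes, preemption must already be disabled. */
static void demo_account_user(u64 delta)
{
	kcpustat_this_cpu->cpustat[CPUTIME_USER] += delta;
}

kstat_this_cpu is used the same way, for instance by the per-cpu interrupt counters.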
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index cfd56046ecec..420032d41d27 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -257,9 +257,6 @@ do { \ | |||
257 | #define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) | 257 | #define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) |
258 | #define __get_cpu_var(var) (*this_cpu_ptr(&(var))) | 258 | #define __get_cpu_var(var) (*this_cpu_ptr(&(var))) |
259 | 259 | ||
260 | /* keep until we have removed all uses of __this_cpu_ptr */ | ||
261 | #define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr) | ||
262 | |||
263 | /* | 260 | /* |
264 | * Must be an lvalue. Since @var must be a simple identifier, | 261 | * Must be an lvalue. Since @var must be a simple identifier, |
265 | * we force a syntax error here if it isn't. | 262 | * we force a syntax error here if it isn't. |
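This hunk drops the last compatibility alias; what remains are this_cpu_ptr()/raw_cpu_ptr() for addresses and the this_cpu_*()/__this_cpu_*()/raw_cpu_*() operation families for values. A small sketch of the conversion pattern applied throughout this series, on a hypothetical per-cpu counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);	/* illustration only */

static void demo_update(void)
{
	unsigned long *p;

	/* Old:  p = &__get_cpu_var(demo_count);  New: */
	p = this_cpu_ptr(&demo_count);
	(*p)++;

	/* Often the pointer is not needed at all: */
	__this_cpu_inc(demo_count);	/* caller guarantees no preemption */
	raw_cpu_write(demo_count, 0);	/* same, but never sanity-checked */
}

The raw_cpu_*() forms skip the preemption debugging checks entirely, which is why they replace the __raw_get_cpu_var() uses later in this diff.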
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 37252f71a380..c8a7db605e03 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
@@ -242,7 +242,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct, | |||
242 | DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked); | 242 | DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked); |
243 | static inline struct nf_conn *nf_ct_untracked_get(void) | 243 | static inline struct nf_conn *nf_ct_untracked_get(void) |
244 | { | 244 | { |
245 | return &__raw_get_cpu_var(nf_conntrack_untracked); | 245 | return raw_cpu_ptr(&nf_conntrack_untracked); |
246 | } | 246 | } |
247 | void nf_ct_untracked_status_or(unsigned long bits); | 247 | void nf_ct_untracked_status_or(unsigned long bits); |
248 | 248 | ||
diff --git a/include/net/snmp.h b/include/net/snmp.h index 8fd2f498782e..35512ac6dcfb 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h | |||
@@ -164,7 +164,7 @@ struct linux_xfrm_mib { | |||
164 | 164 | ||
165 | #define SNMP_ADD_STATS64_BH(mib, field, addend) \ | 165 | #define SNMP_ADD_STATS64_BH(mib, field, addend) \ |
166 | do { \ | 166 | do { \ |
167 | __typeof__(*mib) *ptr = __this_cpu_ptr(mib); \ | 167 | __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \ |
168 | u64_stats_update_begin(&ptr->syncp); \ | 168 | u64_stats_update_begin(&ptr->syncp); \ |
169 | ptr->mibs[field] += addend; \ | 169 | ptr->mibs[field] += addend; \ |
170 | u64_stats_update_end(&ptr->syncp); \ | 170 | u64_stats_update_end(&ptr->syncp); \ |
@@ -185,8 +185,8 @@ struct linux_xfrm_mib { | |||
185 | #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) | 185 | #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) |
186 | #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ | 186 | #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ |
187 | do { \ | 187 | do { \ |
188 | __typeof__(*mib) *ptr; \ | 188 | __typeof__(*mib) *ptr; \ |
189 | ptr = __this_cpu_ptr(mib); \ | 189 | ptr = raw_cpu_ptr((mib)); \ |
190 | u64_stats_update_begin(&ptr->syncp); \ | 190 | u64_stats_update_begin(&ptr->syncp); \ |
191 | ptr->mibs[basefield##PKTS]++; \ | 191 | ptr->mibs[basefield##PKTS]++; \ |
192 | ptr->mibs[basefield##OCTETS] += addend; \ | 192 | ptr->mibs[basefield##OCTETS] += addend; \ |
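The SNMP macros operate on dynamically allocated per-cpu areas and run in BH context, where preemption is already disabled, so the unchecked raw_cpu_ptr() form suffices. A sketch with a made-up mib structure (demo_mib, demo_stats and demo_count_packet are not from the patch):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_mib {
	u64 packets;
	struct u64_stats_sync syncp;
};

/* Assumed to be set up elsewhere with alloc_percpu(struct demo_mib). */
static struct demo_mib __percpu *demo_stats;

static void demo_count_packet(void)
{
	/* Mirrors SNMP_ADD_STATS64_BH(): raw_cpu_ptr() on an alloc_percpu()
	 * pointer, with the update bracketed by the u64_stats sequence. */
	struct demo_mib *ptr = raw_cpu_ptr(demo_stats);

	u64_stats_update_begin(&ptr->syncp);
	ptr->packets++;
	u64_stats_update_end(&ptr->syncp);
}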
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index f2a88de87a49..d659487254d5 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c | |||
@@ -137,7 +137,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx) | |||
137 | int cpu; | 137 | int cpu; |
138 | struct callchain_cpus_entries *entries; | 138 | struct callchain_cpus_entries *entries; |
139 | 139 | ||
140 | *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); | 140 | *rctx = get_recursion_context(this_cpu_ptr(callchain_recursion)); |
141 | if (*rctx == -1) | 141 | if (*rctx == -1) |
142 | return NULL; | 142 | return NULL; |
143 | 143 | ||
@@ -153,7 +153,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx) | |||
153 | static void | 153 | static void |
154 | put_callchain_entry(int rctx) | 154 | put_callchain_entry(int rctx) |
155 | { | 155 | { |
156 | put_recursion_context(__get_cpu_var(callchain_recursion), rctx); | 156 | put_recursion_context(this_cpu_ptr(callchain_recursion), rctx); |
157 | } | 157 | } |
158 | 158 | ||
159 | struct perf_callchain_entry * | 159 | struct perf_callchain_entry * |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 094df8c0742d..1425d07018de 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -249,7 +249,7 @@ static void perf_duration_warn(struct irq_work *w) | |||
249 | u64 avg_local_sample_len; | 249 | u64 avg_local_sample_len; |
250 | u64 local_samples_len; | 250 | u64 local_samples_len; |
251 | 251 | ||
252 | local_samples_len = __get_cpu_var(running_sample_length); | 252 | local_samples_len = __this_cpu_read(running_sample_length); |
253 | avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; | 253 | avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; |
254 | 254 | ||
255 | printk_ratelimited(KERN_WARNING | 255 | printk_ratelimited(KERN_WARNING |
@@ -271,10 +271,10 @@ void perf_sample_event_took(u64 sample_len_ns) | |||
271 | return; | 271 | return; |
272 | 272 | ||
273 | /* decay the counter by 1 average sample */ | 273 | /* decay the counter by 1 average sample */ |
274 | local_samples_len = __get_cpu_var(running_sample_length); | 274 | local_samples_len = __this_cpu_read(running_sample_length); |
275 | local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES; | 275 | local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES; |
276 | local_samples_len += sample_len_ns; | 276 | local_samples_len += sample_len_ns; |
277 | __get_cpu_var(running_sample_length) = local_samples_len; | 277 | __this_cpu_write(running_sample_length, local_samples_len); |
278 | 278 | ||
279 | /* | 279 | /* |
280 | * note: this will be biased artifically low until we have | 280 | * note: this will be biased artifically low until we have |
@@ -882,7 +882,7 @@ static DEFINE_PER_CPU(struct list_head, rotation_list); | |||
882 | static void perf_pmu_rotate_start(struct pmu *pmu) | 882 | static void perf_pmu_rotate_start(struct pmu *pmu) |
883 | { | 883 | { |
884 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); | 884 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
885 | struct list_head *head = &__get_cpu_var(rotation_list); | 885 | struct list_head *head = this_cpu_ptr(&rotation_list); |
886 | 886 | ||
887 | WARN_ON(!irqs_disabled()); | 887 | WARN_ON(!irqs_disabled()); |
888 | 888 | ||
@@ -2462,7 +2462,7 @@ void __perf_event_task_sched_out(struct task_struct *task, | |||
2462 | * to check if we have to switch out PMU state. | 2462 | * to check if we have to switch out PMU state. |
2463 | * cgroup event are system-wide mode only | 2463 | * cgroup event are system-wide mode only |
2464 | */ | 2464 | */ |
2465 | if (atomic_read(&__get_cpu_var(perf_cgroup_events))) | 2465 | if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) |
2466 | perf_cgroup_sched_out(task, next); | 2466 | perf_cgroup_sched_out(task, next); |
2467 | } | 2467 | } |
2468 | 2468 | ||
@@ -2705,11 +2705,11 @@ void __perf_event_task_sched_in(struct task_struct *prev, | |||
2705 | * to check if we have to switch in PMU state. | 2705 | * to check if we have to switch in PMU state. |
2706 | * cgroup event are system-wide mode only | 2706 | * cgroup event are system-wide mode only |
2707 | */ | 2707 | */ |
2708 | if (atomic_read(&__get_cpu_var(perf_cgroup_events))) | 2708 | if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) |
2709 | perf_cgroup_sched_in(prev, task); | 2709 | perf_cgroup_sched_in(prev, task); |
2710 | 2710 | ||
2711 | /* check for system-wide branch_stack events */ | 2711 | /* check for system-wide branch_stack events */ |
2712 | if (atomic_read(&__get_cpu_var(perf_branch_stack_events))) | 2712 | if (atomic_read(this_cpu_ptr(&perf_branch_stack_events))) |
2713 | perf_branch_stack_sched_in(prev, task); | 2713 | perf_branch_stack_sched_in(prev, task); |
2714 | } | 2714 | } |
2715 | 2715 | ||
@@ -2964,7 +2964,7 @@ bool perf_event_can_stop_tick(void) | |||
2964 | 2964 | ||
2965 | void perf_event_task_tick(void) | 2965 | void perf_event_task_tick(void) |
2966 | { | 2966 | { |
2967 | struct list_head *head = &__get_cpu_var(rotation_list); | 2967 | struct list_head *head = this_cpu_ptr(&rotation_list); |
2968 | struct perf_cpu_context *cpuctx, *tmp; | 2968 | struct perf_cpu_context *cpuctx, *tmp; |
2969 | struct perf_event_context *ctx; | 2969 | struct perf_event_context *ctx; |
2970 | int throttled; | 2970 | int throttled; |
@@ -5833,7 +5833,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | |||
5833 | struct perf_sample_data *data, | 5833 | struct perf_sample_data *data, |
5834 | struct pt_regs *regs) | 5834 | struct pt_regs *regs) |
5835 | { | 5835 | { |
5836 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | 5836 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
5837 | struct perf_event *event; | 5837 | struct perf_event *event; |
5838 | struct hlist_head *head; | 5838 | struct hlist_head *head; |
5839 | 5839 | ||
@@ -5852,7 +5852,7 @@ end: | |||
5852 | 5852 | ||
5853 | int perf_swevent_get_recursion_context(void) | 5853 | int perf_swevent_get_recursion_context(void) |
5854 | { | 5854 | { |
5855 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | 5855 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
5856 | 5856 | ||
5857 | return get_recursion_context(swhash->recursion); | 5857 | return get_recursion_context(swhash->recursion); |
5858 | } | 5858 | } |
@@ -5860,7 +5860,7 @@ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); | |||
5860 | 5860 | ||
5861 | inline void perf_swevent_put_recursion_context(int rctx) | 5861 | inline void perf_swevent_put_recursion_context(int rctx) |
5862 | { | 5862 | { |
5863 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | 5863 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
5864 | 5864 | ||
5865 | put_recursion_context(swhash->recursion, rctx); | 5865 | put_recursion_context(swhash->recursion, rctx); |
5866 | } | 5866 | } |
@@ -5889,7 +5889,7 @@ static void perf_swevent_read(struct perf_event *event) | |||
5889 | 5889 | ||
5890 | static int perf_swevent_add(struct perf_event *event, int flags) | 5890 | static int perf_swevent_add(struct perf_event *event, int flags) |
5891 | { | 5891 | { |
5892 | struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); | 5892 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
5893 | struct hw_perf_event *hwc = &event->hw; | 5893 | struct hw_perf_event *hwc = &event->hw; |
5894 | struct hlist_head *head; | 5894 | struct hlist_head *head; |
5895 | 5895 | ||
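perf_sample_event_took() above also shows the second half of the conversion: where the old code read and wrote through the __get_cpu_var() lvalue, the new code uses __this_cpu_read()/__this_cpu_write() and never forms an address. A reduced sketch of that read-modify-write idiom on a hypothetical per-cpu accumulator:

#include <linux/percpu.h>

static DEFINE_PER_CPU(u64, demo_len);	/* illustration only */

static void demo_decay(u64 sample)
{
	u64 v = __this_cpu_read(demo_len);

	v -= v / 8;		/* decay, as the perf code decays its average */
	v += sample;
	__this_cpu_write(demo_len, v);
}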
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 8fb52e9bddc1..e5202f00cabc 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -699,7 +699,7 @@ void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) | |||
699 | { | 699 | { |
700 | struct irq_chip *chip = irq_desc_get_chip(desc); | 700 | struct irq_chip *chip = irq_desc_get_chip(desc); |
701 | struct irqaction *action = desc->action; | 701 | struct irqaction *action = desc->action; |
702 | void *dev_id = __this_cpu_ptr(action->percpu_dev_id); | 702 | void *dev_id = raw_cpu_ptr(action->percpu_dev_id); |
703 | irqreturn_t res; | 703 | irqreturn_t res; |
704 | 704 | ||
705 | kstat_incr_irqs_this_cpu(irq, desc); | 705 | kstat_incr_irqs_this_cpu(irq, desc); |
diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 385b85aded19..3ab9048483fa 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c | |||
@@ -95,11 +95,11 @@ bool irq_work_queue(struct irq_work *work) | |||
95 | 95 | ||
96 | /* If the work is "lazy", handle it from next tick if any */ | 96 | /* If the work is "lazy", handle it from next tick if any */ |
97 | if (work->flags & IRQ_WORK_LAZY) { | 97 | if (work->flags & IRQ_WORK_LAZY) { |
98 | if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) && | 98 | if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && |
99 | tick_nohz_tick_stopped()) | 99 | tick_nohz_tick_stopped()) |
100 | arch_irq_work_raise(); | 100 | arch_irq_work_raise(); |
101 | } else { | 101 | } else { |
102 | if (llist_add(&work->llnode, &__get_cpu_var(raised_list))) | 102 | if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) |
103 | arch_irq_work_raise(); | 103 | arch_irq_work_raise(); |
104 | } | 104 | } |
105 | 105 | ||
@@ -113,8 +113,8 @@ bool irq_work_needs_cpu(void) | |||
113 | { | 113 | { |
114 | struct llist_head *raised, *lazy; | 114 | struct llist_head *raised, *lazy; |
115 | 115 | ||
116 | raised = &__get_cpu_var(raised_list); | 116 | raised = this_cpu_ptr(&raised_list); |
117 | lazy = &__get_cpu_var(lazy_list); | 117 | lazy = this_cpu_ptr(&lazy_list); |
118 | 118 | ||
119 | if (llist_empty(raised) || arch_irq_work_has_interrupt()) | 119 | if (llist_empty(raised) || arch_irq_work_has_interrupt()) |
120 | if (llist_empty(lazy)) | 120 | if (llist_empty(lazy)) |
@@ -168,8 +168,8 @@ static void irq_work_run_list(struct llist_head *list) | |||
168 | */ | 168 | */ |
169 | void irq_work_run(void) | 169 | void irq_work_run(void) |
170 | { | 170 | { |
171 | irq_work_run_list(&__get_cpu_var(raised_list)); | 171 | irq_work_run_list(this_cpu_ptr(&raised_list)); |
172 | irq_work_run_list(&__get_cpu_var(lazy_list)); | 172 | irq_work_run_list(this_cpu_ptr(&lazy_list)); |
173 | } | 173 | } |
174 | EXPORT_SYMBOL_GPL(irq_work_run); | 174 | EXPORT_SYMBOL_GPL(irq_work_run); |
175 | 175 | ||
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index e3962d63e368..ced2b84b1cb7 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -2622,7 +2622,7 @@ void wake_up_klogd(void) | |||
2622 | preempt_disable(); | 2622 | preempt_disable(); |
2623 | if (waitqueue_active(&log_wait)) { | 2623 | if (waitqueue_active(&log_wait)) { |
2624 | this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); | 2624 | this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); |
2625 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | 2625 | irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); |
2626 | } | 2626 | } |
2627 | preempt_enable(); | 2627 | preempt_enable(); |
2628 | } | 2628 | } |
@@ -2638,7 +2638,7 @@ int printk_deferred(const char *fmt, ...) | |||
2638 | va_end(args); | 2638 | va_end(args); |
2639 | 2639 | ||
2640 | __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT); | 2640 | __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT); |
2641 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | 2641 | irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); |
2642 | preempt_enable(); | 2642 | preempt_enable(); |
2643 | 2643 | ||
2644 | return r; | 2644 | return r; |
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index 3ef6451e972e..c27e4f8f4879 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c | |||
@@ -134,7 +134,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); | |||
134 | 134 | ||
135 | static inline struct sched_clock_data *this_scd(void) | 135 | static inline struct sched_clock_data *this_scd(void) |
136 | { | 136 | { |
137 | return &__get_cpu_var(sched_clock_data); | 137 | return this_cpu_ptr(&sched_clock_data); |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline struct sched_clock_data *cpu_sdc(int cpu) | 140 | static inline struct sched_clock_data *cpu_sdc(int cpu) |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index abfaf3d9a29f..256e577faf1b 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
@@ -1153,7 +1153,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl); | |||
1153 | static int find_later_rq(struct task_struct *task) | 1153 | static int find_later_rq(struct task_struct *task) |
1154 | { | 1154 | { |
1155 | struct sched_domain *sd; | 1155 | struct sched_domain *sd; |
1156 | struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl); | 1156 | struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl); |
1157 | int this_cpu = smp_processor_id(); | 1157 | int this_cpu = smp_processor_id(); |
1158 | int best_cpu, cpu = task_cpu(task); | 1158 | int best_cpu, cpu = task_cpu(task); |
1159 | 1159 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b78280c59b46..0b069bf3e708 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -6615,7 +6615,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
6615 | struct sched_group *group; | 6615 | struct sched_group *group; |
6616 | struct rq *busiest; | 6616 | struct rq *busiest; |
6617 | unsigned long flags; | 6617 | unsigned long flags; |
6618 | struct cpumask *cpus = __get_cpu_var(load_balance_mask); | 6618 | struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); |
6619 | 6619 | ||
6620 | struct lb_env env = { | 6620 | struct lb_env env = { |
6621 | .sd = sd, | 6621 | .sd = sd, |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 87ea5bf1b87f..d024e6ce30ba 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -1525,7 +1525,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); | |||
1525 | static int find_lowest_rq(struct task_struct *task) | 1525 | static int find_lowest_rq(struct task_struct *task) |
1526 | { | 1526 | { |
1527 | struct sched_domain *sd; | 1527 | struct sched_domain *sd; |
1528 | struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); | 1528 | struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask); |
1529 | int this_cpu = smp_processor_id(); | 1529 | int this_cpu = smp_processor_id(); |
1530 | int cpu = task_cpu(task); | 1530 | int cpu = task_cpu(task); |
1531 | 1531 | ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6130251de280..24156c8434d1 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -663,10 +663,10 @@ static inline int cpu_of(struct rq *rq) | |||
663 | DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); | 663 | DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
664 | 664 | ||
665 | #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) | 665 | #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) |
666 | #define this_rq() (&__get_cpu_var(runqueues)) | 666 | #define this_rq() this_cpu_ptr(&runqueues) |
667 | #define task_rq(p) cpu_rq(task_cpu(p)) | 667 | #define task_rq(p) cpu_rq(task_cpu(p)) |
668 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) | 668 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) |
669 | #define raw_rq() (&__raw_get_cpu_var(runqueues)) | 669 | #define raw_rq() raw_cpu_ptr(&runqueues) |
670 | 670 | ||
671 | static inline u64 rq_clock(struct rq *rq) | 671 | static inline u64 rq_clock(struct rq *rq) |
672 | { | 672 | { |
diff --git a/kernel/smp.c b/kernel/smp.c index 9e0d0b289118..f38a1e692259 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -165,7 +165,7 @@ static int generic_exec_single(int cpu, struct call_single_data *csd, | |||
165 | if (!csd) { | 165 | if (!csd) { |
166 | csd = &csd_stack; | 166 | csd = &csd_stack; |
167 | if (!wait) | 167 | if (!wait) |
168 | csd = &__get_cpu_var(csd_data); | 168 | csd = this_cpu_ptr(&csd_data); |
169 | } | 169 | } |
170 | 170 | ||
171 | csd_lock(csd); | 171 | csd_lock(csd); |
@@ -230,7 +230,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) | |||
230 | 230 | ||
231 | WARN_ON(!irqs_disabled()); | 231 | WARN_ON(!irqs_disabled()); |
232 | 232 | ||
233 | head = &__get_cpu_var(call_single_queue); | 233 | head = this_cpu_ptr(&call_single_queue); |
234 | entry = llist_del_all(head); | 234 | entry = llist_del_all(head); |
235 | entry = llist_reverse_order(entry); | 235 | entry = llist_reverse_order(entry); |
236 | 236 | ||
@@ -420,7 +420,7 @@ void smp_call_function_many(const struct cpumask *mask, | |||
420 | return; | 420 | return; |
421 | } | 421 | } |
422 | 422 | ||
423 | cfd = &__get_cpu_var(cfd_data); | 423 | cfd = this_cpu_ptr(&cfd_data); |
424 | 424 | ||
425 | cpumask_and(cfd->cpumask, mask, cpu_online_mask); | 425 | cpumask_and(cfd->cpumask, mask, cpu_online_mask); |
426 | cpumask_clear_cpu(this_cpu, cfd->cpumask); | 426 | cpumask_clear_cpu(this_cpu, cfd->cpumask); |
diff --git a/kernel/softirq.c b/kernel/softirq.c index 348ec763b104..0699add19164 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -485,7 +485,7 @@ static void tasklet_action(struct softirq_action *a) | |||
485 | local_irq_disable(); | 485 | local_irq_disable(); |
486 | list = __this_cpu_read(tasklet_vec.head); | 486 | list = __this_cpu_read(tasklet_vec.head); |
487 | __this_cpu_write(tasklet_vec.head, NULL); | 487 | __this_cpu_write(tasklet_vec.head, NULL); |
488 | __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head); | 488 | __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); |
489 | local_irq_enable(); | 489 | local_irq_enable(); |
490 | 490 | ||
491 | while (list) { | 491 | while (list) { |
@@ -521,7 +521,7 @@ static void tasklet_hi_action(struct softirq_action *a) | |||
521 | local_irq_disable(); | 521 | local_irq_disable(); |
522 | list = __this_cpu_read(tasklet_hi_vec.head); | 522 | list = __this_cpu_read(tasklet_hi_vec.head); |
523 | __this_cpu_write(tasklet_hi_vec.head, NULL); | 523 | __this_cpu_write(tasklet_hi_vec.head, NULL); |
524 | __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); | 524 | __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head)); |
525 | local_irq_enable(); | 525 | local_irq_enable(); |
526 | 526 | ||
527 | while (list) { | 527 | while (list) { |
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 13d2f7cd65db..b312fcc73024 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -638,7 +638,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead) | |||
638 | fill_tgid_exit(tsk); | 638 | fill_tgid_exit(tsk); |
639 | } | 639 | } |
640 | 640 | ||
641 | listeners = __this_cpu_ptr(&listener_array); | 641 | listeners = raw_cpu_ptr(&listener_array); |
642 | if (list_empty(&listeners->list)) | 642 | if (list_empty(&listeners->list)) |
643 | return; | 643 | return; |
644 | 644 | ||
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index ab370ffffd53..37e50aadd471 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
@@ -558,7 +558,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) | |||
558 | static int hrtimer_reprogram(struct hrtimer *timer, | 558 | static int hrtimer_reprogram(struct hrtimer *timer, |
559 | struct hrtimer_clock_base *base) | 559 | struct hrtimer_clock_base *base) |
560 | { | 560 | { |
561 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 561 | struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); |
562 | ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); | 562 | ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); |
563 | int res; | 563 | int res; |
564 | 564 | ||
@@ -629,7 +629,7 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) | |||
629 | */ | 629 | */ |
630 | static void retrigger_next_event(void *arg) | 630 | static void retrigger_next_event(void *arg) |
631 | { | 631 | { |
632 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); | 632 | struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases); |
633 | 633 | ||
634 | if (!hrtimer_hres_active()) | 634 | if (!hrtimer_hres_active()) |
635 | return; | 635 | return; |
@@ -903,7 +903,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | |||
903 | */ | 903 | */ |
904 | debug_deactivate(timer); | 904 | debug_deactivate(timer); |
905 | timer_stats_hrtimer_clear_start_info(timer); | 905 | timer_stats_hrtimer_clear_start_info(timer); |
906 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); | 906 | reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases); |
907 | /* | 907 | /* |
908 | * We must preserve the CALLBACK state flag here, | 908 | * We must preserve the CALLBACK state flag here, |
909 | * otherwise we could move the timer base in | 909 | * otherwise we could move the timer base in |
@@ -963,7 +963,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | |||
963 | * on dynticks target. | 963 | * on dynticks target. |
964 | */ | 964 | */ |
965 | wake_up_nohz_cpu(new_base->cpu_base->cpu); | 965 | wake_up_nohz_cpu(new_base->cpu_base->cpu); |
966 | } else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) && | 966 | } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) && |
967 | hrtimer_reprogram(timer, new_base)) { | 967 | hrtimer_reprogram(timer, new_base)) { |
968 | /* | 968 | /* |
969 | * Only allow reprogramming if the new base is on this CPU. | 969 | * Only allow reprogramming if the new base is on this CPU. |
@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining); | |||
1103 | */ | 1103 | */ |
1104 | ktime_t hrtimer_get_next_event(void) | 1104 | ktime_t hrtimer_get_next_event(void) |
1105 | { | 1105 | { |
1106 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1106 | struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); |
1107 | struct hrtimer_clock_base *base = cpu_base->clock_base; | 1107 | struct hrtimer_clock_base *base = cpu_base->clock_base; |
1108 | ktime_t delta, mindelta = { .tv64 = KTIME_MAX }; | 1108 | ktime_t delta, mindelta = { .tv64 = KTIME_MAX }; |
1109 | unsigned long flags; | 1109 | unsigned long flags; |
@@ -1144,7 +1144,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |||
1144 | 1144 | ||
1145 | memset(timer, 0, sizeof(struct hrtimer)); | 1145 | memset(timer, 0, sizeof(struct hrtimer)); |
1146 | 1146 | ||
1147 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); | 1147 | cpu_base = raw_cpu_ptr(&hrtimer_bases); |
1148 | 1148 | ||
1149 | if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) | 1149 | if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) |
1150 | clock_id = CLOCK_MONOTONIC; | 1150 | clock_id = CLOCK_MONOTONIC; |
@@ -1187,7 +1187,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) | |||
1187 | struct hrtimer_cpu_base *cpu_base; | 1187 | struct hrtimer_cpu_base *cpu_base; |
1188 | int base = hrtimer_clockid_to_base(which_clock); | 1188 | int base = hrtimer_clockid_to_base(which_clock); |
1189 | 1189 | ||
1190 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); | 1190 | cpu_base = raw_cpu_ptr(&hrtimer_bases); |
1191 | *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution); | 1191 | *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution); |
1192 | 1192 | ||
1193 | return 0; | 1193 | return 0; |
@@ -1242,7 +1242,7 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) | |||
1242 | */ | 1242 | */ |
1243 | void hrtimer_interrupt(struct clock_event_device *dev) | 1243 | void hrtimer_interrupt(struct clock_event_device *dev) |
1244 | { | 1244 | { |
1245 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1245 | struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); |
1246 | ktime_t expires_next, now, entry_time, delta; | 1246 | ktime_t expires_next, now, entry_time, delta; |
1247 | int i, retries = 0; | 1247 | int i, retries = 0; |
1248 | 1248 | ||
@@ -1376,7 +1376,7 @@ static void __hrtimer_peek_ahead_timers(void) | |||
1376 | if (!hrtimer_hres_active()) | 1376 | if (!hrtimer_hres_active()) |
1377 | return; | 1377 | return; |
1378 | 1378 | ||
1379 | td = &__get_cpu_var(tick_cpu_device); | 1379 | td = this_cpu_ptr(&tick_cpu_device); |
1380 | if (td && td->evtdev) | 1380 | if (td && td->evtdev) |
1381 | hrtimer_interrupt(td->evtdev); | 1381 | hrtimer_interrupt(td->evtdev); |
1382 | } | 1382 | } |
@@ -1440,7 +1440,7 @@ void hrtimer_run_pending(void) | |||
1440 | void hrtimer_run_queues(void) | 1440 | void hrtimer_run_queues(void) |
1441 | { | 1441 | { |
1442 | struct timerqueue_node *node; | 1442 | struct timerqueue_node *node; |
1443 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1443 | struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); |
1444 | struct hrtimer_clock_base *base; | 1444 | struct hrtimer_clock_base *base; |
1445 | int index, gettime = 1; | 1445 | int index, gettime = 1; |
1446 | 1446 | ||
@@ -1679,7 +1679,7 @@ static void migrate_hrtimers(int scpu) | |||
1679 | 1679 | ||
1680 | local_irq_disable(); | 1680 | local_irq_disable(); |
1681 | old_base = &per_cpu(hrtimer_bases, scpu); | 1681 | old_base = &per_cpu(hrtimer_bases, scpu); |
1682 | new_base = &__get_cpu_var(hrtimer_bases); | 1682 | new_base = this_cpu_ptr(&hrtimer_bases); |
1683 | /* | 1683 | /* |
1684 | * The caller is globally serialized and nobody else | 1684 | * The caller is globally serialized and nobody else |
1685 | * takes two locks at once, deadlock is not possible. | 1685 | * takes two locks at once, deadlock is not possible. |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 64c5990fd500..066f0ec05e48 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -554,7 +554,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | |||
554 | void tick_check_oneshot_broadcast_this_cpu(void) | 554 | void tick_check_oneshot_broadcast_this_cpu(void) |
555 | { | 555 | { |
556 | if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) { | 556 | if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) { |
557 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | 557 | struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
558 | 558 | ||
559 | /* | 559 | /* |
560 | * We might be in the middle of switching over from | 560 | * We might be in the middle of switching over from |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 052b4b53c3d6..7efeedf53ebd 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -224,7 +224,7 @@ static void tick_setup_device(struct tick_device *td, | |||
224 | 224 | ||
225 | void tick_install_replacement(struct clock_event_device *newdev) | 225 | void tick_install_replacement(struct clock_event_device *newdev) |
226 | { | 226 | { |
227 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | 227 | struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
228 | int cpu = smp_processor_id(); | 228 | int cpu = smp_processor_id(); |
229 | 229 | ||
230 | clockevents_exchange_device(td->evtdev, newdev); | 230 | clockevents_exchange_device(td->evtdev, newdev); |
@@ -374,14 +374,14 @@ void tick_shutdown(unsigned int *cpup) | |||
374 | 374 | ||
375 | void tick_suspend(void) | 375 | void tick_suspend(void) |
376 | { | 376 | { |
377 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | 377 | struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
378 | 378 | ||
379 | clockevents_shutdown(td->evtdev); | 379 | clockevents_shutdown(td->evtdev); |
380 | } | 380 | } |
381 | 381 | ||
382 | void tick_resume(void) | 382 | void tick_resume(void) |
383 | { | 383 | { |
384 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | 384 | struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
385 | int broadcast = tick_resume_broadcast(); | 385 | int broadcast = tick_resume_broadcast(); |
386 | 386 | ||
387 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); | 387 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); |
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 824109060a33..7ce740e78e1b 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c | |||
@@ -59,7 +59,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev, | |||
59 | */ | 59 | */ |
60 | int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) | 60 | int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) |
61 | { | 61 | { |
62 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | 62 | struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
63 | struct clock_event_device *dev = td->evtdev; | 63 | struct clock_event_device *dev = td->evtdev; |
64 | 64 | ||
65 | if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || | 65 | if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index a73efdf6f696..7b5741fc4110 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -205,7 +205,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now); | |||
205 | */ | 205 | */ |
206 | void __tick_nohz_full_check(void) | 206 | void __tick_nohz_full_check(void) |
207 | { | 207 | { |
208 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 208 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
209 | 209 | ||
210 | if (tick_nohz_full_cpu(smp_processor_id())) { | 210 | if (tick_nohz_full_cpu(smp_processor_id())) { |
211 | if (ts->tick_stopped && !is_idle_task(current)) { | 211 | if (ts->tick_stopped && !is_idle_task(current)) { |
@@ -573,7 +573,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, | |||
573 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; | 573 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; |
574 | ktime_t last_update, expires, ret = { .tv64 = 0 }; | 574 | ktime_t last_update, expires, ret = { .tv64 = 0 }; |
575 | unsigned long rcu_delta_jiffies; | 575 | unsigned long rcu_delta_jiffies; |
576 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | 576 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
577 | u64 time_delta; | 577 | u64 time_delta; |
578 | 578 | ||
579 | time_delta = timekeeping_max_deferment(); | 579 | time_delta = timekeeping_max_deferment(); |
@@ -841,7 +841,7 @@ void tick_nohz_idle_enter(void) | |||
841 | 841 | ||
842 | local_irq_disable(); | 842 | local_irq_disable(); |
843 | 843 | ||
844 | ts = &__get_cpu_var(tick_cpu_sched); | 844 | ts = this_cpu_ptr(&tick_cpu_sched); |
845 | ts->inidle = 1; | 845 | ts->inidle = 1; |
846 | __tick_nohz_idle_enter(ts); | 846 | __tick_nohz_idle_enter(ts); |
847 | 847 | ||
@@ -859,7 +859,7 @@ EXPORT_SYMBOL_GPL(tick_nohz_idle_enter); | |||
859 | */ | 859 | */ |
860 | void tick_nohz_irq_exit(void) | 860 | void tick_nohz_irq_exit(void) |
861 | { | 861 | { |
862 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 862 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
863 | 863 | ||
864 | if (ts->inidle) | 864 | if (ts->inidle) |
865 | __tick_nohz_idle_enter(ts); | 865 | __tick_nohz_idle_enter(ts); |
@@ -874,7 +874,7 @@ void tick_nohz_irq_exit(void) | |||
874 | */ | 874 | */ |
875 | ktime_t tick_nohz_get_sleep_length(void) | 875 | ktime_t tick_nohz_get_sleep_length(void) |
876 | { | 876 | { |
877 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 877 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
878 | 878 | ||
879 | return ts->sleep_length; | 879 | return ts->sleep_length; |
880 | } | 880 | } |
@@ -952,7 +952,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts) | |||
952 | */ | 952 | */ |
953 | void tick_nohz_idle_exit(void) | 953 | void tick_nohz_idle_exit(void) |
954 | { | 954 | { |
955 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 955 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
956 | ktime_t now; | 956 | ktime_t now; |
957 | 957 | ||
958 | local_irq_disable(); | 958 | local_irq_disable(); |
@@ -987,7 +987,7 @@ static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) | |||
987 | */ | 987 | */ |
988 | static void tick_nohz_handler(struct clock_event_device *dev) | 988 | static void tick_nohz_handler(struct clock_event_device *dev) |
989 | { | 989 | { |
990 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 990 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
991 | struct pt_regs *regs = get_irq_regs(); | 991 | struct pt_regs *regs = get_irq_regs(); |
992 | ktime_t now = ktime_get(); | 992 | ktime_t now = ktime_get(); |
993 | 993 | ||
@@ -1011,7 +1011,7 @@ static void tick_nohz_handler(struct clock_event_device *dev) | |||
1011 | */ | 1011 | */ |
1012 | static void tick_nohz_switch_to_nohz(void) | 1012 | static void tick_nohz_switch_to_nohz(void) |
1013 | { | 1013 | { |
1014 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 1014 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1015 | ktime_t next; | 1015 | ktime_t next; |
1016 | 1016 | ||
1017 | if (!tick_nohz_enabled) | 1017 | if (!tick_nohz_enabled) |
@@ -1073,7 +1073,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now) | |||
1073 | 1073 | ||
1074 | static inline void tick_nohz_irq_enter(void) | 1074 | static inline void tick_nohz_irq_enter(void) |
1075 | { | 1075 | { |
1076 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 1076 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1077 | ktime_t now; | 1077 | ktime_t now; |
1078 | 1078 | ||
1079 | if (!ts->idle_active && !ts->tick_stopped) | 1079 | if (!ts->idle_active && !ts->tick_stopped) |
@@ -1151,7 +1151,7 @@ early_param("skew_tick", skew_tick); | |||
1151 | */ | 1151 | */ |
1152 | void tick_setup_sched_timer(void) | 1152 | void tick_setup_sched_timer(void) |
1153 | { | 1153 | { |
1154 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 1154 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1155 | ktime_t now = ktime_get(); | 1155 | ktime_t now = ktime_get(); |
1156 | 1156 | ||
1157 | /* | 1157 | /* |
@@ -1220,7 +1220,7 @@ void tick_clock_notify(void) | |||
1220 | */ | 1220 | */ |
1221 | void tick_oneshot_notify(void) | 1221 | void tick_oneshot_notify(void) |
1222 | { | 1222 | { |
1223 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 1223 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1224 | 1224 | ||
1225 | set_bit(0, &ts->check_clocks); | 1225 | set_bit(0, &ts->check_clocks); |
1226 | } | 1226 | } |
@@ -1235,7 +1235,7 @@ void tick_oneshot_notify(void) | |||
1235 | */ | 1235 | */ |
1236 | int tick_check_oneshot_change(int allow_nohz) | 1236 | int tick_check_oneshot_change(int allow_nohz) |
1237 | { | 1237 | { |
1238 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 1238 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1239 | 1239 | ||
1240 | if (!test_and_clear_bit(0, &ts->check_clocks)) | 1240 | if (!test_and_clear_bit(0, &ts->check_clocks)) |
1241 | return 0; | 1241 | return 0; |
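One hunk above differs from the plain pointer conversions: __get_cpu_var(tick_cpu_device).evtdev becomes __this_cpu_read(tick_cpu_device.evtdev), i.e. a single member of a per-cpu struct is read directly instead of going through a pointer. A minimal sketch of that idiom (struct demo_dev and demo_get_evt are hypothetical):

#include <linux/percpu.h>

struct demo_dev {
	void *evt;
};

static DEFINE_PER_CPU(struct demo_dev, demo_dev);

static void *demo_get_evt(void)
{
	/* Reads only the member; no &per-cpu-struct round trip. */
	return __this_cpu_read(demo_dev.evt);
}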
diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 9bbb8344ed3b..3260ffdb368f 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c | |||
@@ -655,7 +655,7 @@ static inline void debug_assert_init(struct timer_list *timer) | |||
655 | static void do_init_timer(struct timer_list *timer, unsigned int flags, | 655 | static void do_init_timer(struct timer_list *timer, unsigned int flags, |
656 | const char *name, struct lock_class_key *key) | 656 | const char *name, struct lock_class_key *key) |
657 | { | 657 | { |
658 | struct tvec_base *base = __raw_get_cpu_var(tvec_bases); | 658 | struct tvec_base *base = raw_cpu_read(tvec_bases); |
659 | 659 | ||
660 | timer->entry.next = NULL; | 660 | timer->entry.next = NULL; |
661 | timer->base = (void *)((unsigned long)base | flags); | 661 | timer->base = (void *)((unsigned long)base | flags); |
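The timer hunk is another value read rather than an address: tvec_bases is itself a per-cpu pointer, so the old __raw_get_cpu_var(tvec_bases) lvalue becomes raw_cpu_read(tvec_bases), not raw_cpu_ptr(). A sketch with a hypothetical per-cpu pointer:

#include <linux/percpu.h>

struct demo_base;					/* opaque here */
static DEFINE_PER_CPU(struct demo_base *, demo_base_ptr);

static struct demo_base *demo_get_base(void)
{
	/* The variable's value is the pointer we want; raw_cpu_ptr()
	 * would instead return the address of the per-cpu slot. */
	return raw_cpu_read(demo_base_ptr);
}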
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c index 394f70b17162..9586b670a5b2 100644 --- a/kernel/user-return-notifier.c +++ b/kernel/user-return-notifier.c | |||
@@ -14,7 +14,7 @@ static DEFINE_PER_CPU(struct hlist_head, return_notifier_list); | |||
14 | void user_return_notifier_register(struct user_return_notifier *urn) | 14 | void user_return_notifier_register(struct user_return_notifier *urn) |
15 | { | 15 | { |
16 | set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); | 16 | set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); |
17 | hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list)); | 17 | hlist_add_head(&urn->link, this_cpu_ptr(&return_notifier_list)); |
18 | } | 18 | } |
19 | EXPORT_SYMBOL_GPL(user_return_notifier_register); | 19 | EXPORT_SYMBOL_GPL(user_return_notifier_register); |
20 | 20 | ||
@@ -25,7 +25,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register); | |||
25 | void user_return_notifier_unregister(struct user_return_notifier *urn) | 25 | void user_return_notifier_unregister(struct user_return_notifier *urn) |
26 | { | 26 | { |
27 | hlist_del(&urn->link); | 27 | hlist_del(&urn->link); |
28 | if (hlist_empty(&__get_cpu_var(return_notifier_list))) | 28 | if (hlist_empty(this_cpu_ptr(&return_notifier_list))) |
29 | clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); | 29 | clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); |
30 | } | 30 | } |
31 | EXPORT_SYMBOL_GPL(user_return_notifier_unregister); | 31 | EXPORT_SYMBOL_GPL(user_return_notifier_unregister); |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 49e9537f3673..70bf11815f84 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -208,7 +208,7 @@ void touch_nmi_watchdog(void) | |||
208 | * case we shouldn't have to worry about the watchdog | 208 | * case we shouldn't have to worry about the watchdog |
209 | * going off. | 209 | * going off. |
210 | */ | 210 | */ |
211 | __raw_get_cpu_var(watchdog_nmi_touch) = true; | 211 | raw_cpu_write(watchdog_nmi_touch, true); |
212 | touch_softlockup_watchdog(); | 212 | touch_softlockup_watchdog(); |
213 | } | 213 | } |
214 | EXPORT_SYMBOL(touch_nmi_watchdog); | 214 | EXPORT_SYMBOL(touch_nmi_watchdog); |
@@ -217,8 +217,8 @@ EXPORT_SYMBOL(touch_nmi_watchdog); | |||
217 | 217 | ||
218 | void touch_softlockup_watchdog_sync(void) | 218 | void touch_softlockup_watchdog_sync(void) |
219 | { | 219 | { |
220 | __raw_get_cpu_var(softlockup_touch_sync) = true; | 220 | __this_cpu_write(softlockup_touch_sync, true); |
221 | __raw_get_cpu_var(watchdog_touch_ts) = 0; | 221 | __this_cpu_write(watchdog_touch_ts, 0); |
222 | } | 222 | } |
223 | 223 | ||
224 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 224 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
@@ -425,7 +425,7 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio) | |||
425 | 425 | ||
426 | static void watchdog_enable(unsigned int cpu) | 426 | static void watchdog_enable(unsigned int cpu) |
427 | { | 427 | { |
428 | struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); | 428 | struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); |
429 | 429 | ||
430 | /* kick off the timer for the hardlockup detector */ | 430 | /* kick off the timer for the hardlockup detector */ |
431 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 431 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
@@ -445,7 +445,7 @@ static void watchdog_enable(unsigned int cpu) | |||
445 | 445 | ||
446 | static void watchdog_disable(unsigned int cpu) | 446 | static void watchdog_disable(unsigned int cpu) |
447 | { | 447 | { |
448 | struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); | 448 | struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); |
449 | 449 | ||
450 | watchdog_set_prio(SCHED_NORMAL, 0); | 450 | watchdog_set_prio(SCHED_NORMAL, 0); |
451 | hrtimer_cancel(hrtimer); | 451 | hrtimer_cancel(hrtimer); |
@@ -585,7 +585,7 @@ static struct smp_hotplug_thread watchdog_threads = { | |||
585 | 585 | ||
586 | static void restart_watchdog_hrtimer(void *info) | 586 | static void restart_watchdog_hrtimer(void *info) |
587 | { | 587 | { |
588 | struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); | 588 | struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); |
589 | int ret; | 589 | int ret; |
590 | 590 | ||
591 | /* | 591 | /* |
diff --git a/net/core/dev.c b/net/core/dev.c index 4699dcfdc4ab..6470716ddba4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2153,7 +2153,7 @@ static inline void __netif_reschedule(struct Qdisc *q) | |||
2153 | unsigned long flags; | 2153 | unsigned long flags; |
2154 | 2154 | ||
2155 | local_irq_save(flags); | 2155 | local_irq_save(flags); |
2156 | sd = &__get_cpu_var(softnet_data); | 2156 | sd = this_cpu_ptr(&softnet_data); |
2157 | q->next_sched = NULL; | 2157 | q->next_sched = NULL; |
2158 | *sd->output_queue_tailp = q; | 2158 | *sd->output_queue_tailp = q; |
2159 | sd->output_queue_tailp = &q->next_sched; | 2159 | sd->output_queue_tailp = &q->next_sched; |
@@ -3233,7 +3233,7 @@ static void rps_trigger_softirq(void *data) | |||
3233 | static int rps_ipi_queued(struct softnet_data *sd) | 3233 | static int rps_ipi_queued(struct softnet_data *sd) |
3234 | { | 3234 | { |
3235 | #ifdef CONFIG_RPS | 3235 | #ifdef CONFIG_RPS |
3236 | struct softnet_data *mysd = &__get_cpu_var(softnet_data); | 3236 | struct softnet_data *mysd = this_cpu_ptr(&softnet_data); |
3237 | 3237 | ||
3238 | if (sd != mysd) { | 3238 | if (sd != mysd) { |
3239 | sd->rps_ipi_next = mysd->rps_ipi_list; | 3239 | sd->rps_ipi_next = mysd->rps_ipi_list; |
@@ -3260,7 +3260,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) | |||
3260 | if (qlen < (netdev_max_backlog >> 1)) | 3260 | if (qlen < (netdev_max_backlog >> 1)) |
3261 | return false; | 3261 | return false; |
3262 | 3262 | ||
3263 | sd = &__get_cpu_var(softnet_data); | 3263 | sd = this_cpu_ptr(&softnet_data); |
3264 | 3264 | ||
3265 | rcu_read_lock(); | 3265 | rcu_read_lock(); |
3266 | fl = rcu_dereference(sd->flow_limit); | 3266 | fl = rcu_dereference(sd->flow_limit); |
@@ -3407,7 +3407,7 @@ EXPORT_SYMBOL(netif_rx_ni); | |||
3407 | 3407 | ||
3408 | static void net_tx_action(struct softirq_action *h) | 3408 | static void net_tx_action(struct softirq_action *h) |
3409 | { | 3409 | { |
3410 | struct softnet_data *sd = &__get_cpu_var(softnet_data); | 3410 | struct softnet_data *sd = this_cpu_ptr(&softnet_data); |
3411 | 3411 | ||
3412 | if (sd->completion_queue) { | 3412 | if (sd->completion_queue) { |
3413 | struct sk_buff *clist; | 3413 | struct sk_buff *clist; |
@@ -3832,7 +3832,7 @@ EXPORT_SYMBOL(netif_receive_skb); | |||
3832 | static void flush_backlog(void *arg) | 3832 | static void flush_backlog(void *arg) |
3833 | { | 3833 | { |
3834 | struct net_device *dev = arg; | 3834 | struct net_device *dev = arg; |
3835 | struct softnet_data *sd = &__get_cpu_var(softnet_data); | 3835 | struct softnet_data *sd = this_cpu_ptr(&softnet_data); |
3836 | struct sk_buff *skb, *tmp; | 3836 | struct sk_buff *skb, *tmp; |
3837 | 3837 | ||
3838 | rps_lock(sd); | 3838 | rps_lock(sd); |
@@ -4379,7 +4379,7 @@ void __napi_schedule(struct napi_struct *n) | |||
4379 | unsigned long flags; | 4379 | unsigned long flags; |
4380 | 4380 | ||
4381 | local_irq_save(flags); | 4381 | local_irq_save(flags); |
4382 | ____napi_schedule(&__get_cpu_var(softnet_data), n); | 4382 | ____napi_schedule(this_cpu_ptr(&softnet_data), n); |
4383 | local_irq_restore(flags); | 4383 | local_irq_restore(flags); |
4384 | } | 4384 | } |
4385 | EXPORT_SYMBOL(__napi_schedule); | 4385 | EXPORT_SYMBOL(__napi_schedule); |
@@ -4500,7 +4500,7 @@ EXPORT_SYMBOL(netif_napi_del); | |||
4500 | 4500 | ||
4501 | static void net_rx_action(struct softirq_action *h) | 4501 | static void net_rx_action(struct softirq_action *h) |
4502 | { | 4502 | { |
4503 | struct softnet_data *sd = &__get_cpu_var(softnet_data); | 4503 | struct softnet_data *sd = this_cpu_ptr(&softnet_data); |
4504 | unsigned long time_limit = jiffies + 2; | 4504 | unsigned long time_limit = jiffies + 2; |
4505 | int budget = netdev_budget; | 4505 | int budget = netdev_budget; |
4506 | void *have; | 4506 | void *have; |
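
Every softnet_data conversion in dev.c is the same mechanical change: sd = &__get_cpu_var(softnet_data) becomes sd = this_cpu_ptr(&softnet_data), with the address-of operator moving into the macro argument. A rough sketch of the idiom, with interrupts disabled around the per-cpu access as in __netif_reschedule(), is below; the example_queue type, the example_queues variable and the helpers are made up for illustration.

    /* Illustrative only: a stand-in per-cpu struct, not the real softnet_data. */
    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/irqflags.h>
    #include <linux/list.h>

    struct example_queue {
            struct list_head list;
    };

    static DEFINE_PER_CPU(struct example_queue, example_queues);

    static void example_init(void)
    {
            int cpu;

            /* per_cpu() addresses a specific CPU's copy; used here for setup. */
            for_each_possible_cpu(cpu)
                    INIT_LIST_HEAD(&per_cpu(example_queues, cpu).list);
    }

    static void example_enqueue(struct list_head *entry)
    {
            struct example_queue *q;
            unsigned long flags;

            local_irq_save(flags);
            /* Old: q = &__get_cpu_var(example_queues);
             * New: a plain pointer-based accessor for this CPU's copy. */
            q = this_cpu_ptr(&example_queues);
            list_add_tail(entry, &q->list);
            local_irq_restore(flags);
    }
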
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 50f9a9db5792..252e155c837b 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -146,7 +146,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location) | |||
146 | unsigned long flags; | 146 | unsigned long flags; |
147 | 147 | ||
148 | local_irq_save(flags); | 148 | local_irq_save(flags); |
149 | data = &__get_cpu_var(dm_cpu_data); | 149 | data = this_cpu_ptr(&dm_cpu_data); |
150 | spin_lock(&data->lock); | 150 | spin_lock(&data->lock); |
151 | dskb = data->skb; | 151 | dskb = data->skb; |
152 | 152 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 829d013745ab..61059a05ec95 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -345,7 +345,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | |||
345 | unsigned long flags; | 345 | unsigned long flags; |
346 | 346 | ||
347 | local_irq_save(flags); | 347 | local_irq_save(flags); |
348 | nc = &__get_cpu_var(netdev_alloc_cache); | 348 | nc = this_cpu_ptr(&netdev_alloc_cache); |
349 | if (unlikely(!nc->frag.page)) { | 349 | if (unlikely(!nc->frag.page)) { |
350 | refill: | 350 | refill: |
351 | for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) { | 351 | for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) { |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 793c0bb8c4fd..2d4ae469b471 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1311,7 +1311,7 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt) | |||
1311 | if (rt_is_input_route(rt)) { | 1311 | if (rt_is_input_route(rt)) { |
1312 | p = (struct rtable **)&nh->nh_rth_input; | 1312 | p = (struct rtable **)&nh->nh_rth_input; |
1313 | } else { | 1313 | } else { |
1314 | p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output); | 1314 | p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output); |
1315 | } | 1315 | } |
1316 | orig = *p; | 1316 | orig = *p; |
1317 | 1317 | ||
@@ -1939,7 +1939,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
1939 | do_cache = false; | 1939 | do_cache = false; |
1940 | goto add; | 1940 | goto add; |
1941 | } | 1941 | } |
1942 | prth = __this_cpu_ptr(nh->nh_pcpu_rth_output); | 1942 | prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); |
1943 | } | 1943 | } |
1944 | rth = rcu_dereference(*prth); | 1944 | rth = rcu_dereference(*prth); |
1945 | if (rt_cache_valid(rth)) { | 1945 | if (rt_cache_valid(rth)) { |
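
The route.c change is slightly different: nh_pcpu_rth_output is a dynamically allocated per-cpu pointer (set up with alloc_percpu()), so the code already passes a pointer rather than taking an address, and only the accessor is renamed, __this_cpu_ptr() to raw_cpu_ptr(). A hedged sketch of that dynamic case follows; example_cache, example_pcpu_cache and the surrounding helpers are invented for the example.

    /* Sketch of the dynamically allocated per-cpu case; names are made up. */
    #include <linux/percpu.h>
    #include <linux/errno.h>

    struct example_cache {
            unsigned long hits;
    };

    static struct example_cache __percpu *example_pcpu_cache;

    static int example_setup(void)
    {
            example_pcpu_cache = alloc_percpu(struct example_cache);
            return example_pcpu_cache ? 0 : -ENOMEM;
    }

    static void example_hit(void)
    {
            /* Old: struct example_cache *c = __this_cpu_ptr(example_pcpu_cache);
             * New: raw_cpu_ptr() is the renamed, check-free accessor; the caller
             * is assumed to have preemption excluded already, otherwise
             * this_cpu_ptr() would be the checked choice. */
            struct example_cache *c = raw_cpu_ptr(example_pcpu_cache);

            c->hits++;
    }
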
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 0431a8f3c8f4..af660030e3c7 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -40,7 +40,7 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, | |||
40 | 40 | ||
41 | net_get_random_once(syncookie_secret, sizeof(syncookie_secret)); | 41 | net_get_random_once(syncookie_secret, sizeof(syncookie_secret)); |
42 | 42 | ||
43 | tmp = __get_cpu_var(ipv4_cookie_scratch); | 43 | tmp = this_cpu_ptr(ipv4_cookie_scratch); |
44 | memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c])); | 44 | memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c])); |
45 | tmp[0] = (__force u32)saddr; | 45 | tmp[0] = (__force u32)saddr; |
46 | tmp[1] = (__force u32)daddr; | 46 | tmp[1] = (__force u32)daddr; |
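
The syncookie scratch space is a per-cpu array, so the old __get_cpu_var() returned an array lvalue that decayed to a pointer; with this_cpu_ptr() the array name is passed directly, again without an explicit '&'. A small sketch, assuming a hypothetical eight-word scratch array and assuming the caller keeps the task from migrating while the buffer is in use:

    /* Sketch of the per-cpu scratch-array pattern; the array is hypothetical. */
    #include <linux/percpu.h>
    #include <linux/types.h>

    static DEFINE_PER_CPU(u32[8], example_scratch);

    static u32 example_mix(u32 a, u32 b)
    {
            u32 *tmp;

            /* Old: tmp = __get_cpu_var(example_scratch);  (array lvalue decays)
             * New: the array name is already a per-cpu address, so it is passed
             * to this_cpu_ptr() without an explicit '&'. */
            tmp = this_cpu_ptr(example_scratch);

            tmp[0] = a;
            tmp[1] = b;
            return tmp[0] ^ tmp[1];
    }
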
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 86023b9be47f..1bec4e76d88c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2941,7 +2941,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | |||
2941 | local_bh_disable(); | 2941 | local_bh_disable(); |
2942 | p = ACCESS_ONCE(tcp_md5sig_pool); | 2942 | p = ACCESS_ONCE(tcp_md5sig_pool); |
2943 | if (p) | 2943 | if (p) |
2944 | return __this_cpu_ptr(p); | 2944 | return raw_cpu_ptr(p); |
2945 | 2945 | ||
2946 | local_bh_enable(); | 2946 | local_bh_enable(); |
2947 | return NULL; | 2947 | return NULL; |
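
tcp_get_md5sig_pool() shows why the unchecked accessor remains the right one here: the caller has already done local_bh_disable(), so the CPU cannot change underneath it and raw_cpu_ptr() (the new name for __this_cpu_ptr()) is sufficient. A sketch of that disable-BH-then-hand-out-the-slot idiom, with an invented example_pool type standing in for the real md5sig pool:

    /* Sketch of handing out this CPU's slot under local_bh_disable();
     * the pool type and helpers are illustrative. */
    #include <linux/percpu.h>
    #include <linux/bottom_half.h>

    struct example_pool {
            void *scratch;
    };

    static struct example_pool __percpu *example_pool;

    static struct example_pool *example_get_pool(void)
    {
            local_bh_disable();
            if (example_pool)
                    /* Preemption is already excluded by local_bh_disable(),
                     * so the check-free raw_cpu_ptr() is enough. */
                    return raw_cpu_ptr(example_pool);

            local_bh_enable();
            return NULL;
    }

    static void example_put_pool(void)
    {
            local_bh_enable();
    }
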
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8d4eac793700..becd98ce9a1c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -852,7 +852,7 @@ void tcp_wfree(struct sk_buff *skb) | |||
852 | 852 | ||
853 | /* queue this socket to tasklet queue */ | 853 | /* queue this socket to tasklet queue */ |
854 | local_irq_save(flags); | 854 | local_irq_save(flags); |
855 | tsq = &__get_cpu_var(tsq_tasklet); | 855 | tsq = this_cpu_ptr(&tsq_tasklet); |
856 | list_add(&tp->tsq_node, &tsq->head); | 856 | list_add(&tp->tsq_node, &tsq->head); |
857 | tasklet_schedule(&tsq->tasklet); | 857 | tasklet_schedule(&tsq->tasklet); |
858 | local_irq_restore(flags); | 858 | local_irq_restore(flags); |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 9a2838e93cc5..e25b633266c3 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -67,7 +67,7 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd | |||
67 | 67 | ||
68 | net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret)); | 68 | net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret)); |
69 | 69 | ||
70 | tmp = __get_cpu_var(ipv6_cookie_scratch); | 70 | tmp = this_cpu_ptr(ipv6_cookie_scratch); |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * we have 320 bits of information to hash, copy in the remaining | 73 | * we have 320 bits of information to hash, copy in the remaining |
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index e8fdb172adbb..273b8bff6ba4 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
@@ -267,7 +267,7 @@ static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool) | |||
267 | unsigned long *flag; | 267 | unsigned long *flag; |
268 | 268 | ||
269 | preempt_disable(); | 269 | preempt_disable(); |
270 | flag = &__get_cpu_var(clean_list_grace); | 270 | flag = this_cpu_ptr(&clean_list_grace); |
271 | set_bit(CLEAN_LIST_BUSY_BIT, flag); | 271 | set_bit(CLEAN_LIST_BUSY_BIT, flag); |
272 | ret = llist_del_first(&pool->clean_list); | 272 | ret = llist_del_first(&pool->clean_list); |
273 | if (ret) | 273 | if (ret) |
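
The rds hunk pairs the pointer-based accessor with an explicit preempt_disable(), which is the general recipe when nothing else already pins the task to a CPU. A final sketch of that pattern, using a made-up per-cpu flag word and bit name rather than clean_list_grace and CLEAN_LIST_BUSY_BIT:

    /* Sketch of taking a per-cpu flag word under preempt_disable(); the
     * variable and bit name are invented for illustration. */
    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/bitops.h>

    #define EXAMPLE_BUSY_BIT        0

    static DEFINE_PER_CPU(unsigned long, example_grace);

    static void example_critical_section(void)
    {
            unsigned long *flag;

            preempt_disable();
            /* Old: flag = &__get_cpu_var(example_grace);
             * New: this_cpu_ptr(&example_grace); preempt_disable() keeps the
             * task on this CPU for as long as the pointer is used. */
            flag = this_cpu_ptr(&example_grace);
            set_bit(EXAMPLE_BUSY_BIT, flag);

            /* ... work on this CPU's state ... */

            clear_bit(EXAMPLE_BUSY_BIT, flag);
            preempt_enable();
    }
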