path: root/arch/s390
author    Christoph Lameter <cl@linux.com>  2014-08-17 13:30:45 -0400
committer Tejun Heo <tj@kernel.org>  2014-08-26 13:45:52 -0400
commit    eb7e7d766326f70859046bfdb6277068c2461fe2 (patch)
tree      f69fc4fc3e08fc7fbed5c7513406d397449fc99d /arch/s390
parent    35898716b4d3382791d219be317faace580b6a41 (diff)
s390: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.

Other use cases are for storing and retrieving data from the current
processor's percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.

__get_cpu_var() is defined as:

	#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))

__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.

this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per-cpu
variables.

This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and fewer
registers are used when code is generated.

At the end of the patch set all uses of __get_cpu_var have been removed,
so the macro is removed too.

The patch set includes passes over all arches as well. Once these
operations are used throughout, specialized macros can be defined in
non-x86 arches as well in order to optimize per-cpu access by, e.g.,
using a global register that may be set to the per-cpu base.

Transformations done to __get_cpu_var():

1. Determine the address of the percpu instance of the current processor.

	DEFINE_PER_CPU(int, y);
	int *x = &__get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(&y);

2. Same as #1 but this time an array structure is involved.

	DEFINE_PER_CPU(int, y[20]);
	int *x = __get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(y);

3. Retrieve the content of the current processor's instance of a per-cpu
   variable.

	DEFINE_PER_CPU(int, y);
	int x = __get_cpu_var(y);

   Converts to

	int x = __this_cpu_read(y);

4. Retrieve the content of a percpu struct.

	DEFINE_PER_CPU(struct mystruct, y);
	struct mystruct x = __get_cpu_var(y);

   Converts to

	memcpy(&x, this_cpu_ptr(&y), sizeof(x));

5. Assignment to a per-cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y) = x;

   Converts to

	this_cpu_write(y, x);

6. Increment/decrement etc. of a per-cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y)++;

   Converts to

	this_cpu_inc(y);

Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: linux390@de.ibm.com
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
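[Editor's note] To make the catalogue above concrete, here is a minimal
sketch of a conversion in the style of this series. The percpu variable
my_stats, its fields, and account_event() are hypothetical, invented for
illustration; only the this_cpu_*() operations themselves are real kernel
API:

	#include <linux/percpu.h>

	struct my_stats {
		unsigned long events;
		unsigned long bytes;
	};
	static DEFINE_PER_CPU(struct my_stats, my_stats);

	static void account_event(unsigned long bytes)
	{
		/* Old form: materialize the address, then dereference it:
		 *	__get_cpu_var(my_stats).events++;
		 *	__get_cpu_var(my_stats).bytes += bytes;
		 */

		/* New form (cases 5/6 above): hand the percpu offset
		 * straight to the this_cpu operations, so no address
		 * calculation is needed.
		 */
		this_cpu_inc(my_stats.events);
		this_cpu_add(my_stats.bytes, bytes);
	}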
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/cputime.h  |  2
-rw-r--r--  arch/s390/include/asm/irq.h      |  2
-rw-r--r--  arch/s390/include/asm/percpu.h   | 16
-rw-r--r--  arch/s390/kernel/irq.c           |  2
-rw-r--r--  arch/s390/kernel/kprobes.c       |  8
-rw-r--r--  arch/s390/kernel/nmi.c           | 10
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c  | 22
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c  | 16
-rw-r--r--  arch/s390/kernel/processor.c     |  4
-rw-r--r--  arch/s390/kernel/time.c          |  6
-rw-r--r--  arch/s390/kernel/vtime.c         |  2
-rw-r--r--  arch/s390/oprofile/hwsampler.c   |  2
12 files changed, 48 insertions(+), 44 deletions(-)
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f65bd3634519..692d310dc32d 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -184,7 +184,7 @@ cputime64_t s390_get_idle_time(int cpu);
 
 static inline int s390_nohz_delay(int cpu)
 {
-	return __get_cpu_var(s390_idle).nohz_delay != 0;
+	return __this_cpu_read(s390_idle.nohz_delay) != 0;
 }
 
 #define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index c4dd400a2791..713d325afbfe 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -81,7 +81,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
 
 static __always_inline void inc_irq_stat(enum interruption_class irq)
 {
-	__get_cpu_var(irq_stat).irqs[irq]++;
+	__this_cpu_inc(irq_stat.irqs[irq]);
 }
 
 struct ext_code {
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index fa91e0097458..933355e0d091 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -31,7 +31,7 @@
 	pcp_op_T__ old__, new__, prev__; \
 	pcp_op_T__ *ptr__; \
 	preempt_disable(); \
-	ptr__ = __this_cpu_ptr(&(pcp)); \
+	ptr__ = raw_cpu_ptr(&(pcp)); \
 	prev__ = *ptr__; \
 	do { \
 		old__ = prev__; \
@@ -70,7 +70,7 @@
 	pcp_op_T__ val__ = (val); \
 	pcp_op_T__ old__, *ptr__; \
 	preempt_disable(); \
-	ptr__ = __this_cpu_ptr(&(pcp)); \
+	ptr__ = raw_cpu_ptr(&(pcp)); \
 	if (__builtin_constant_p(val__) && \
 	    ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
 		asm volatile( \
@@ -97,7 +97,7 @@
 	pcp_op_T__ val__ = (val); \
 	pcp_op_T__ old__, *ptr__; \
 	preempt_disable(); \
-	ptr__ = __this_cpu_ptr(&(pcp)); \
+	ptr__ = raw_cpu_ptr(&(pcp)); \
 	asm volatile( \
 		op "	%[old__],%[val__],%[ptr__]\n" \
 		: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
@@ -116,7 +116,7 @@
 	pcp_op_T__ val__ = (val); \
 	pcp_op_T__ old__, *ptr__; \
 	preempt_disable(); \
-	ptr__ = __this_cpu_ptr(&(pcp)); \
+	ptr__ = raw_cpu_ptr(&(pcp)); \
 	asm volatile( \
 		op "	%[old__],%[val__],%[ptr__]\n" \
 		: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
@@ -138,7 +138,7 @@
 	pcp_op_T__ ret__; \
 	pcp_op_T__ *ptr__; \
 	preempt_disable(); \
-	ptr__ = __this_cpu_ptr(&(pcp)); \
+	ptr__ = raw_cpu_ptr(&(pcp)); \
 	ret__ = cmpxchg(ptr__, oval, nval); \
 	preempt_enable(); \
 	ret__; \
@@ -154,7 +154,7 @@
 	typeof(pcp) *ptr__; \
 	typeof(pcp) ret__; \
 	preempt_disable(); \
-	ptr__ = __this_cpu_ptr(&(pcp)); \
+	ptr__ = raw_cpu_ptr(&(pcp)); \
 	ret__ = xchg(ptr__, nval); \
 	preempt_enable(); \
 	ret__; \
@@ -173,8 +173,8 @@
 	typeof(pcp2) *p2__; \
 	int ret__; \
 	preempt_disable(); \
-	p1__ = __this_cpu_ptr(&(pcp1)); \
-	p2__ = __this_cpu_ptr(&(pcp2)); \
+	p1__ = raw_cpu_ptr(&(pcp1)); \
+	p2__ = raw_cpu_ptr(&(pcp2)); \
 	ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
 	preempt_enable(); \
 	ret__; \
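[Editor's note] For context on the percpu.h hunks above: these macros are
the s390 back ends behind the generic this_cpu_cmpxchg()/this_cpu_xchg()
family, and swapping __this_cpu_ptr() for raw_cpu_ptr() tracks the percpu
API renaming done earlier in this series; the generated code should be
unchanged. A minimal sketch of how the cmpxchg operation is consumed
(the percpu counter seq and claim_next_seq() are hypothetical):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, seq);

	static unsigned long claim_next_seq(void)
	{
		unsigned long old, new;

		/* Retry until nothing else on this CPU slips in between
		 * the read and the cmpxchg.
		 */
		do {
			old = this_cpu_read(seq);
			new = old + 1;
		} while (this_cpu_cmpxchg(seq, old, new) != old);

		return new;
	}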
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8eb82443cfbd..891c183211ce 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -258,7 +258,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
 
 	ext_code = *(struct ext_code *) &regs->int_code;
 	if (ext_code.code != EXT_IRQ_CLK_COMP)
-		__get_cpu_var(s390_idle).nohz_delay = 1;
+		__this_cpu_write(s390_idle.nohz_delay, 1);
 
 	index = ext_hash(ext_code.code);
 	rcu_read_lock();
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index bc71a7b95af5..131ed342ed10 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -366,9 +366,9 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
  */
 static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
 {
-	kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
+	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
 	kcb->prev_kprobe.status = kcb->kprobe_status;
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 }
 
 /*
@@ -378,7 +378,7 @@ static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
  */
 static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
@@ -459,7 +459,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
 		return 1;
 	} else if (kprobe_running()) {
-		p = __get_cpu_var(current_kprobe);
+		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
 			/*
 			 * Continuation after the jprobe completed and
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 210e1285f75a..d75c42f4147d 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -53,8 +53,12 @@ void s390_handle_mcck(void)
 	 */
 	local_irq_save(flags);
 	local_mcck_disable();
-	mcck = __get_cpu_var(cpu_mcck);
-	memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
+	/*
+	 * Ummm... Does this make sense at all? Copying the percpu struct
+	 * and then zapping it one statement later?
+	 */
+	memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
+	memset(&mcck, 0, sizeof(struct mcck_struct));
 	clear_cpu_flag(CIF_MCCK_PENDING);
 	local_mcck_enable();
 	local_irq_restore(flags);
@@ -253,7 +257,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	nmi_enter();
 	inc_irq_stat(NMI_NMI);
 	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
-	mcck = &__get_cpu_var(cpu_mcck);
+	mcck = this_cpu_ptr(&cpu_mcck);
 	umode = user_mode(regs);
 
 	if (mci->sd) {
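[Editor's note] The first nmi.c hunk above is the one instance of case 4
from the commit message: a percpu struct copied by value has no
single-instruction this_cpu equivalent, so it becomes an explicit memcpy()
from this_cpu_ptr(). The generic shape of that conversion (struct and
variable names illustrative):

	DEFINE_PER_CPU(struct mystruct, y);
	struct mystruct x;

	/* was: x = __get_cpu_var(y); */
	memcpy(&x, this_cpu_ptr(&y), sizeof(x));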
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index d3194de7ae1e..56fdad479115 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -173,7 +173,7 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc)
  */
 static void cpumf_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	int err;
 
 	if (cpuhw->flags & PMU_F_ENABLED)
@@ -196,7 +196,7 @@ static void cpumf_pmu_enable(struct pmu *pmu)
  */
 static void cpumf_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	int err;
 	u64 inactive;
 
@@ -230,7 +230,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 		return;
 
 	inc_irq_stat(IRQEXT_CMC);
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	/* Measurement alerts are shared and might happen when the PMU
 	 * is not reserved. Ignore these alerts in this case. */
@@ -250,7 +250,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 #define PMC_RELEASE 1
 static void setup_pmc_cpu(void *flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	switch (*((int *) flags)) {
 	case PMC_INIT:
@@ -475,7 +475,7 @@ static void cpumf_pmu_read(struct perf_event *event)
 
 static void cpumf_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
@@ -506,7 +506,7 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
 
 static void cpumf_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -527,7 +527,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags)
 
 static int cpumf_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	/* Check authorization for the counter set to which this
 	 * counter belongs.
@@ -551,7 +551,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
 
 static void cpumf_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	cpumf_pmu_stop(event, PERF_EF_UPDATE);
 
@@ -575,7 +575,7 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
  */
 static void cpumf_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	perf_pmu_disable(pmu);
 	cpuhw->flags |= PERF_EVENT_TXN;
@@ -589,7 +589,7 @@ static void cpumf_pmu_start_txn(struct pmu *pmu)
  */
 static void cpumf_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	WARN_ON(cpuhw->tx_state != cpuhw->state);
 
@@ -604,7 +604,7 @@ static void cpumf_pmu_cancel_txn(struct pmu *pmu)
  */
 static int cpumf_pmu_commit_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	u64 state;
 
 	/* check if the updated state can be scheduled */
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index ea0c7b2ef030..08e761318c17 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -562,7 +562,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 static void setup_pmc_cpu(void *flags)
 {
 	int err;
-	struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
 
 	err = 0;
 	switch (*((int *) flags)) {
@@ -849,7 +849,7 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
 
 static void cpumsf_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 	struct hw_perf_event *hwc;
 	int err;
 
@@ -898,7 +898,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 
 static void cpumsf_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 	struct hws_lsctl_request_block inactive;
 	struct hws_qsi_info_block si;
 	int err;
@@ -1306,7 +1306,7 @@ static void cpumsf_pmu_read(struct perf_event *event)
  */
 static void cpumsf_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
 	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
 		return;
@@ -1327,7 +1327,7 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
  */
 static void cpumsf_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
 	if (event->hw.state & PERF_HES_STOPPED)
 		return;
@@ -1346,7 +1346,7 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
 
 static int cpumsf_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 	int err;
 
 	if (cpuhw->flags & PMU_F_IN_USE)
@@ -1397,7 +1397,7 @@ out:
 
 static void cpumsf_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
 	perf_pmu_disable(event->pmu);
 	cpumsf_pmu_stop(event, PERF_EF_UPDATE);
@@ -1470,7 +1470,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 	if (!(alert & CPU_MF_INT_SF_MASK))
 		return;
 	inc_irq_stat(IRQEXT_CMS);
-	cpuhw = &__get_cpu_var(cpu_hw_sf);
+	cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
 	/* Measurement alerts are shared and might happen when the PMU
 	 * is not reserved. Ignore these alerts in this case. */
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 24612029f450..f0305b1189aa 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -23,8 +23,8 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
  */
 void cpu_init(void)
 {
-	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
-	struct cpuid *id = &__get_cpu_var(cpu_id);
+	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
+	struct cpuid *id = this_cpu_ptr(&cpu_id);
 
 	get_cpu_id(id);
 	atomic_inc(&init_mm.mm_count);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 4cef607f3711..4e5a6d881c62 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -92,7 +92,7 @@ void clock_comparator_work(void)
 	struct clock_event_device *cd;
 
 	S390_lowcore.clock_comparator = -1ULL;
-	cd = &__get_cpu_var(comparators);
+	cd = this_cpu_ptr(&comparators);
 	cd->event_handler(cd);
 }
 
@@ -360,7 +360,7 @@ EXPORT_SYMBOL(get_sync_clock);
  */
 static void disable_sync_clock(void *dummy)
 {
-	atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
+	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
 	/*
 	 * Clear the in-sync bit 2^31. All get_sync_clock calls will
 	 * fail until the sync bit is turned back on. In addition
@@ -377,7 +377,7 @@ static void disable_sync_clock(void *dummy)
  */
 static void enable_sync_clock(void)
 {
-	atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
+	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
 	atomic_set_mask(0x80000000, sw_ptr);
 }
 
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 8c34363d6f1e..f400745dedc0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void __kprobes vtime_stop_cpu(void)
 {
-	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
 	unsigned long long idle_time;
 	unsigned long psw_mask;
 
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index e53c6f268807..ff9b4eb34589 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -178,7 +178,7 @@ static int smp_ctl_qsi(int cpu)
 static void hws_ext_handler(struct ext_code ext_code,
 			    unsigned int param32, unsigned long param64)
 {
-	struct hws_cpu_buffer *cb = &__get_cpu_var(sampler_cpu_buffer);
+	struct hws_cpu_buffer *cb = this_cpu_ptr(&sampler_cpu_buffer);
 
 	if (!(param32 & CPU_MF_INT_SF_MASK))
 		return;