path: root/arch/s390
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-15 01:48:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-15 01:48:18 -0400
commit	0429fbc0bdc297d64188483ba029a23773ae07b0 (patch)
tree	67de46978c90f37540dd6ded1db20eb53a569030 /arch/s390
parent	6929c358972facf2999f8768815c40dd88514fc2 (diff)
parent	513d1a2884a49654f368b5fa25ef186e976bdada (diff)
Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu consistent-ops changes from Tejun Heo:
 "Way back, before the current percpu allocator was implemented, static
  and dynamic percpu memory areas were allocated and handled separately
  and had their own accessors.  The distinction has been gone for many
  years now; however, the now duplicate two sets of accessors remained
  with the pointer based ones - this_cpu_*() - evolving various other
  operations over time.  During the process, we also accumulated other
  inconsistent operations.

  This pull request contains Christoph's patches to clean up the
  duplicate accessor situation.  __get_cpu_var() uses are replaced with
  this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr().

  Unfortunately, the former sometimes is tricky thanks to C being a bit
  messy with the distinction between lvalues and pointers, which led to
  a rather ugly solution for cpumask_var_t involving the introduction
  of this_cpu_cpumask_var_ptr().

  This converts most of the uses but not all.  Christoph will follow up
  with the remaining conversions in this merge window and hopefully
  remove the obsolete accessors"

* 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits)
  irqchip: Properly fetch the per cpu offset
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix
  ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write.
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t
  Revert "powerpc: Replace __get_cpu_var uses"
  percpu: Remove __this_cpu_ptr
  clocksource: Replace __this_cpu_ptr with raw_cpu_ptr
  sparc: Replace __get_cpu_var uses
  avr32: Replace __get_cpu_var with __this_cpu_write
  blackfin: Replace __get_cpu_var uses
  tile: Use this_cpu_ptr() for hardware counters
  tile: Replace __get_cpu_var uses
  powerpc: Replace __get_cpu_var uses
  alpha: Replace __get_cpu_var
  ia64: Replace __get_cpu_var uses
  s390: cio driver &__get_cpu_var replacements
  s390: Replace __get_cpu_var uses
  mips: Replace __get_cpu_var uses
  MIPS: Replace __get_cpu_var uses in FPU emulator.
  arm: Replace __this_cpu_ptr with raw_cpu_ptr
  ...
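For context, the conversion pattern this series applies, as a minimal kernel-style sketch; the per-cpu variable and function names here are illustrative only, not taken from the patches below:

    #include <linux/percpu.h>

    /* Hypothetical per-cpu counter, for illustration only. */
    static DEFINE_PER_CPU(unsigned int, example_counter);

    static void example_conversion(void)
    {
            unsigned int *p;

            /*
             * Old style: __get_cpu_var() produced an lvalue, so taking
             * a pointer required an explicit '&':
             *
             *      p = &__get_cpu_var(example_counter);
             *
             * New style: this_cpu_ptr() returns the pointer directly;
             * raw_cpu_ptr() is the variant without the preemption
             * sanity check, for callers that have already made the
             * access safe.
             */
            p = this_cpu_ptr(&example_counter);
            (*p)++;

            /* Simple read-modify-write operations have dedicated
             * helpers that can map to a single instruction on some
             * architectures (the s390 versions are defined in the
             * percpu.h hunks below). */
            __this_cpu_inc(example_counter);
    }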
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/irq.h	2
-rw-r--r--	arch/s390/include/asm/percpu.h	16
-rw-r--r--	arch/s390/kernel/idle.c	2
-rw-r--r--	arch/s390/kernel/kprobes.c	8
-rw-r--r--	arch/s390/kernel/nmi.c	10
-rw-r--r--	arch/s390/kernel/perf_cpum_cf.c	22
-rw-r--r--	arch/s390/kernel/perf_cpum_sf.c	16
-rw-r--r--	arch/s390/kernel/processor.c	2
-rw-r--r--	arch/s390/kernel/time.c	6
-rw-r--r--	arch/s390/oprofile/hwsampler.c	2
10 files changed, 45 insertions, 41 deletions
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index e787cc1bff8f..b0d5f0a97a01 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -82,7 +82,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
 
 static __always_inline void inc_irq_stat(enum interruption_class irq)
 {
-	__get_cpu_var(irq_stat).irqs[irq]++;
+	__this_cpu_inc(irq_stat.irqs[irq]);
 }
 
 struct ext_code {
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index fa91e0097458..933355e0d091 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -31,7 +31,7 @@
 	pcp_op_T__ old__, new__, prev__;			\
 	pcp_op_T__ *ptr__;					\
 	preempt_disable();					\
-	ptr__ = __this_cpu_ptr(&(pcp));				\
+	ptr__ = raw_cpu_ptr(&(pcp));				\
 	prev__ = *ptr__;					\
 	do {							\
 		old__ = prev__;					\
@@ -70,7 +70,7 @@
 	pcp_op_T__ val__ = (val);				\
 	pcp_op_T__ old__, *ptr__;				\
 	preempt_disable();					\
-	ptr__ = __this_cpu_ptr(&(pcp));				\
+	ptr__ = raw_cpu_ptr(&(pcp));				\
 	if (__builtin_constant_p(val__) &&			\
 	    ((szcast)val__ > -129) && ((szcast)val__ < 128)) {	\
 		asm volatile(					\
@@ -97,7 +97,7 @@
 	pcp_op_T__ val__ = (val);				\
 	pcp_op_T__ old__, *ptr__;				\
 	preempt_disable();					\
-	ptr__ = __this_cpu_ptr(&(pcp));				\
+	ptr__ = raw_cpu_ptr(&(pcp));				\
 	asm volatile(						\
 		op " %[old__],%[val__],%[ptr__]\n"		\
 		: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)	\
@@ -116,7 +116,7 @@
 	pcp_op_T__ val__ = (val);				\
 	pcp_op_T__ old__, *ptr__;				\
 	preempt_disable();					\
-	ptr__ = __this_cpu_ptr(&(pcp));				\
+	ptr__ = raw_cpu_ptr(&(pcp));				\
 	asm volatile(						\
 		op " %[old__],%[val__],%[ptr__]\n"		\
 		: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)	\
@@ -138,7 +138,7 @@
 	pcp_op_T__ ret__;					\
 	pcp_op_T__ *ptr__;					\
 	preempt_disable();					\
-	ptr__ = __this_cpu_ptr(&(pcp));				\
+	ptr__ = raw_cpu_ptr(&(pcp));				\
 	ret__ = cmpxchg(ptr__, oval, nval);			\
 	preempt_enable();					\
 	ret__;							\
@@ -154,7 +154,7 @@
 	typeof(pcp) *ptr__;					\
 	typeof(pcp) ret__;					\
 	preempt_disable();					\
-	ptr__ = __this_cpu_ptr(&(pcp));				\
+	ptr__ = raw_cpu_ptr(&(pcp));				\
 	ret__ = xchg(ptr__, nval);				\
 	preempt_enable();					\
 	ret__;							\
@@ -173,8 +173,8 @@
 	typeof(pcp2) *p2__;					\
 	int ret__;						\
 	preempt_disable();					\
-	p1__ = __this_cpu_ptr(&(pcp1));				\
-	p2__ = __this_cpu_ptr(&(pcp2));				\
+	p1__ = raw_cpu_ptr(&(pcp1));				\
+	p2__ = raw_cpu_ptr(&(pcp2));				\
 	ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
 	preempt_enable();					\
 	ret__;							\
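All of the percpu.h hunks above share one shape: the macro disables preemption itself before dereferencing the per-cpu slot, so the debug-checked this_cpu_ptr() would be redundant and raw_cpu_ptr() is the natural replacement for the removed __this_cpu_ptr(). A stripped-down sketch of that pattern, with illustrative names:

    #include <linux/atomic.h>
    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(unsigned long, example_val);

    static unsigned long example_xchg(unsigned long nval)
    {
            unsigned long *ptr;
            unsigned long ret;

            preempt_disable();               /* pin the task to this CPU */
            ptr = raw_cpu_ptr(&example_val); /* preemption already off,
                                                no sanity check needed */
            ret = xchg(ptr, nval);
            preempt_enable();
            return ret;
    }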
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index c846aee7372f..7559f1beab29 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 void __kprobes enabled_wait(void)
 {
-	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
 	unsigned long long idle_time;
 	unsigned long psw_mask;
 
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 27ae5433fe4d..014d4729b134 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -218,9 +218,9 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
  */
 static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
 {
-	kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
+	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
 	kcb->prev_kprobe.status = kcb->kprobe_status;
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 }
 
 /*
@@ -230,7 +230,7 @@ static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
  */
 static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
@@ -311,7 +311,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
 		return 1;
 	} else if (kprobe_running()) {
-		p = __get_cpu_var(current_kprobe);
+		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
 			/*
 			 * Continuation after the jprobe completed and
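Note that kprobes.c uses a different replacement than the surrounding files: current_kprobe is a per-cpu pointer accessed by value, so the scalar __this_cpu_read()/__this_cpu_write() helpers fit better than taking the slot's address. A hedged sketch of the two idioms, with illustrative names:

    #include <linux/percpu.h>

    struct item { int count; };

    static DEFINE_PER_CPU(struct item *, cur_item); /* scalar slot: a pointer */
    static DEFINE_PER_CPU(struct item, item_buf);   /* aggregate slot: a struct */

    static void example_idioms(struct item *p)
    {
            /* Scalar per-cpu slots are read and written by value... */
            struct item *old = __this_cpu_read(cur_item);
            __this_cpu_write(cur_item, p);
            (void)old;

            /* ...while aggregates are reached via a pointer to the slot. */
            this_cpu_ptr(&item_buf)->count++;
    }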
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index db96b418160a..dd1c24ceda50 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -54,8 +54,12 @@ void s390_handle_mcck(void)
 	 */
 	local_irq_save(flags);
 	local_mcck_disable();
-	mcck = __get_cpu_var(cpu_mcck);
-	memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
+	/*
+	 * Ummm... Does this make sense at all? Copying the percpu struct
+	 * and then zapping it one statement later?
+	 */
+	memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
+	memset(&mcck, 0, sizeof(struct mcck_struct));
 	clear_cpu_flag(CIF_MCCK_PENDING);
 	local_mcck_enable();
 	local_irq_restore(flags);
@@ -269,7 +273,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	nmi_enter();
 	inc_irq_stat(NMI_NMI);
 	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
-	mcck = &__get_cpu_var(cpu_mcck);
+	mcck = this_cpu_ptr(&cpu_mcck);
 	umode = user_mode(regs);
 
 	if (mci->sd) {
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index d3194de7ae1e..56fdad479115 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -173,7 +173,7 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc)
  */
 static void cpumf_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	int err;
 
 	if (cpuhw->flags & PMU_F_ENABLED)
@@ -196,7 +196,7 @@ static void cpumf_pmu_enable(struct pmu *pmu)
  */
 static void cpumf_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	int err;
 	u64 inactive;
 
@@ -230,7 +230,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 		return;
 
 	inc_irq_stat(IRQEXT_CMC);
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	/* Measurement alerts are shared and might happen when the PMU
 	 * is not reserved. Ignore these alerts in this case. */
@@ -250,7 +250,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 #define PMC_RELEASE	1
 static void setup_pmc_cpu(void *flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	switch (*((int *) flags)) {
 	case PMC_INIT:
@@ -475,7 +475,7 @@ static void cpumf_pmu_read(struct perf_event *event)
 
 static void cpumf_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
@@ -506,7 +506,7 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
 
 static void cpumf_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -527,7 +527,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags)
 
 static int cpumf_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	/* Check authorization for the counter set to which this
 	 * counter belongs.
@@ -551,7 +551,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
 
 static void cpumf_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	cpumf_pmu_stop(event, PERF_EF_UPDATE);
 
@@ -575,7 +575,7 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
  */
 static void cpumf_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	perf_pmu_disable(pmu);
 	cpuhw->flags |= PERF_EVENT_TXN;
@@ -589,7 +589,7 @@ static void cpumf_pmu_start_txn(struct pmu *pmu)
  */
 static void cpumf_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	WARN_ON(cpuhw->tx_state != cpuhw->state);
 
@@ -604,7 +604,7 @@ static void cpumf_pmu_cancel_txn(struct pmu *pmu)
  */
 static int cpumf_pmu_commit_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	u64 state;
 
 	/* check if the updated state can be scheduled */
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index ea0c7b2ef030..08e761318c17 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -562,7 +562,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 static void setup_pmc_cpu(void *flags)
 {
 	int err;
-	struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
 
 	err = 0;
 	switch (*((int *) flags)) {
@@ -849,7 +849,7 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
 
 static void cpumsf_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 	struct hw_perf_event *hwc;
 	int err;
 
@@ -898,7 +898,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
 
 static void cpumsf_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 	struct hws_lsctl_request_block inactive;
 	struct hws_qsi_info_block si;
 	int err;
@@ -1306,7 +1306,7 @@ static void cpumsf_pmu_read(struct perf_event *event)
  */
 static void cpumsf_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
 	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
 		return;
@@ -1327,7 +1327,7 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
  */
 static void cpumsf_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
 	if (event->hw.state & PERF_HES_STOPPED)
 		return;
@@ -1346,7 +1346,7 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
 
 static int cpumsf_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 	int err;
 
 	if (cpuhw->flags & PMU_F_IN_USE)
@@ -1397,7 +1397,7 @@ out:
 
 static void cpumsf_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
 	perf_pmu_disable(event->pmu);
 	cpumsf_pmu_stop(event, PERF_EF_UPDATE);
@@ -1470,7 +1470,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
 	if (!(alert & CPU_MF_INT_SF_MASK))
 		return;
 	inc_irq_stat(IRQEXT_CMS);
-	cpuhw = &__get_cpu_var(cpu_hw_sf);
+	cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
 	/* Measurement alerts are shared and might happen when the PMU
 	 * is not reserved. Ignore these alerts in this case. */
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index edefead3b43a..dbdd33ee0102 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -23,7 +23,7 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
  */
 void cpu_init(void)
 {
-	struct cpuid *id = &__get_cpu_var(cpu_id);
+	struct cpuid *id = this_cpu_ptr(&cpu_id);
 
 	get_cpu_id(id);
 	atomic_inc(&init_mm.mm_count);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 69e980de0f62..005d665fe4a5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -92,7 +92,7 @@ void clock_comparator_work(void)
 	struct clock_event_device *cd;
 
 	S390_lowcore.clock_comparator = -1ULL;
-	cd = &__get_cpu_var(comparators);
+	cd = this_cpu_ptr(&comparators);
 	cd->event_handler(cd);
 }
98 98
@@ -373,7 +373,7 @@ EXPORT_SYMBOL(get_sync_clock);
  */
 static void disable_sync_clock(void *dummy)
 {
-	atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
+	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
 	/*
 	 * Clear the in-sync bit 2^31. All get_sync_clock calls will
 	 * fail until the sync bit is turned back on. In addition
@@ -390,7 +390,7 @@ static void disable_sync_clock(void *dummy)
  */
 static void enable_sync_clock(void)
 {
-	atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
+	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
 	atomic_set_mask(0x80000000, sw_ptr);
 }
 
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index e53c6f268807..ff9b4eb34589 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -178,7 +178,7 @@ static int smp_ctl_qsi(int cpu)
 static void hws_ext_handler(struct ext_code ext_code,
 			    unsigned int param32, unsigned long param64)
 {
-	struct hws_cpu_buffer *cb = &__get_cpu_var(sampler_cpu_buffer);
+	struct hws_cpu_buffer *cb = this_cpu_ptr(&sampler_cpu_buffer);
 
 	if (!(param32 & CPU_MF_INT_SF_MASK))
 		return;