aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-15 01:48:18 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-15 01:48:18 -0400
commit0429fbc0bdc297d64188483ba029a23773ae07b0 (patch)
tree67de46978c90f37540dd6ded1db20eb53a569030 /arch/sparc
parent6929c358972facf2999f8768815c40dd88514fc2 (diff)
parent513d1a2884a49654f368b5fa25ef186e976bdada (diff)
Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu consistent-ops changes from Tejun Heo: "Way back, before the current percpu allocator was implemented, static and dynamic percpu memory areas were allocated and handled separately and had their own accessors. The distinction has been gone for many years now; however, the now duplicate two sets of accessors remained with the pointer based ones - this_cpu_*() - evolving various other operations over time. During the process, we also accumulated other inconsistent operations. This pull request contains Christoph's patches to clean up the duplicate accessor situation. __get_cpu_var() uses are replaced with this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr(). Unfortunately, the former sometimes is tricky thanks to C being a bit messy with the distinction between lvalues and pointers, which led to a rather ugly solution for cpumask_var_t involving the introduction of this_cpu_cpumask_var_ptr(). This converts most of the uses but not all. Christoph will follow up with the remaining conversions in this merge window and hopefully remove the obsolete accessors" * 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits) irqchip: Properly fetch the per cpu offset percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write. 
percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t Revert "powerpc: Replace __get_cpu_var uses" percpu: Remove __this_cpu_ptr clocksource: Replace __this_cpu_ptr with raw_cpu_ptr sparc: Replace __get_cpu_var uses avr32: Replace __get_cpu_var with __this_cpu_write blackfin: Replace __get_cpu_var uses tile: Use this_cpu_ptr() for hardware counters tile: Replace __get_cpu_var uses powerpc: Replace __get_cpu_var uses alpha: Replace __get_cpu_var ia64: Replace __get_cpu_var uses s390: cio driver &__get_cpu_var replacements s390: Replace __get_cpu_var uses mips: Replace __get_cpu_var uses MIPS: Replace __get_cpu_var uses in FPU emulator. arm: Replace __this_cpu_ptr with raw_cpu_ptr ...
Diffstat (limited to 'arch/sparc')
-rw-r--r--arch/sparc/include/asm/cpudata_32.h2
-rw-r--r--arch/sparc/include/asm/cpudata_64.h2
-rw-r--r--arch/sparc/kernel/kprobes.c6
-rw-r--r--arch/sparc/kernel/leon_smp.c2
-rw-r--r--arch/sparc/kernel/nmi.c16
-rw-r--r--arch/sparc/kernel/pci_sun4v.c8
-rw-r--r--arch/sparc/kernel/perf_event.c26
-rw-r--r--arch/sparc/kernel/sun4d_smp.c2
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/sparc/mm/tlb.c4
10 files changed, 35 insertions, 35 deletions
diff --git a/arch/sparc/include/asm/cpudata_32.h b/arch/sparc/include/asm/cpudata_32.h
index 0300d94c25b3..05f366379f53 100644
--- a/arch/sparc/include/asm/cpudata_32.h
+++ b/arch/sparc/include/asm/cpudata_32.h
@@ -26,6 +26,6 @@ typedef struct {
26 26
27DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); 27DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
28#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) 28#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
29#define local_cpu_data() __get_cpu_var(__cpu_data) 29#define local_cpu_data() (*this_cpu_ptr(&__cpu_data))
30 30
31#endif /* _SPARC_CPUDATA_H */ 31#endif /* _SPARC_CPUDATA_H */
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 0e594076912c..a6e424d185d0 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -30,7 +30,7 @@ typedef struct {
30 30
31DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); 31DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
32#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) 32#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
33#define local_cpu_data() __get_cpu_var(__cpu_data) 33#define local_cpu_data() (*this_cpu_ptr(&__cpu_data))
34 34
35#endif /* !(__ASSEMBLY__) */ 35#endif /* !(__ASSEMBLY__) */
36 36
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 98d712843413..cd83be527586 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -83,7 +83,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
83 83
84static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 84static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
85{ 85{
86 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; 86 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
87 kcb->kprobe_status = kcb->prev_kprobe.status; 87 kcb->kprobe_status = kcb->prev_kprobe.status;
88 kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc; 88 kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
89 kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil; 89 kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
@@ -92,7 +92,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
92static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, 92static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
93 struct kprobe_ctlblk *kcb) 93 struct kprobe_ctlblk *kcb)
94{ 94{
95 __get_cpu_var(current_kprobe) = p; 95 __this_cpu_write(current_kprobe, p);
96 kcb->kprobe_orig_tnpc = regs->tnpc; 96 kcb->kprobe_orig_tnpc = regs->tnpc;
97 kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL); 97 kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
98} 98}
@@ -155,7 +155,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
155 ret = 1; 155 ret = 1;
156 goto no_kprobe; 156 goto no_kprobe;
157 } 157 }
158 p = __get_cpu_var(current_kprobe); 158 p = __this_cpu_read(current_kprobe);
159 if (p->break_handler && p->break_handler(p, regs)) 159 if (p->break_handler && p->break_handler(p, regs))
160 goto ss_probe; 160 goto ss_probe;
161 } 161 }
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 018ef11f57df..ea2bad306f93 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -343,7 +343,7 @@ static void leon_ipi_resched(int cpu)
343 343
344void leonsmp_ipi_interrupt(void) 344void leonsmp_ipi_interrupt(void)
345{ 345{
346 struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work); 346 struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);
347 347
348 if (work->single) { 348 if (work->single) {
349 work->single = 0; 349 work->single = 0;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 5b1151dcba13..a9973bb4a1b2 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -100,20 +100,20 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
100 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); 100 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
101 101
102 sum = local_cpu_data().irq0_irqs; 102 sum = local_cpu_data().irq0_irqs;
103 if (__get_cpu_var(nmi_touch)) { 103 if (__this_cpu_read(nmi_touch)) {
104 __get_cpu_var(nmi_touch) = 0; 104 __this_cpu_write(nmi_touch, 0);
105 touched = 1; 105 touched = 1;
106 } 106 }
107 if (!touched && __get_cpu_var(last_irq_sum) == sum) { 107 if (!touched && __this_cpu_read(last_irq_sum) == sum) {
108 __this_cpu_inc(alert_counter); 108 __this_cpu_inc(alert_counter);
109 if (__this_cpu_read(alert_counter) == 30 * nmi_hz) 109 if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
110 die_nmi("BUG: NMI Watchdog detected LOCKUP", 110 die_nmi("BUG: NMI Watchdog detected LOCKUP",
111 regs, panic_on_timeout); 111 regs, panic_on_timeout);
112 } else { 112 } else {
113 __get_cpu_var(last_irq_sum) = sum; 113 __this_cpu_write(last_irq_sum, sum);
114 __this_cpu_write(alert_counter, 0); 114 __this_cpu_write(alert_counter, 0);
115 } 115 }
116 if (__get_cpu_var(wd_enabled)) { 116 if (__this_cpu_read(wd_enabled)) {
117 pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); 117 pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
118 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); 118 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
119 } 119 }
@@ -154,7 +154,7 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
154void stop_nmi_watchdog(void *unused) 154void stop_nmi_watchdog(void *unused)
155{ 155{
156 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); 156 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
157 __get_cpu_var(wd_enabled) = 0; 157 __this_cpu_write(wd_enabled, 0);
158 atomic_dec(&nmi_active); 158 atomic_dec(&nmi_active);
159} 159}
160 160
@@ -207,7 +207,7 @@ error:
207 207
208void start_nmi_watchdog(void *unused) 208void start_nmi_watchdog(void *unused)
209{ 209{
210 __get_cpu_var(wd_enabled) = 1; 210 __this_cpu_write(wd_enabled, 1);
211 atomic_inc(&nmi_active); 211 atomic_inc(&nmi_active);
212 212
213 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); 213 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
@@ -218,7 +218,7 @@ void start_nmi_watchdog(void *unused)
218 218
219static void nmi_adjust_hz_one(void *unused) 219static void nmi_adjust_hz_one(void *unused)
220{ 220{
221 if (!__get_cpu_var(wd_enabled)) 221 if (!__this_cpu_read(wd_enabled))
222 return; 222 return;
223 223
224 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); 224 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index d07f6b29aed8..49d33b178793 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -48,7 +48,7 @@ static int iommu_batch_initialized;
48/* Interrupts must be disabled. */ 48/* Interrupts must be disabled. */
49static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry) 49static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
50{ 50{
51 struct iommu_batch *p = &__get_cpu_var(iommu_batch); 51 struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
52 52
53 p->dev = dev; 53 p->dev = dev;
54 p->prot = prot; 54 p->prot = prot;
@@ -94,7 +94,7 @@ static long iommu_batch_flush(struct iommu_batch *p)
94 94
95static inline void iommu_batch_new_entry(unsigned long entry) 95static inline void iommu_batch_new_entry(unsigned long entry)
96{ 96{
97 struct iommu_batch *p = &__get_cpu_var(iommu_batch); 97 struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
98 98
99 if (p->entry + p->npages == entry) 99 if (p->entry + p->npages == entry)
100 return; 100 return;
@@ -106,7 +106,7 @@ static inline void iommu_batch_new_entry(unsigned long entry)
106/* Interrupts must be disabled. */ 106/* Interrupts must be disabled. */
107static inline long iommu_batch_add(u64 phys_page) 107static inline long iommu_batch_add(u64 phys_page)
108{ 108{
109 struct iommu_batch *p = &__get_cpu_var(iommu_batch); 109 struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
110 110
111 BUG_ON(p->npages >= PGLIST_NENTS); 111 BUG_ON(p->npages >= PGLIST_NENTS);
112 112
@@ -120,7 +120,7 @@ static inline long iommu_batch_add(u64 phys_page)
120/* Interrupts must be disabled. */ 120/* Interrupts must be disabled. */
121static inline long iommu_batch_end(void) 121static inline long iommu_batch_end(void)
122{ 122{
123 struct iommu_batch *p = &__get_cpu_var(iommu_batch); 123 struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
124 124
125 BUG_ON(p->npages >= PGLIST_NENTS); 125 BUG_ON(p->npages >= PGLIST_NENTS);
126 126
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index c9759ad3f34a..46a5e4508752 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1013,7 +1013,7 @@ static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
1013 1013
1014static void sparc_pmu_enable(struct pmu *pmu) 1014static void sparc_pmu_enable(struct pmu *pmu)
1015{ 1015{
1016 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1016 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1017 int i; 1017 int i;
1018 1018
1019 if (cpuc->enabled) 1019 if (cpuc->enabled)
@@ -1031,7 +1031,7 @@ static void sparc_pmu_enable(struct pmu *pmu)
1031 1031
1032static void sparc_pmu_disable(struct pmu *pmu) 1032static void sparc_pmu_disable(struct pmu *pmu)
1033{ 1033{
1034 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1034 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1035 int i; 1035 int i;
1036 1036
1037 if (!cpuc->enabled) 1037 if (!cpuc->enabled)
@@ -1065,7 +1065,7 @@ static int active_event_index(struct cpu_hw_events *cpuc,
1065 1065
1066static void sparc_pmu_start(struct perf_event *event, int flags) 1066static void sparc_pmu_start(struct perf_event *event, int flags)
1067{ 1067{
1068 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1068 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1069 int idx = active_event_index(cpuc, event); 1069 int idx = active_event_index(cpuc, event);
1070 1070
1071 if (flags & PERF_EF_RELOAD) { 1071 if (flags & PERF_EF_RELOAD) {
@@ -1080,7 +1080,7 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
1080 1080
1081static void sparc_pmu_stop(struct perf_event *event, int flags) 1081static void sparc_pmu_stop(struct perf_event *event, int flags)
1082{ 1082{
1083 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1083 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1084 int idx = active_event_index(cpuc, event); 1084 int idx = active_event_index(cpuc, event);
1085 1085
1086 if (!(event->hw.state & PERF_HES_STOPPED)) { 1086 if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -1096,7 +1096,7 @@ static void sparc_pmu_stop(struct perf_event *event, int flags)
1096 1096
1097static void sparc_pmu_del(struct perf_event *event, int _flags) 1097static void sparc_pmu_del(struct perf_event *event, int _flags)
1098{ 1098{
1099 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1099 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1100 unsigned long flags; 1100 unsigned long flags;
1101 int i; 1101 int i;
1102 1102
@@ -1133,7 +1133,7 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
1133 1133
1134static void sparc_pmu_read(struct perf_event *event) 1134static void sparc_pmu_read(struct perf_event *event)
1135{ 1135{
1136 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1136 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1137 int idx = active_event_index(cpuc, event); 1137 int idx = active_event_index(cpuc, event);
1138 struct hw_perf_event *hwc = &event->hw; 1138 struct hw_perf_event *hwc = &event->hw;
1139 1139
@@ -1145,7 +1145,7 @@ static DEFINE_MUTEX(pmc_grab_mutex);
1145 1145
1146static void perf_stop_nmi_watchdog(void *unused) 1146static void perf_stop_nmi_watchdog(void *unused)
1147{ 1147{
1148 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1148 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1149 int i; 1149 int i;
1150 1150
1151 stop_nmi_watchdog(NULL); 1151 stop_nmi_watchdog(NULL);
@@ -1356,7 +1356,7 @@ static int collect_events(struct perf_event *group, int max_count,
1356 1356
1357static int sparc_pmu_add(struct perf_event *event, int ef_flags) 1357static int sparc_pmu_add(struct perf_event *event, int ef_flags)
1358{ 1358{
1359 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1359 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1360 int n0, ret = -EAGAIN; 1360 int n0, ret = -EAGAIN;
1361 unsigned long flags; 1361 unsigned long flags;
1362 1362
@@ -1498,7 +1498,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
1498 */ 1498 */
1499static void sparc_pmu_start_txn(struct pmu *pmu) 1499static void sparc_pmu_start_txn(struct pmu *pmu)
1500{ 1500{
1501 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1501 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1502 1502
1503 perf_pmu_disable(pmu); 1503 perf_pmu_disable(pmu);
1504 cpuhw->group_flag |= PERF_EVENT_TXN; 1504 cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1511,7 +1511,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
1511 */ 1511 */
1512static void sparc_pmu_cancel_txn(struct pmu *pmu) 1512static void sparc_pmu_cancel_txn(struct pmu *pmu)
1513{ 1513{
1514 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1514 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1515 1515
1516 cpuhw->group_flag &= ~PERF_EVENT_TXN; 1516 cpuhw->group_flag &= ~PERF_EVENT_TXN;
1517 perf_pmu_enable(pmu); 1517 perf_pmu_enable(pmu);
@@ -1524,13 +1524,13 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
1524 */ 1524 */
1525static int sparc_pmu_commit_txn(struct pmu *pmu) 1525static int sparc_pmu_commit_txn(struct pmu *pmu)
1526{ 1526{
1527 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1527 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1528 int n; 1528 int n;
1529 1529
1530 if (!sparc_pmu) 1530 if (!sparc_pmu)
1531 return -EINVAL; 1531 return -EINVAL;
1532 1532
1533 cpuc = &__get_cpu_var(cpu_hw_events); 1533 cpuc = this_cpu_ptr(&cpu_hw_events);
1534 n = cpuc->n_events; 1534 n = cpuc->n_events;
1535 if (check_excludes(cpuc->event, 0, n)) 1535 if (check_excludes(cpuc->event, 0, n))
1536 return -EINVAL; 1536 return -EINVAL;
@@ -1601,7 +1601,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
1601 1601
1602 regs = args->regs; 1602 regs = args->regs;
1603 1603
1604 cpuc = &__get_cpu_var(cpu_hw_events); 1604 cpuc = this_cpu_ptr(&cpu_hw_events);
1605 1605
1606 /* If the PMU has the TOE IRQ enable bits, we need to do a 1606 /* If the PMU has the TOE IRQ enable bits, we need to do a
1607 * dummy write to the %pcr to clear the overflow bits and thus 1607 * dummy write to the %pcr to clear the overflow bits and thus
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index d5c319553fd0..9d98e5002a09 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -204,7 +204,7 @@ static void __init smp4d_ipi_init(void)
204 204
205void sun4d_ipi_interrupt(void) 205void sun4d_ipi_interrupt(void)
206{ 206{
207 struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work); 207 struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);
208 208
209 if (work->single) { 209 if (work->single) {
210 work->single = 0; 210 work->single = 0;
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 3fddf64c7fc6..59da0c3ea788 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -765,7 +765,7 @@ void setup_sparc64_timer(void)
765 : /* no outputs */ 765 : /* no outputs */
766 : "r" (pstate)); 766 : "r" (pstate));
767 767
768 sevt = &__get_cpu_var(sparc64_events); 768 sevt = this_cpu_ptr(&sparc64_events);
769 769
770 memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); 770 memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
771 sevt->cpumask = cpumask_of(smp_processor_id()); 771 sevt->cpumask = cpumask_of(smp_processor_id());
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b89aba217e3b..9df2190c097e 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -52,14 +52,14 @@ out:
52 52
53void arch_enter_lazy_mmu_mode(void) 53void arch_enter_lazy_mmu_mode(void)
54{ 54{
55 struct tlb_batch *tb = &__get_cpu_var(tlb_batch); 55 struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
56 56
57 tb->active = 1; 57 tb->active = 1;
58} 58}
59 59
60void arch_leave_lazy_mmu_mode(void) 60void arch_leave_lazy_mmu_mode(void)
61{ 61{
62 struct tlb_batch *tb = &__get_cpu_var(tlb_batch); 62 struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
63 63
64 if (tb->tlb_nr) 64 if (tb->tlb_nr)
65 flush_tlb_pending(); 65 flush_tlb_pending();