Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/cpudata_32.h |  2
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h |  2
-rw-r--r--  arch/sparc/kernel/kprobes.c         |  6
-rw-r--r--  arch/sparc/kernel/leon_smp.c        |  2
-rw-r--r--  arch/sparc/kernel/nmi.c             | 16
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c       |  8
-rw-r--r--  arch/sparc/kernel/perf_event.c      | 26
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c       |  2
-rw-r--r--  arch/sparc/kernel/time_64.c         |  2
-rw-r--r--  arch/sparc/mm/tlb.c                 |  4
10 files changed, 35 insertions, 35 deletions
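The per-file hunks below all apply the same mechanical conversion: the deprecated __get_cpu_var() accessor is replaced by its this_cpu counterparts, as part of the kernel-wide effort to retire that interface. As a rough guide to the mapping (a minimal sketch with illustrative names, not code from this patch):

    #include <linux/percpu.h>

    struct pcpu_state { int count; };                 /* illustrative type */
    static DEFINE_PER_CPU(struct pcpu_state, state);  /* illustrative name */
    static DEFINE_PER_CPU(int, enabled);              /* illustrative name */

    /* Caller must already have preemption (or interrupts) disabled,
     * exactly as the old __get_cpu_var() users were required to. */
    static void convert_example(void)
    {
            /* &__get_cpu_var(state)       ->  this_cpu_ptr(&state)         */
            struct pcpu_state *p = this_cpu_ptr(&state);

            /* __get_cpu_var(enabled)      ->  __this_cpu_read(enabled)     */
            if (__this_cpu_read(enabled))
                    p->count++;

            /* __get_cpu_var(enabled) = 0  ->  __this_cpu_write(enabled, 0) */
            __this_cpu_write(enabled, 0);
    }

Because each call site keeps its existing preemption/interrupt guarantees, the unchecked __this_cpu_* forms preserve the old semantics.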
diff --git a/arch/sparc/include/asm/cpudata_32.h b/arch/sparc/include/asm/cpudata_32.h
index 0300d94c25b3..05f366379f53 100644
--- a/arch/sparc/include/asm/cpudata_32.h
+++ b/arch/sparc/include/asm/cpudata_32.h
@@ -26,6 +26,6 @@ typedef struct {
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
-#define local_cpu_data() __get_cpu_var(__cpu_data)
+#define local_cpu_data() (*this_cpu_ptr(&__cpu_data))
 
 #endif /* _SPARC_CPUDATA_H */
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 0e594076912c..a6e424d185d0 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -30,7 +30,7 @@ typedef struct {
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
-#define local_cpu_data() __get_cpu_var(__cpu_data)
+#define local_cpu_data() (*this_cpu_ptr(&__cpu_data))
 
 #endif /* !(__ASSEMBLY__) */
 
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 98d712843413..cd83be527586 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -83,7 +83,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
         kcb->kprobe_status = kcb->prev_kprobe.status;
         kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
         kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
@@ -92,7 +92,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                          struct kprobe_ctlblk *kcb)
 {
-        __get_cpu_var(current_kprobe) = p;
+        __this_cpu_write(current_kprobe, p);
         kcb->kprobe_orig_tnpc = regs->tnpc;
         kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
 }
@@ -155,7 +155,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                         ret = 1;
                         goto no_kprobe;
                 }
-                p = __get_cpu_var(current_kprobe);
+                p = __this_cpu_read(current_kprobe);
                 if (p->break_handler && p->break_handler(p, regs))
                         goto ss_probe;
         }
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 018ef11f57df..ea2bad306f93 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -343,7 +343,7 @@ static void leon_ipi_resched(int cpu)
 
 void leonsmp_ipi_interrupt(void)
 {
-        struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work);
+        struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);
 
         if (work->single) {
                 work->single = 0;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 5b1151dcba13..a9973bb4a1b2 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -100,20 +100,20 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
         pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
 
         sum = local_cpu_data().irq0_irqs;
-        if (__get_cpu_var(nmi_touch)) {
-                __get_cpu_var(nmi_touch) = 0;
+        if (__this_cpu_read(nmi_touch)) {
+                __this_cpu_write(nmi_touch, 0);
                 touched = 1;
         }
-        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
+        if (!touched && __this_cpu_read(last_irq_sum) == sum) {
                 __this_cpu_inc(alert_counter);
                 if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
                         die_nmi("BUG: NMI Watchdog detected LOCKUP",
                                 regs, panic_on_timeout);
         } else {
-                __get_cpu_var(last_irq_sum) = sum;
+                __this_cpu_write(last_irq_sum, sum);
                 __this_cpu_write(alert_counter, 0);
         }
-        if (__get_cpu_var(wd_enabled)) {
+        if (__this_cpu_read(wd_enabled)) {
                 pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
                 pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
         }
@@ -154,7 +154,7 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
 void stop_nmi_watchdog(void *unused)
 {
         pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
-        __get_cpu_var(wd_enabled) = 0;
+        __this_cpu_write(wd_enabled, 0);
         atomic_dec(&nmi_active);
 }
 
@@ -207,7 +207,7 @@ error:
 
 void start_nmi_watchdog(void *unused)
 {
-        __get_cpu_var(wd_enabled) = 1;
+        __this_cpu_write(wd_enabled, 1);
         atomic_inc(&nmi_active);
 
         pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
@@ -218,7 +218,7 @@ void start_nmi_watchdog(void *unused)
 
 static void nmi_adjust_hz_one(void *unused)
 {
-        if (!__get_cpu_var(wd_enabled))
+        if (!__this_cpu_read(wd_enabled))
                 return;
 
         pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
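The nmi.c hunks above show all of the common shapes in one place: plain loads become __this_cpu_read(), plain stores become __this_cpu_write(), and the alert counter keeps using the per-CPU increment helper. A minimal sketch of that watchdog-style pattern, again with illustrative names rather than the kernel's:

    #include <linux/percpu.h>
    #include <linux/printk.h>

    static DEFINE_PER_CPU(unsigned int, ticks_seen);   /* illustrative name */
    static DEFINE_PER_CPU(unsigned int, stall_count);  /* illustrative name */

    /* Runs in NMI/IRQ context, so the CPU cannot change underneath us and
     * the unchecked __this_cpu_* forms are appropriate. */
    static void watchdog_tick(unsigned int ticks)
    {
            if (__this_cpu_read(ticks_seen) == ticks) {
                    __this_cpu_inc(stall_count);   /* per-CPU read-modify-write helper */
                    if (__this_cpu_read(stall_count) > 30)
                            pr_warn("possible lockup\n");
            } else {
                    __this_cpu_write(ticks_seen, ticks);
                    __this_cpu_write(stall_count, 0);
            }
    }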
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index d07f6b29aed8..49d33b178793 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -48,7 +48,7 @@ static int iommu_batch_initialized;
 /* Interrupts must be disabled. */
 static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
 {
-        struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
         p->dev = dev;
         p->prot = prot;
@@ -94,7 +94,7 @@ static long iommu_batch_flush(struct iommu_batch *p)
 
 static inline void iommu_batch_new_entry(unsigned long entry)
 {
-        struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
         if (p->entry + p->npages == entry)
                 return;
@@ -106,7 +106,7 @@ static inline void iommu_batch_new_entry(unsigned long entry)
 /* Interrupts must be disabled. */
 static inline long iommu_batch_add(u64 phys_page)
 {
-        struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
         BUG_ON(p->npages >= PGLIST_NENTS);
 
@@ -120,7 +120,7 @@ static inline long iommu_batch_add(u64 phys_page)
 /* Interrupts must be disabled. */
 static inline long iommu_batch_end(void)
 {
-        struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
         BUG_ON(p->npages >= PGLIST_NENTS);
 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index c9759ad3f34a..46a5e4508752 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1013,7 +1013,7 @@ static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
 
 static void sparc_pmu_enable(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int i;
 
         if (cpuc->enabled)
@@ -1031,7 +1031,7 @@ static void sparc_pmu_enable(struct pmu *pmu)
 
 static void sparc_pmu_disable(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int i;
 
         if (!cpuc->enabled)
@@ -1065,7 +1065,7 @@ static int active_event_index(struct cpu_hw_events *cpuc,
 
 static void sparc_pmu_start(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx = active_event_index(cpuc, event);
 
         if (flags & PERF_EF_RELOAD) {
@@ -1080,7 +1080,7 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
 
 static void sparc_pmu_stop(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx = active_event_index(cpuc, event);
 
         if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -1096,7 +1096,7 @@ static void sparc_pmu_stop(struct perf_event *event, int flags)
 
 static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         unsigned long flags;
         int i;
 
@@ -1133,7 +1133,7 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 
 static void sparc_pmu_read(struct perf_event *event)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx = active_event_index(cpuc, event);
         struct hw_perf_event *hwc = &event->hw;
 
@@ -1145,7 +1145,7 @@ static DEFINE_MUTEX(pmc_grab_mutex);
 
 static void perf_stop_nmi_watchdog(void *unused)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int i;
 
         stop_nmi_watchdog(NULL);
@@ -1356,7 +1356,7 @@ static int collect_events(struct perf_event *group, int max_count,
 
 static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int n0, ret = -EAGAIN;
         unsigned long flags;
 
@@ -1498,7 +1498,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
  */
 static void sparc_pmu_start_txn(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
         perf_pmu_disable(pmu);
         cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1511,7 +1511,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
  */
 static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
         cpuhw->group_flag &= ~PERF_EVENT_TXN;
         perf_pmu_enable(pmu);
@@ -1524,13 +1524,13 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
  */
 static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int n;
 
         if (!sparc_pmu)
                 return -EINVAL;
 
-        cpuc = &__get_cpu_var(cpu_hw_events);
+        cpuc = this_cpu_ptr(&cpu_hw_events);
         n = cpuc->n_events;
         if (check_excludes(cpuc->event, 0, n))
                 return -EINVAL;
@@ -1601,7 +1601,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 
         regs = args->regs;
 
-        cpuc = &__get_cpu_var(cpu_hw_events);
+        cpuc = this_cpu_ptr(&cpu_hw_events);
 
         /* If the PMU has the TOE IRQ enable bits, we need to do a
          * dummy write to the %pcr to clear the overflow bits and thus
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index d5c319553fd0..9d98e5002a09 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -204,7 +204,7 @@ static void __init smp4d_ipi_init(void)
 
 void sun4d_ipi_interrupt(void)
 {
-        struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work);
+        struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);
 
         if (work->single) {
                 work->single = 0;
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 3fddf64c7fc6..59da0c3ea788 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -765,7 +765,7 @@ void setup_sparc64_timer(void)
                              : /* no outputs */
                              : "r" (pstate));
 
-        sevt = &__get_cpu_var(sparc64_events);
+        sevt = this_cpu_ptr(&sparc64_events);
 
         memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
         sevt->cpumask = cpumask_of(smp_processor_id());
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b89aba217e3b..9df2190c097e 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -52,14 +52,14 @@ out:
 
 void arch_enter_lazy_mmu_mode(void)
 {
-        struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+        struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
 
         tb->active = 1;
 }
 
 void arch_leave_lazy_mmu_mode(void)
 {
-        struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+        struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
 
         if (tb->tlb_nr)
                 flush_tlb_pending();