author    | Tejun Heo <tj@kernel.org> | 2014-08-27 11:18:29 -0400
committer | Tejun Heo <tj@kernel.org> | 2014-08-27 11:18:29 -0400
commit    | 23f66e2d661b4d3226d16e25910a9e9472ce2410 (patch)
tree      | fe5b7769f72cac6ff722a0f1509df9e398768ab6
parent    | 47405a253da4d8ca4b18ad537423083fdd790440 (diff)
Revert "powerpc: Replace __get_cpu_var uses"
This reverts commit 5828f666c069af74e00db21559f1535103c9f79a due to
build failure after merging with pending powerpc changes.
Link: http://lkml.kernel.org/g/20140827142243.6277eaff@canb.auug.org.au
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
32 files changed, 103 insertions(+), 105 deletions(-)
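For orientation before the per-file hunks: the reverted patch had mechanically converted powerpc's uses of the legacy `__get_cpu_var()` accessor to the newer `this_cpu_*` operations, and this revert restores the old spellings so the tree builds again alongside the pending powerpc changes. The sketch below illustrates the correspondence in both directions; it is not part of this commit, and the per-cpu variables and functions (`demo_counter`, `demo_state_var`, `demo_*_style()`) are hypothetical.

```c
#include <linux/percpu.h>

struct demo_state {
	int active;
};

/* Hypothetical per-cpu variables, used only to illustrate the accessors. */
static DEFINE_PER_CPU(unsigned long, demo_counter);
static DEFINE_PER_CPU(struct demo_state, demo_state_var);

/* Style restored by this revert: __get_cpu_var() evaluates to an lvalue
 * for the current CPU's copy, so callers increment it or take its address
 * directly. */
static void demo_get_cpu_var_style(void)
{
	struct demo_state *s = &__get_cpu_var(demo_state_var);

	__get_cpu_var(demo_counter)++;
	s->active = 1;
}

/* Style of the reverted patch: dedicated this_cpu operations, with
 * this_cpu_ptr() used to take a pointer to the current CPU's copy. */
static void demo_this_cpu_style(void)
{
	struct demo_state *s = this_cpu_ptr(&demo_state_var);

	__this_cpu_inc(demo_counter);
	s->active = 1;
}
```

In both styles the double-underscore forms leave preemption handling to the caller, which is why the hunks below change only the accessor spelling and not the surrounding locking or IRQ state.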
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 8d907ba4fd05..1bbb3013d6aa 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -21,9 +21,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); | |||
21 | 21 | ||
22 | #define __ARCH_IRQ_STAT | 22 | #define __ARCH_IRQ_STAT |
23 | 23 | ||
24 | #define local_softirq_pending() __this_cpu_read(irq_stat.__softirq_pending) | 24 | #define local_softirq_pending() __get_cpu_var(irq_stat).__softirq_pending |
25 | #define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x)) | ||
26 | #define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x)) | ||
27 | 25 | ||
28 | static inline void ack_bad_irq(unsigned int irq) | 26 | static inline void ack_bad_irq(unsigned int irq) |
29 | { | 27 | { |
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index cd7c2719d3ef..2def01ed0cb2 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); | |||
107 | 107 | ||
108 | static inline void arch_enter_lazy_mmu_mode(void) | 108 | static inline void arch_enter_lazy_mmu_mode(void) |
109 | { | 109 | { |
110 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); | 110 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
111 | 111 | ||
112 | batch->active = 1; | 112 | batch->active = 1; |
113 | } | 113 | } |
114 | 114 | ||
115 | static inline void arch_leave_lazy_mmu_mode(void) | 115 | static inline void arch_leave_lazy_mmu_mode(void) |
116 | { | 116 | { |
117 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); | 117 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
118 | 118 | ||
119 | if (batch->index) | 119 | if (batch->index) |
120 | __flush_tlb_pending(batch); | 120 | __flush_tlb_pending(batch); |
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 5007ad0448ce..282d43a0c855 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -97,7 +97,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr); | |||
97 | 97 | ||
98 | static inline void xics_push_cppr(unsigned int vec) | 98 | static inline void xics_push_cppr(unsigned int vec) |
99 | { | 99 | { |
100 | struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); | 100 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); |
101 | 101 | ||
102 | if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) | 102 | if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) |
103 | return; | 103 | return; |
@@ -110,7 +110,7 @@ static inline void xics_push_cppr(unsigned int vec) | |||
110 | 110 | ||
111 | static inline unsigned char xics_pop_cppr(void) | 111 | static inline unsigned char xics_pop_cppr(void) |
112 | { | 112 | { |
113 | struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); | 113 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); |
114 | 114 | ||
115 | if (WARN_ON(os_cppr->index < 1)) | 115 | if (WARN_ON(os_cppr->index < 1)) |
116 | return LOWEST_PRIORITY; | 116 | return LOWEST_PRIORITY; |
@@ -120,7 +120,7 @@ static inline unsigned char xics_pop_cppr(void) | |||
120 | 120 | ||
121 | static inline void xics_set_base_cppr(unsigned char cppr) | 121 | static inline void xics_set_base_cppr(unsigned char cppr) |
122 | { | 122 | { |
123 | struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); | 123 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); |
124 | 124 | ||
125 | /* we only really want to set the priority when there's | 125 | /* we only really want to set the priority when there's |
126 | * just one cppr value on the stack | 126 | * just one cppr value on the stack |
@@ -132,7 +132,7 @@ static inline void xics_set_base_cppr(unsigned char cppr) | |||
132 | 132 | ||
133 | static inline unsigned char xics_cppr_top(void) | 133 | static inline unsigned char xics_cppr_top(void) |
134 | { | 134 | { |
135 | struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); | 135 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); |
136 | 136 | ||
137 | return os_cppr->stack[os_cppr->index]; | 137 | return os_cppr->stack[os_cppr->index]; |
138 | } | 138 | } |
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f4217819cc31..d55c76c571f3 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs) | |||
41 | 41 | ||
42 | may_hard_irq_enable(); | 42 | may_hard_irq_enable(); |
43 | 43 | ||
44 | __this_cpu_inc(irq_stat.doorbell_irqs); | 44 | __get_cpu_var(irq_stat).doorbell_irqs++; |
45 | 45 | ||
46 | smp_ipi_demux(); | 46 | smp_ipi_demux(); |
47 | 47 | ||
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index b62f90eaf19e..0bb5918faaaf 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type) | |||
63 | int arch_install_hw_breakpoint(struct perf_event *bp) | 63 | int arch_install_hw_breakpoint(struct perf_event *bp) |
64 | { | 64 | { |
65 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | 65 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); |
66 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg); | 66 | struct perf_event **slot = &__get_cpu_var(bp_per_reg); |
67 | 67 | ||
68 | *slot = bp; | 68 | *slot = bp; |
69 | 69 | ||
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
88 | */ | 88 | */ |
89 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | 89 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) |
90 | { | 90 | { |
91 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg); | 91 | struct perf_event **slot = &__get_cpu_var(bp_per_reg); |
92 | 92 | ||
93 | if (*slot != bp) { | 93 | if (*slot != bp) { |
94 | WARN_ONCE(1, "Can't find the breakpoint"); | 94 | WARN_ONCE(1, "Can't find the breakpoint"); |
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
226 | */ | 226 | */ |
227 | rcu_read_lock(); | 227 | rcu_read_lock(); |
228 | 228 | ||
229 | bp = __this_cpu_read(bp_per_reg); | 229 | bp = __get_cpu_var(bp_per_reg); |
230 | if (!bp) | 230 | if (!bp) |
231 | goto out; | 231 | goto out; |
232 | info = counter_arch_bp(bp); | 232 | info = counter_arch_bp(bp); |
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 71e60bfb89e2..a10642a0d861 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev, | |||
208 | * We don't need to disable preemption here because any CPU can | 208 | * We don't need to disable preemption here because any CPU can |
209 | * safely use any IOMMU pool. | 209 | * safely use any IOMMU pool. |
210 | */ | 210 | */ |
211 | pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); | 211 | pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1); |
212 | 212 | ||
213 | if (largealloc) | 213 | if (largealloc) |
214 | pool = &(tbl->large_pool); | 214 | pool = &(tbl->large_pool); |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 74d40c6855b8..4c5891de162e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -114,7 +114,7 @@ static inline notrace void set_soft_enabled(unsigned long enable) | |||
114 | static inline notrace int decrementer_check_overflow(void) | 114 | static inline notrace int decrementer_check_overflow(void) |
115 | { | 115 | { |
116 | u64 now = get_tb_or_rtc(); | 116 | u64 now = get_tb_or_rtc(); |
117 | u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); | 117 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); |
118 | 118 | ||
119 | return now >= *next_tb; | 119 | return now >= *next_tb; |
120 | } | 120 | } |
@@ -499,7 +499,7 @@ void __do_irq(struct pt_regs *regs) | |||
499 | 499 | ||
500 | /* And finally process it */ | 500 | /* And finally process it */ |
501 | if (unlikely(irq == NO_IRQ)) | 501 | if (unlikely(irq == NO_IRQ)) |
502 | __this_cpu_inc(irq_stat.spurious_irqs); | 502 | __get_cpu_var(irq_stat).spurious_irqs++; |
503 | else | 503 | else |
504 | generic_handle_irq(irq); | 504 | generic_handle_irq(irq); |
505 | 505 | ||
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index e77c3ccf8dcf..8504657379f1 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs) | |||
155 | { | 155 | { |
156 | struct thread_info *thread_info, *exception_thread_info; | 156 | struct thread_info *thread_info, *exception_thread_info; |
157 | struct thread_info *backup_current_thread_info = | 157 | struct thread_info *backup_current_thread_info = |
158 | this_cpu_ptr(&kgdb_thread_info); | 158 | &__get_cpu_var(kgdb_thread_info); |
159 | 159 | ||
160 | if (user_mode(regs)) | 160 | if (user_mode(regs)) |
161 | return 0; | 161 | return 0; |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7c053f281406..2f72af82513c 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
119 | 119 | ||
120 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | 120 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) |
121 | { | 121 | { |
122 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); | 122 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; |
123 | kcb->kprobe_status = kcb->prev_kprobe.status; | 123 | kcb->kprobe_status = kcb->prev_kprobe.status; |
124 | kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; | 124 | kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; |
125 | } | 125 | } |
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
127 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | 127 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
128 | struct kprobe_ctlblk *kcb) | 128 | struct kprobe_ctlblk *kcb) |
129 | { | 129 | { |
130 | __this_cpu_write(current_kprobe, p); | 130 | __get_cpu_var(current_kprobe) = p; |
131 | kcb->kprobe_saved_msr = regs->msr; | 131 | kcb->kprobe_saved_msr = regs->msr; |
132 | } | 132 | } |
133 | 133 | ||
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
192 | ret = 1; | 192 | ret = 1; |
193 | goto no_kprobe; | 193 | goto no_kprobe; |
194 | } | 194 | } |
195 | p = __this_cpu_read(current_kprobe); | 195 | p = __get_cpu_var(current_kprobe); |
196 | if (p->break_handler && p->break_handler(p, regs)) { | 196 | if (p->break_handler && p->break_handler(p, regs)) { |
197 | goto ss_probe; | 197 | goto ss_probe; |
198 | } | 198 | } |
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 15c99b649b04..a7fd4cb78b78 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled, | |||
73 | uint64_t nip, uint64_t addr) | 73 | uint64_t nip, uint64_t addr) |
74 | { | 74 | { |
75 | uint64_t srr1; | 75 | uint64_t srr1; |
76 | int index = __this_cpu_inc_return(mce_nest_count); | 76 | int index = __get_cpu_var(mce_nest_count)++; |
77 | struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); | 77 | struct machine_check_event *mce = &__get_cpu_var(mce_event[index]); |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * Return if we don't have enough space to log mce event. | 80 | * Return if we don't have enough space to log mce event. |
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled, | |||
143 | */ | 143 | */ |
144 | int get_mce_event(struct machine_check_event *mce, bool release) | 144 | int get_mce_event(struct machine_check_event *mce, bool release) |
145 | { | 145 | { |
146 | int index = __this_cpu_read(mce_nest_count) - 1; | 146 | int index = __get_cpu_var(mce_nest_count) - 1; |
147 | struct machine_check_event *mc_evt; | 147 | struct machine_check_event *mc_evt; |
148 | int ret = 0; | 148 | int ret = 0; |
149 | 149 | ||
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) | |||
153 | 153 | ||
154 | /* Check if we have MCE info to process. */ | 154 | /* Check if we have MCE info to process. */ |
155 | if (index < MAX_MC_EVT) { | 155 | if (index < MAX_MC_EVT) { |
156 | mc_evt = this_cpu_ptr(&mce_event[index]); | 156 | mc_evt = &__get_cpu_var(mce_event[index]); |
157 | /* Copy the event structure and release the original */ | 157 | /* Copy the event structure and release the original */ |
158 | if (mce) | 158 | if (mce) |
159 | *mce = *mc_evt; | 159 | *mce = *mc_evt; |
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) | |||
163 | } | 163 | } |
164 | /* Decrement the count to free the slot. */ | 164 | /* Decrement the count to free the slot. */ |
165 | if (release) | 165 | if (release) |
166 | __this_cpu_dec(mce_nest_count); | 166 | __get_cpu_var(mce_nest_count)--; |
167 | 167 | ||
168 | return ret; | 168 | return ret; |
169 | } | 169 | } |
@@ -184,13 +184,13 @@ void machine_check_queue_event(void) | |||
184 | if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) | 184 | if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) |
185 | return; | 185 | return; |
186 | 186 | ||
187 | index = __this_cpu_inc_return(mce_queue_count); | 187 | index = __get_cpu_var(mce_queue_count)++; |
188 | /* If queue is full, just return for now. */ | 188 | /* If queue is full, just return for now. */ |
189 | if (index >= MAX_MC_EVT) { | 189 | if (index >= MAX_MC_EVT) { |
190 | __this_cpu_dec(mce_queue_count); | 190 | __get_cpu_var(mce_queue_count)--; |
191 | return; | 191 | return; |
192 | } | 192 | } |
193 | memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt)); | 193 | __get_cpu_var(mce_event_queue[index]) = evt; |
194 | 194 | ||
195 | /* Queue irq work to process this event later. */ | 195 | /* Queue irq work to process this event later. */ |
196 | irq_work_queue(&mce_event_process_work); | 196 | irq_work_queue(&mce_event_process_work); |
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work) | |||
208 | * For now just print it to console. | 208 | * For now just print it to console. |
209 | * TODO: log this error event to FSP or nvram. | 209 | * TODO: log this error event to FSP or nvram. |
210 | */ | 210 | */ |
211 | while (__this_cpu_read(mce_queue_count) > 0) { | 211 | while (__get_cpu_var(mce_queue_count) > 0) { |
212 | index = __this_cpu_read(mce_queue_count) - 1; | 212 | index = __get_cpu_var(mce_queue_count) - 1; |
213 | machine_check_print_event_info( | 213 | machine_check_print_event_info( |
214 | this_cpu_ptr(&mce_event_queue[index])); | 214 | &__get_cpu_var(mce_event_queue[index])); |
215 | __this_cpu_dec(mce_queue_count); | 215 | __get_cpu_var(mce_queue_count)--; |
216 | } | 216 | } |
217 | } | 217 | } |
218 | 218 | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2df2f2956520..bf44ae962ab8 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -498,7 +498,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk) | |||
498 | 498 | ||
499 | void __set_breakpoint(struct arch_hw_breakpoint *brk) | 499 | void __set_breakpoint(struct arch_hw_breakpoint *brk) |
500 | { | 500 | { |
501 | __this_cpu_write(current_brk, *brk); | 501 | __get_cpu_var(current_brk) = *brk; |
502 | 502 | ||
503 | if (cpu_has_feature(CPU_FTR_DAWR)) | 503 | if (cpu_has_feature(CPU_FTR_DAWR)) |
504 | set_dawr(brk); | 504 | set_dawr(brk); |
@@ -841,7 +841,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
841 | * schedule DABR | 841 | * schedule DABR |
842 | */ | 842 | */ |
843 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | 843 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
844 | if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk))) | 844 | if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) |
845 | __set_breakpoint(&new->thread.hw_brk); | 845 | __set_breakpoint(&new->thread.hw_brk); |
846 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 846 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
847 | #endif | 847 | #endif |
@@ -855,7 +855,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
855 | * Collect processor utilization data per process | 855 | * Collect processor utilization data per process |
856 | */ | 856 | */ |
857 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 857 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { |
858 | struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array); | 858 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); |
859 | long unsigned start_tb, current_tb; | 859 | long unsigned start_tb, current_tb; |
860 | start_tb = old_thread->start_tb; | 860 | start_tb = old_thread->start_tb; |
861 | cu->current_tb = current_tb = mfspr(SPRN_PURR); | 861 | cu->current_tb = current_tb = mfspr(SPRN_PURR); |
@@ -865,7 +865,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
865 | #endif /* CONFIG_PPC64 */ | 865 | #endif /* CONFIG_PPC64 */ |
866 | 866 | ||
867 | #ifdef CONFIG_PPC_BOOK3S_64 | 867 | #ifdef CONFIG_PPC_BOOK3S_64 |
868 | batch = this_cpu_ptr(&ppc64_tlb_batch); | 868 | batch = &__get_cpu_var(ppc64_tlb_batch); |
869 | if (batch->active) { | 869 | if (batch->active) { |
870 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; | 870 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; |
871 | if (batch->index) | 871 | if (batch->index) |
@@ -888,7 +888,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
888 | #ifdef CONFIG_PPC_BOOK3S_64 | 888 | #ifdef CONFIG_PPC_BOOK3S_64 |
889 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { | 889 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { |
890 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; | 890 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; |
891 | batch = this_cpu_ptr(&ppc64_tlb_batch); | 891 | batch = &__get_cpu_var(ppc64_tlb_batch); |
892 | batch->active = 1; | 892 | batch->active = 1; |
893 | } | 893 | } |
894 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 894 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 60391a51467a..a0738af4aba6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -242,7 +242,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg) | |||
242 | 242 | ||
243 | irqreturn_t smp_ipi_demux(void) | 243 | irqreturn_t smp_ipi_demux(void) |
244 | { | 244 | { |
245 | struct cpu_messages *info = this_cpu_ptr(&ipi_message); | 245 | struct cpu_messages *info = &__get_cpu_var(ipi_message); |
246 | unsigned int all; | 246 | unsigned int all; |
247 | 247 | ||
248 | mb(); /* order any irq clear */ | 248 | mb(); /* order any irq clear */ |
@@ -438,9 +438,9 @@ void generic_mach_cpu_die(void) | |||
438 | idle_task_exit(); | 438 | idle_task_exit(); |
439 | cpu = smp_processor_id(); | 439 | cpu = smp_processor_id(); |
440 | printk(KERN_DEBUG "CPU%d offline\n", cpu); | 440 | printk(KERN_DEBUG "CPU%d offline\n", cpu); |
441 | __this_cpu_write(cpu_state, CPU_DEAD); | 441 | __get_cpu_var(cpu_state) = CPU_DEAD; |
442 | smp_wmb(); | 442 | smp_wmb(); |
443 | while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE) | 443 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) |
444 | cpu_relax(); | 444 | cpu_relax(); |
445 | } | 445 | } |
446 | 446 | ||
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index fa1fd8a0c867..67fd2fd2620a 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void) | |||
394 | ppc_set_pmu_inuse(1); | 394 | ppc_set_pmu_inuse(1); |
395 | 395 | ||
396 | /* Only need to enable them once */ | 396 | /* Only need to enable them once */ |
397 | if (__this_cpu_read(pmcs_enabled)) | 397 | if (__get_cpu_var(pmcs_enabled)) |
398 | return; | 398 | return; |
399 | 399 | ||
400 | __this_cpu_write(pmcs_enabled, 1); | 400 | __get_cpu_var(pmcs_enabled) = 1; |
401 | 401 | ||
402 | if (ppc_md.enable_pmcs) | 402 | if (ppc_md.enable_pmcs) |
403 | ppc_md.enable_pmcs(); | 403 | ppc_md.enable_pmcs(); |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 4769e5b7f905..368ab374d33c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void) | |||
458 | 458 | ||
459 | DEFINE_PER_CPU(u8, irq_work_pending); | 459 | DEFINE_PER_CPU(u8, irq_work_pending); |
460 | 460 | ||
461 | #define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1) | 461 | #define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 |
462 | #define test_irq_work_pending() __this_cpu_read(irq_work_pending) | 462 | #define test_irq_work_pending() __get_cpu_var(irq_work_pending) |
463 | #define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) | 463 | #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 |
464 | 464 | ||
465 | #endif /* 32 vs 64 bit */ | 465 | #endif /* 32 vs 64 bit */ |
466 | 466 | ||
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void) | |||
482 | void __timer_interrupt(void) | 482 | void __timer_interrupt(void) |
483 | { | 483 | { |
484 | struct pt_regs *regs = get_irq_regs(); | 484 | struct pt_regs *regs = get_irq_regs(); |
485 | u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); | 485 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); |
486 | struct clock_event_device *evt = this_cpu_ptr(&decrementers); | 486 | struct clock_event_device *evt = &__get_cpu_var(decrementers); |
487 | u64 now; | 487 | u64 now; |
488 | 488 | ||
489 | trace_timer_interrupt_entry(regs); | 489 | trace_timer_interrupt_entry(regs); |
@@ -498,7 +498,7 @@ void __timer_interrupt(void) | |||
498 | *next_tb = ~(u64)0; | 498 | *next_tb = ~(u64)0; |
499 | if (evt->event_handler) | 499 | if (evt->event_handler) |
500 | evt->event_handler(evt); | 500 | evt->event_handler(evt); |
501 | __this_cpu_inc(irq_stat.timer_irqs_event); | 501 | __get_cpu_var(irq_stat).timer_irqs_event++; |
502 | } else { | 502 | } else { |
503 | now = *next_tb - now; | 503 | now = *next_tb - now; |
504 | if (now <= DECREMENTER_MAX) | 504 | if (now <= DECREMENTER_MAX) |
@@ -506,13 +506,13 @@ void __timer_interrupt(void) | |||
506 | /* We may have raced with new irq work */ | 506 | /* We may have raced with new irq work */ |
507 | if (test_irq_work_pending()) | 507 | if (test_irq_work_pending()) |
508 | set_dec(1); | 508 | set_dec(1); |
509 | __this_cpu_inc(irq_stat.timer_irqs_others); | 509 | __get_cpu_var(irq_stat).timer_irqs_others++; |
510 | } | 510 | } |
511 | 511 | ||
512 | #ifdef CONFIG_PPC64 | 512 | #ifdef CONFIG_PPC64 |
513 | /* collect purr register values often, for accurate calculations */ | 513 | /* collect purr register values often, for accurate calculations */ |
514 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 514 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { |
515 | struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array); | 515 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); |
516 | cu->current_tb = mfspr(SPRN_PURR); | 516 | cu->current_tb = mfspr(SPRN_PURR); |
517 | } | 517 | } |
518 | #endif | 518 | #endif |
@@ -527,7 +527,7 @@ void __timer_interrupt(void) | |||
527 | void timer_interrupt(struct pt_regs * regs) | 527 | void timer_interrupt(struct pt_regs * regs) |
528 | { | 528 | { |
529 | struct pt_regs *old_regs; | 529 | struct pt_regs *old_regs; |
530 | u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); | 530 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); |
531 | 531 | ||
532 | /* Ensure a positive value is written to the decrementer, or else | 532 | /* Ensure a positive value is written to the decrementer, or else |
533 | * some CPUs will continue to take decrementer exceptions. | 533 | * some CPUs will continue to take decrementer exceptions. |
@@ -813,7 +813,7 @@ static void __init clocksource_init(void) | |||
813 | static int decrementer_set_next_event(unsigned long evt, | 813 | static int decrementer_set_next_event(unsigned long evt, |
814 | struct clock_event_device *dev) | 814 | struct clock_event_device *dev) |
815 | { | 815 | { |
816 | __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt); | 816 | __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; |
817 | set_dec(evt); | 817 | set_dec(evt); |
818 | 818 | ||
819 | /* We may have raced with new irq work */ | 819 | /* We may have raced with new irq work */ |
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode, | |||
833 | /* Interrupt handler for the timer broadcast IPI */ | 833 | /* Interrupt handler for the timer broadcast IPI */ |
834 | void tick_broadcast_ipi_handler(void) | 834 | void tick_broadcast_ipi_handler(void) |
835 | { | 835 | { |
836 | u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); | 836 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); |
837 | 837 | ||
838 | *next_tb = get_tb_or_rtc(); | 838 | *next_tb = get_tb_or_rtc(); |
839 | __timer_interrupt(); | 839 | __timer_interrupt(); |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index e6595b72269b..0dc43f9932cf 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs) | |||
295 | { | 295 | { |
296 | long handled = 0; | 296 | long handled = 0; |
297 | 297 | ||
298 | __this_cpu_inc(irq_stat.mce_exceptions); | 298 | __get_cpu_var(irq_stat).mce_exceptions++; |
299 | 299 | ||
300 | if (cur_cpu_spec && cur_cpu_spec->machine_check_early) | 300 | if (cur_cpu_spec && cur_cpu_spec->machine_check_early) |
301 | handled = cur_cpu_spec->machine_check_early(regs); | 301 | handled = cur_cpu_spec->machine_check_early(regs); |
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs) | |||
304 | 304 | ||
305 | long hmi_exception_realmode(struct pt_regs *regs) | 305 | long hmi_exception_realmode(struct pt_regs *regs) |
306 | { | 306 | { |
307 | __this_cpu_inc(irq_stat.hmi_exceptions); | 307 | __get_cpu_var(irq_stat).hmi_exceptions++; |
308 | 308 | ||
309 | if (ppc_md.hmi_exception_early) | 309 | if (ppc_md.hmi_exception_early) |
310 | ppc_md.hmi_exception_early(regs); | 310 | ppc_md.hmi_exception_early(regs); |
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs) | |||
700 | enum ctx_state prev_state = exception_enter(); | 700 | enum ctx_state prev_state = exception_enter(); |
701 | int recover = 0; | 701 | int recover = 0; |
702 | 702 | ||
703 | __this_cpu_inc(irq_stat.mce_exceptions); | 703 | __get_cpu_var(irq_stat).mce_exceptions++; |
704 | 704 | ||
705 | /* See if any machine dependent calls. In theory, we would want | 705 | /* See if any machine dependent calls. In theory, we would want |
706 | * to call the CPU first, and call the ppc_md. one if the CPU | 706 | * to call the CPU first, and call the ppc_md. one if the CPU |
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs) | |||
1519 | 1519 | ||
1520 | void performance_monitor_exception(struct pt_regs *regs) | 1520 | void performance_monitor_exception(struct pt_regs *regs) |
1521 | { | 1521 | { |
1522 | __this_cpu_inc(irq_stat.pmu_irqs); | 1522 | __get_cpu_var(irq_stat).pmu_irqs++; |
1523 | 1523 | ||
1524 | perf_irq(regs); | 1524 | perf_irq(regs); |
1525 | } | 1525 | } |
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 16095841afe1..2e02ed849f36 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -76,11 +76,11 @@ static inline int local_sid_setup_one(struct id *entry) | |||
76 | unsigned long sid; | 76 | unsigned long sid; |
77 | int ret = -1; | 77 | int ret = -1; |
78 | 78 | ||
79 | sid = __this_cpu_inc_return(pcpu_last_used_sid); | 79 | sid = ++(__get_cpu_var(pcpu_last_used_sid)); |
80 | if (sid < NUM_TIDS) { | 80 | if (sid < NUM_TIDS) { |
81 | __this_cpu_write(pcpu_sids.entry[sid], entry); | 81 | __get_cpu_var(pcpu_sids).entry[sid] = entry; |
82 | entry->val = sid; | 82 | entry->val = sid; |
83 | entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]); | 83 | entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; |
84 | ret = sid; | 84 | ret = sid; |
85 | } | 85 | } |
86 | 86 | ||
@@ -108,8 +108,8 @@ static inline int local_sid_setup_one(struct id *entry) | |||
108 | static inline int local_sid_lookup(struct id *entry) | 108 | static inline int local_sid_lookup(struct id *entry) |
109 | { | 109 | { |
110 | if (entry && entry->val != 0 && | 110 | if (entry && entry->val != 0 && |
111 | __this_cpu_read(pcpu_sids.entry[entry->val]) == entry && | 111 | __get_cpu_var(pcpu_sids).entry[entry->val] == entry && |
112 | entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val])) | 112 | entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) |
113 | return entry->val; | 113 | return entry->val; |
114 | return -1; | 114 | return -1; |
115 | } | 115 | } |
@@ -117,8 +117,8 @@ static inline int local_sid_lookup(struct id *entry) | |||
117 | /* Invalidate all id mappings on local core -- call with preempt disabled */ | 117 | /* Invalidate all id mappings on local core -- call with preempt disabled */ |
118 | static inline void local_sid_destroy_all(void) | 118 | static inline void local_sid_destroy_all(void) |
119 | { | 119 | { |
120 | __this_cpu_write(pcpu_last_used_sid, 0); | 120 | __get_cpu_var(pcpu_last_used_sid) = 0; |
121 | memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids)); | 121 | memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); |
122 | } | 122 | } |
123 | 123 | ||
124 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) | 124 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) |
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 6ef54e523f33..164bad2a19bf 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -141,9 +141,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) | |||
141 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); | 141 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); |
142 | 142 | ||
143 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || | 143 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || |
144 | __this_cpu_read(last_vcpu_of_lpid[vcpu->kvm->arch.lpid]) != vcpu) { | 144 | __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) { |
145 | kvmppc_e500_tlbil_all(vcpu_e500); | 145 | kvmppc_e500_tlbil_all(vcpu_e500); |
146 | __this_cpu_write(last_vcpu_of_lpid[vcpu->kvm->arch.lpid], vcpu); | 146 | __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu; |
147 | } | 147 | } |
148 | 148 | ||
149 | kvmppc_load_guest_fp(vcpu); | 149 | kvmppc_load_guest_fp(vcpu); |
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 504a16f1a1a0..afc0a8295f84 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -625,7 +625,7 @@ static void native_flush_hash_range(unsigned long number, int local) | |||
625 | unsigned long want_v; | 625 | unsigned long want_v; |
626 | unsigned long flags; | 626 | unsigned long flags; |
627 | real_pte_t pte; | 627 | real_pte_t pte; |
628 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); | 628 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
629 | unsigned long psize = batch->psize; | 629 | unsigned long psize = batch->psize; |
630 | int ssize = batch->ssize; | 630 | int ssize = batch->ssize; |
631 | int i; | 631 | int i; |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 060d51fda35e..daee7f4e5a14 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1314,7 +1314,7 @@ void flush_hash_range(unsigned long number, int local) | |||
1314 | else { | 1314 | else { |
1315 | int i; | 1315 | int i; |
1316 | struct ppc64_tlb_batch *batch = | 1316 | struct ppc64_tlb_batch *batch = |
1317 | this_cpu_ptr(&ppc64_tlb_batch); | 1317 | &__get_cpu_var(ppc64_tlb_batch); |
1318 | 1318 | ||
1319 | for (i = 0; i < number; i++) | 1319 | for (i = 0; i < number; i++) |
1320 | flush_hash_page(batch->vpn[i], batch->pte[i], | 1320 | flush_hash_page(batch->vpn[i], batch->pte[i], |
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index ba47aaf33a4b..5e4ee2573903 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -33,13 +33,13 @@ static inline int tlb1_next(void) | |||
33 | 33 | ||
34 | ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; | 34 | ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; |
35 | 35 | ||
36 | index = this_cpu_read(next_tlbcam_idx); | 36 | index = __get_cpu_var(next_tlbcam_idx); |
37 | 37 | ||
38 | /* Just round-robin the entries and wrap when we hit the end */ | 38 | /* Just round-robin the entries and wrap when we hit the end */ |
39 | if (unlikely(index == ncams - 1)) | 39 | if (unlikely(index == ncams - 1)) |
40 | __this_cpu_write(next_tlbcam_idx, tlbcam_index); | 40 | __get_cpu_var(next_tlbcam_idx) = tlbcam_index; |
41 | else | 41 | else |
42 | __this_cpu_inc(next_tlbcam_idx); | 42 | __get_cpu_var(next_tlbcam_idx)++; |
43 | 43 | ||
44 | return index; | 44 | return index; |
45 | } | 45 | } |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8aa04f03fd31..7e70ae968e5f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -462,7 +462,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) | |||
462 | { | 462 | { |
463 | struct hugepd_freelist **batchp; | 463 | struct hugepd_freelist **batchp; |
464 | 464 | ||
465 | batchp = this_cpu_ptr(&hugepd_freelist_cur); | 465 | batchp = &get_cpu_var(hugepd_freelist_cur); |
466 | 466 | ||
467 | if (atomic_read(&tlb->mm->mm_users) < 2 || | 467 | if (atomic_read(&tlb->mm->mm_users) < 2 || |
468 | cpumask_equal(mm_cpumask(tlb->mm), | 468 | cpumask_equal(mm_cpumask(tlb->mm), |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 690f9c7bf3c8..b7cd00b0171e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -339,7 +339,7 @@ static void power_pmu_bhrb_reset(void) | |||
339 | 339 | ||
340 | static void power_pmu_bhrb_enable(struct perf_event *event) | 340 | static void power_pmu_bhrb_enable(struct perf_event *event) |
341 | { | 341 | { |
342 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); | 342 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
343 | 343 | ||
344 | if (!ppmu->bhrb_nr) | 344 | if (!ppmu->bhrb_nr) |
345 | return; | 345 | return; |
@@ -354,7 +354,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event) | |||
354 | 354 | ||
355 | static void power_pmu_bhrb_disable(struct perf_event *event) | 355 | static void power_pmu_bhrb_disable(struct perf_event *event) |
356 | { | 356 | { |
357 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); | 357 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
358 | 358 | ||
359 | if (!ppmu->bhrb_nr) | 359 | if (!ppmu->bhrb_nr) |
360 | return; | 360 | return; |
@@ -1144,7 +1144,7 @@ static void power_pmu_disable(struct pmu *pmu) | |||
1144 | if (!ppmu) | 1144 | if (!ppmu) |
1145 | return; | 1145 | return; |
1146 | local_irq_save(flags); | 1146 | local_irq_save(flags); |
1147 | cpuhw = this_cpu_ptr(&cpu_hw_events); | 1147 | cpuhw = &__get_cpu_var(cpu_hw_events); |
1148 | 1148 | ||
1149 | if (!cpuhw->disabled) { | 1149 | if (!cpuhw->disabled) { |
1150 | /* | 1150 | /* |
@@ -1211,7 +1211,7 @@ static void power_pmu_enable(struct pmu *pmu) | |||
1211 | return; | 1211 | return; |
1212 | local_irq_save(flags); | 1212 | local_irq_save(flags); |
1213 | 1213 | ||
1214 | cpuhw = this_cpu_ptr(&cpu_hw_events); | 1214 | cpuhw = &__get_cpu_var(cpu_hw_events); |
1215 | if (!cpuhw->disabled) | 1215 | if (!cpuhw->disabled) |
1216 | goto out; | 1216 | goto out; |
1217 | 1217 | ||
@@ -1403,7 +1403,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) | |||
1403 | * Add the event to the list (if there is room) | 1403 | * Add the event to the list (if there is room) |
1404 | * and check whether the total set is still feasible. | 1404 | * and check whether the total set is still feasible. |
1405 | */ | 1405 | */ |
1406 | cpuhw = this_cpu_ptr(&cpu_hw_events); | 1406 | cpuhw = &__get_cpu_var(cpu_hw_events); |
1407 | n0 = cpuhw->n_events; | 1407 | n0 = cpuhw->n_events; |
1408 | if (n0 >= ppmu->n_counter) | 1408 | if (n0 >= ppmu->n_counter) |
1409 | goto out; | 1409 | goto out; |
@@ -1469,7 +1469,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags) | |||
1469 | 1469 | ||
1470 | power_pmu_read(event); | 1470 | power_pmu_read(event); |
1471 | 1471 | ||
1472 | cpuhw = this_cpu_ptr(&cpu_hw_events); | 1472 | cpuhw = &__get_cpu_var(cpu_hw_events); |
1473 | for (i = 0; i < cpuhw->n_events; ++i) { | 1473 | for (i = 0; i < cpuhw->n_events; ++i) { |
1474 | if (event == cpuhw->event[i]) { | 1474 | if (event == cpuhw->event[i]) { |
1475 | while (++i < cpuhw->n_events) { | 1475 | while (++i < cpuhw->n_events) { |
@@ -1575,7 +1575,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags) | |||
1575 | */ | 1575 | */ |
1576 | void power_pmu_start_txn(struct pmu *pmu) | 1576 | void power_pmu_start_txn(struct pmu *pmu) |
1577 | { | 1577 | { |
1578 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); | 1578 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
1579 | 1579 | ||
1580 | perf_pmu_disable(pmu); | 1580 | perf_pmu_disable(pmu); |
1581 | cpuhw->group_flag |= PERF_EVENT_TXN; | 1581 | cpuhw->group_flag |= PERF_EVENT_TXN; |
@@ -1589,7 +1589,7 @@ void power_pmu_start_txn(struct pmu *pmu) | |||
1589 | */ | 1589 | */ |
1590 | void power_pmu_cancel_txn(struct pmu *pmu) | 1590 | void power_pmu_cancel_txn(struct pmu *pmu) |
1591 | { | 1591 | { |
1592 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); | 1592 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
1593 | 1593 | ||
1594 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 1594 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
1595 | perf_pmu_enable(pmu); | 1595 | perf_pmu_enable(pmu); |
@@ -1607,7 +1607,7 @@ int power_pmu_commit_txn(struct pmu *pmu) | |||
1607 | 1607 | ||
1608 | if (!ppmu) | 1608 | if (!ppmu) |
1609 | return -EAGAIN; | 1609 | return -EAGAIN; |
1610 | cpuhw = this_cpu_ptr(&cpu_hw_events); | 1610 | cpuhw = &__get_cpu_var(cpu_hw_events); |
1611 | n = cpuhw->n_events; | 1611 | n = cpuhw->n_events; |
1612 | if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) | 1612 | if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) |
1613 | return -EAGAIN; | 1613 | return -EAGAIN; |
@@ -1964,7 +1964,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1964 | 1964 | ||
1965 | if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { | 1965 | if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { |
1966 | struct cpu_hw_events *cpuhw; | 1966 | struct cpu_hw_events *cpuhw; |
1967 | cpuhw = this_cpu_ptr(&cpu_hw_events); | 1967 | cpuhw = &__get_cpu_var(cpu_hw_events); |
1968 | power_pmu_bhrb_read(cpuhw); | 1968 | power_pmu_bhrb_read(cpuhw); |
1969 | data.br_stack = &cpuhw->bhrb_stack; | 1969 | data.br_stack = &cpuhw->bhrb_stack; |
1970 | } | 1970 | } |
@@ -2037,7 +2037,7 @@ static bool pmc_overflow(unsigned long val) | |||
2037 | static void perf_event_interrupt(struct pt_regs *regs) | 2037 | static void perf_event_interrupt(struct pt_regs *regs) |
2038 | { | 2038 | { |
2039 | int i, j; | 2039 | int i, j; |
2040 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); | 2040 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
2041 | struct perf_event *event; | 2041 | struct perf_event *event; |
2042 | unsigned long val[8]; | 2042 | unsigned long val[8]; |
2043 | int found, active; | 2043 | int found, active; |
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 4acaea01fe03..d35ae52c69dc 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -210,7 +210,7 @@ static void fsl_emb_pmu_disable(struct pmu *pmu) | |||
210 | unsigned long flags; | 210 | unsigned long flags; |
211 | 211 | ||
212 | local_irq_save(flags); | 212 | local_irq_save(flags); |
213 | cpuhw = this_cpu_ptr(&cpu_hw_events); | 213 | cpuhw = &__get_cpu_var(cpu_hw_events); |
214 | 214 | ||
215 | if (!cpuhw->disabled) { | 215 | if (!cpuhw->disabled) { |
216 | cpuhw->disabled = 1; | 216 | cpuhw->disabled = 1; |
@@ -249,7 +249,7 @@ static void fsl_emb_pmu_enable(struct pmu *pmu) | |||
249 | unsigned long flags; | 249 | unsigned long flags; |
250 | 250 | ||
251 | local_irq_save(flags); | 251 | local_irq_save(flags); |
252 | cpuhw = this_cpu_ptr(&cpu_hw_events); | 252 | cpuhw = &__get_cpu_var(cpu_hw_events); |
253 | if (!cpuhw->disabled) | 253 | if (!cpuhw->disabled) |
254 | goto out; | 254 | goto out; |
255 | 255 | ||
@@ -653,7 +653,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
653 | static void perf_event_interrupt(struct pt_regs *regs) | 653 | static void perf_event_interrupt(struct pt_regs *regs) |
654 | { | 654 | { |
655 | int i; | 655 | int i; |
656 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); | 656 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
657 | struct perf_event *event; | 657 | struct perf_event *event; |
658 | unsigned long val; | 658 | unsigned long val; |
659 | int found = 0; | 659 | int found = 0; |
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 4c11421847be..8a106b4172e0 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -82,7 +82,7 @@ static void iic_unmask(struct irq_data *d) | |||
82 | 82 | ||
83 | static void iic_eoi(struct irq_data *d) | 83 | static void iic_eoi(struct irq_data *d) |
84 | { | 84 | { |
85 | struct iic *iic = this_cpu_ptr(&cpu_iic); | 85 | struct iic *iic = &__get_cpu_var(cpu_iic); |
86 | out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); | 86 | out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); |
87 | BUG_ON(iic->eoi_ptr < 0); | 87 | BUG_ON(iic->eoi_ptr < 0); |
88 | } | 88 | } |
@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void) | |||
148 | struct iic *iic; | 148 | struct iic *iic; |
149 | unsigned int virq; | 149 | unsigned int virq; |
150 | 150 | ||
151 | iic = this_cpu_ptr(&cpu_iic); | 151 | iic = &__get_cpu_var(cpu_iic); |
152 | *(unsigned long *) &pending = | 152 | *(unsigned long *) &pending = |
153 | in_be64((u64 __iomem *) &iic->regs->pending_destr); | 153 | in_be64((u64 __iomem *) &iic->regs->pending_destr); |
154 | if (!(pending.flags & CBE_IIC_IRQ_VALID)) | 154 | if (!(pending.flags & CBE_IIC_IRQ_VALID)) |
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void) | |||
163 | 163 | ||
164 | void iic_setup_cpu(void) | 164 | void iic_setup_cpu(void) |
165 | { | 165 | { |
166 | out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff); | 166 | out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff); |
167 | } | 167 | } |
168 | 168 | ||
169 | u8 iic_get_target_id(int cpu) | 169 | u8 iic_get_target_id(int cpu) |
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index 9527e2a7c541..d8a000a9988b 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -48,7 +48,7 @@ void __trace_opal_entry(unsigned long opcode, unsigned long *args) | |||
48 | 48 | ||
49 | local_irq_save(flags); | 49 | local_irq_save(flags); |
50 | 50 | ||
51 | depth = this_cpu_ptr(&opal_trace_depth); | 51 | depth = &__get_cpu_var(opal_trace_depth); |
52 | 52 | ||
53 | if (*depth) | 53 | if (*depth) |
54 | goto out; | 54 | goto out; |
@@ -69,7 +69,7 @@ void __trace_opal_exit(long opcode, unsigned long retval) | |||
69 | 69 | ||
70 | local_irq_save(flags); | 70 | local_irq_save(flags); |
71 | 71 | ||
72 | depth = this_cpu_ptr(&opal_trace_depth); | 72 | depth = &__get_cpu_var(opal_trace_depth); |
73 | 73 | ||
74 | if (*depth) | 74 | if (*depth) |
75 | goto out; | 75 | goto out; |
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index a6c42f34303a..5f3b23220b8e 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -711,7 +711,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq) | |||
711 | 711 | ||
712 | static unsigned int ps3_get_irq(void) | 712 | static unsigned int ps3_get_irq(void) |
713 | { | 713 | { |
714 | struct ps3_private *pd = this_cpu_ptr(&ps3_private); | 714 | struct ps3_private *pd = &__get_cpu_var(ps3_private); |
715 | u64 x = (pd->bmp.status & pd->bmp.mask); | 715 | u64 x = (pd->bmp.status & pd->bmp.mask); |
716 | unsigned int plug; | 716 | unsigned int plug; |
717 | 717 | ||
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 39049e4884fb..1062f71f5a85 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -75,7 +75,7 @@ static atomic_t dtl_count; | |||
75 | */ | 75 | */ |
76 | static void consume_dtle(struct dtl_entry *dtle, u64 index) | 76 | static void consume_dtle(struct dtl_entry *dtle, u64 index) |
77 | { | 77 | { |
78 | struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings); | 78 | struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings); |
79 | struct dtl_entry *wp = dtlr->write_ptr; | 79 | struct dtl_entry *wp = dtlr->write_ptr; |
80 | struct lppaca *vpa = local_paca->lppaca_ptr; | 80 | struct lppaca *vpa = local_paca->lppaca_ptr; |
81 | 81 | ||
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index f02ec3ab428c..4575f0c9e521 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -110,7 +110,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long | |||
110 | if (opcode > MAX_HCALL_OPCODE) | 110 | if (opcode > MAX_HCALL_OPCODE) |
111 | return; | 111 | return; |
112 | 112 | ||
113 | h = this_cpu_ptr(&hcall_stats[opcode / 4]); | 113 | h = &__get_cpu_var(hcall_stats)[opcode / 4]; |
114 | h->tb_start = mftb(); | 114 | h->tb_start = mftb(); |
115 | h->purr_start = mfspr(SPRN_PURR); | 115 | h->purr_start = mfspr(SPRN_PURR); |
116 | } | 116 | } |
@@ -123,7 +123,7 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long | |||
123 | if (opcode > MAX_HCALL_OPCODE) | 123 | if (opcode > MAX_HCALL_OPCODE) |
124 | return; | 124 | return; |
125 | 125 | ||
126 | h = this_cpu_ptr(&hcall_stats[opcode / 4]); | 126 | h = &__get_cpu_var(hcall_stats)[opcode / 4]; |
127 | h->num_calls++; | 127 | h->num_calls++; |
128 | h->tb_total += mftb() - h->tb_start; | 128 | h->tb_total += mftb() - h->tb_start; |
129 | h->purr_total += mfspr(SPRN_PURR) - h->purr_start; | 129 | h->purr_total += mfspr(SPRN_PURR) - h->purr_start; |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 8c355ed4291e..4642d6a4d356 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -200,7 +200,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | |||
200 | 200 | ||
201 | local_irq_save(flags); /* to protect tcep and the page behind it */ | 201 | local_irq_save(flags); /* to protect tcep and the page behind it */ |
202 | 202 | ||
203 | tcep = __this_cpu_read(tce_page); | 203 | tcep = __get_cpu_var(tce_page); |
204 | 204 | ||
205 | /* This is safe to do since interrupts are off when we're called | 205 | /* This is safe to do since interrupts are off when we're called |
206 | * from iommu_alloc{,_sg}() | 206 | * from iommu_alloc{,_sg}() |
@@ -213,7 +213,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | |||
213 | return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, | 213 | return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, |
214 | direction, attrs); | 214 | direction, attrs); |
215 | } | 215 | } |
216 | __this_cpu_write(tce_page, tcep); | 216 | __get_cpu_var(tce_page) = tcep; |
217 | } | 217 | } |
218 | 218 | ||
219 | rpn = __pa(uaddr) >> TCE_SHIFT; | 219 | rpn = __pa(uaddr) >> TCE_SHIFT; |
@@ -399,7 +399,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, | |||
399 | long l, limit; | 399 | long l, limit; |
400 | 400 | ||
401 | local_irq_disable(); /* to protect tcep and the page behind it */ | 401 | local_irq_disable(); /* to protect tcep and the page behind it */ |
402 | tcep = __this_cpu_read(tce_page); | 402 | tcep = __get_cpu_var(tce_page); |
403 | 403 | ||
404 | if (!tcep) { | 404 | if (!tcep) { |
405 | tcep = (__be64 *)__get_free_page(GFP_ATOMIC); | 405 | tcep = (__be64 *)__get_free_page(GFP_ATOMIC); |
@@ -407,7 +407,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, | |||
407 | local_irq_enable(); | 407 | local_irq_enable(); |
408 | return -ENOMEM; | 408 | return -ENOMEM; |
409 | } | 409 | } |
410 | __this_cpu_write(tce_page, tcep); | 410 | __get_cpu_var(tce_page) = tcep; |
411 | } | 411 | } |
412 | 412 | ||
413 | proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; | 413 | proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; |
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 56df72da59fe..34e64237fff9 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -507,7 +507,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) | |||
507 | unsigned long vpn; | 507 | unsigned long vpn; |
508 | unsigned long i, pix, rc; | 508 | unsigned long i, pix, rc; |
509 | unsigned long flags = 0; | 509 | unsigned long flags = 0; |
510 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); | 510 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
511 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); | 511 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
512 | unsigned long param[9]; | 512 | unsigned long param[9]; |
513 | unsigned long hash, index, shift, hidx, slot; | 513 | unsigned long hash, index, shift, hidx, slot; |
@@ -697,7 +697,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args) | |||
697 | 697 | ||
698 | local_irq_save(flags); | 698 | local_irq_save(flags); |
699 | 699 | ||
700 | depth = this_cpu_ptr(&hcall_trace_depth); | 700 | depth = &__get_cpu_var(hcall_trace_depth); |
701 | 701 | ||
702 | if (*depth) | 702 | if (*depth) |
703 | goto out; | 703 | goto out; |
@@ -722,7 +722,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval, | |||
722 | 722 | ||
723 | local_irq_save(flags); | 723 | local_irq_save(flags); |
724 | 724 | ||
725 | depth = this_cpu_ptr(&hcall_trace_depth); | 725 | depth = &__get_cpu_var(hcall_trace_depth); |
726 | 726 | ||
727 | if (*depth) | 727 | if (*depth) |
728 | goto out; | 728 | goto out; |
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 179a69fd5568..dff05b9eb946 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -302,8 +302,8 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) | |||
302 | /* If it isn't an extended log we can use the per cpu 64bit buffer */ | 302 | /* If it isn't an extended log we can use the per cpu 64bit buffer */ |
303 | h = (struct rtas_error_log *)&savep[1]; | 303 | h = (struct rtas_error_log *)&savep[1]; |
304 | if (!rtas_error_extended(h)) { | 304 | if (!rtas_error_extended(h)) { |
305 | memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64)); | 305 | memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64)); |
306 | errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf); | 306 | errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf); |
307 | } else { | 307 | } else { |
308 | int len, error_log_length; | 308 | int len, error_log_length; |
309 | 309 | ||
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 365249cd346b..fe0cca477164 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -155,7 +155,7 @@ int __init xics_smp_probe(void) | |||
155 | 155 | ||
156 | void xics_teardown_cpu(void) | 156 | void xics_teardown_cpu(void) |
157 | { | 157 | { |
158 | struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); | 158 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); |
159 | 159 | ||
160 | /* | 160 | /* |
161 | * we have to reset the cppr index to 0 because we're | 161 | * we have to reset the cppr index to 0 because we're |