author     Christoph Lameter <cl@linux.com>        2014-10-21 16:23:25 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>   2014-11-02 20:12:32 -0500
commit     69111bac42f5ceacdd22e30947837ceb2c4493ed (patch)
tree       717c5e3c231b078f970aeb5d895bd96f9efc8fdd /arch
parent     0df1f2487d2f0d04703f142813d53615d62a1da4 (diff)
powerpc: Replace __get_cpu_var uses
This still has not been merged and now powerpc is the only arch that does
not have this change. Sorry about missing linuxppc-dev before.

V2->V2
- Fix up to work against 3.18-rc1

__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.

Other use cases are for storing and retrieving data from the current
processor's percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.

__get_cpu_var() is defined as:

	#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))

__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.

this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.

This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and fewer registers
are used when code is generated.

At the end of the patch set all uses of __get_cpu_var have been removed so
the macro is removed too.

The patch set includes passes over all arches as well. Once these operations
are used throughout then specialized macros can be defined in non-x86 arches
as well in order to optimize per cpu access by f.e. using a global register
that may be set to the per cpu base.

Transformations done to __get_cpu_var()

1. Determine the address of the percpu instance of the current processor.

	DEFINE_PER_CPU(int, y);
	int *x = &__get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(&y);

2. Same as #1 but this time an array structure is involved.

	DEFINE_PER_CPU(int, y[20]);
	int *x = __get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(y);

3. Retrieve the content of the current processor's instance of a per cpu
   variable.

	DEFINE_PER_CPU(int, y);
	int x = __get_cpu_var(y)

   Converts to

	int x = __this_cpu_read(y);

4. Retrieve the content of a percpu struct

	DEFINE_PER_CPU(struct mystruct, y);
	struct mystruct x = __get_cpu_var(y);

   Converts to

	memcpy(&x, this_cpu_ptr(&y), sizeof(x));

5. Assignment to a per cpu variable

	DEFINE_PER_CPU(int, y)
	__get_cpu_var(y) = x;

   Converts to

	__this_cpu_write(y, x);

6. Increment/Decrement etc of a per cpu variable

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y)++

   Converts to

	__this_cpu_inc(y)

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: Paul Mackerras <paulus@samba.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
[mpe: Fix build errors caused by set/or_softirq_pending(), and rework
 assignment in __set_breakpoint() to use memcpy().]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
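For readers less familiar with the this_cpu API, the following is a minimal
sketch (hypothetical variable and function names, kernel-style C assuming
<linux/percpu.h>) that restates transformations 1-6 above as one compilable
function; the removed __get_cpu_var() forms are kept in comments for
comparison. Note that the double-underscore variants (__this_cpu_read() and
friends) assume the caller already runs with preemption disabled, which
matches the irq-off contexts converted by this patch.

#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/compiler.h>

/* Illustrative only: hypothetical per-cpu variables, not part of this patch. */
struct mystruct {
        int a;
};

static DEFINE_PER_CPU(int, y);
static DEFINE_PER_CPU(int, y_array[20]);
static DEFINE_PER_CPU(struct mystruct, z);

static void __maybe_unused this_cpu_conversion_examples(void)
{
        struct mystruct copy;
        int *p, *q, val;

        /* 1. was: p = &__get_cpu_var(y); */
        p = this_cpu_ptr(&y);

        /* 2. was: q = __get_cpu_var(y_array); */
        q = this_cpu_ptr(y_array);

        /* 3. was: val = __get_cpu_var(y); */
        val = __this_cpu_read(y);

        /* 4. was: copy = __get_cpu_var(z); */
        memcpy(&copy, this_cpu_ptr(&z), sizeof(copy));

        /* 5. was: __get_cpu_var(y) = val; */
        __this_cpu_write(y, val);

        /* 6. was: __get_cpu_var(y)++; */
        __this_cpu_inc(y);

        (void)p;
        (void)q;
}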
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/hardirq.h                    7
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h                   4
-rw-r--r--  arch/powerpc/include/asm/xics.h                       8
-rw-r--r--  arch/powerpc/kernel/dbell.c                           2
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c                   6
-rw-r--r--  arch/powerpc/kernel/iommu.c                           2
-rw-r--r--  arch/powerpc/kernel/irq.c                             4
-rw-r--r--  arch/powerpc/kernel/kgdb.c                            2
-rw-r--r--  arch/powerpc/kernel/kprobes.c                         6
-rw-r--r--  arch/powerpc/kernel/mce.c                             24
-rw-r--r--  arch/powerpc/kernel/process.c                         10
-rw-r--r--  arch/powerpc/kernel/smp.c                             6
-rw-r--r--  arch/powerpc/kernel/sysfs.c                           4
-rw-r--r--  arch/powerpc/kernel/time.c                            22
-rw-r--r--  arch/powerpc/kernel/traps.c                           8
-rw-r--r--  arch/powerpc/kvm/e500.c                               14
-rw-r--r--  arch/powerpc/kvm/e500mc.c                             4
-rw-r--r--  arch/powerpc/mm/hash_native_64.c                      2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c                       2
-rw-r--r--  arch/powerpc/mm/hugetlbpage-book3e.c                  6
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c                         2
-rw-r--r--  arch/powerpc/perf/core-book3s.c                       22
-rw-r--r--  arch/powerpc/perf/core-fsl-emb.c                      6
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c               6
-rw-r--r--  arch/powerpc/platforms/powernv/opal-tracepoints.c     4
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c                2
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c                  2
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall_inst.c          4
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c                8
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c                 6
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c                  4
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c                2
32 files changed, 108 insertions, 103 deletions
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 1bbb3013d6aa..8add8b861e8d 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -21,7 +21,12 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
21 21
22#define __ARCH_IRQ_STAT 22#define __ARCH_IRQ_STAT
23 23
24#define local_softirq_pending() __get_cpu_var(irq_stat).__softirq_pending 24#define local_softirq_pending() __this_cpu_read(irq_stat.__softirq_pending)
25
26#define __ARCH_SET_SOFTIRQ_PENDING
27
28#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x))
29#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x))
25 30
26static inline void ack_bad_irq(unsigned int irq) 31static inline void ack_bad_irq(unsigned int irq)
27{ 32{
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 2def01ed0cb2..cd7c2719d3ef 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
107 107
108static inline void arch_enter_lazy_mmu_mode(void) 108static inline void arch_enter_lazy_mmu_mode(void)
109{ 109{
110 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 110 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
111 111
112 batch->active = 1; 112 batch->active = 1;
113} 113}
114 114
115static inline void arch_leave_lazy_mmu_mode(void) 115static inline void arch_leave_lazy_mmu_mode(void)
116{ 116{
117 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 117 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
118 118
119 if (batch->index) 119 if (batch->index)
120 __flush_tlb_pending(batch); 120 __flush_tlb_pending(batch);
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 0d050ea37a04..6997f4a271df 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -98,7 +98,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
98 98
99static inline void xics_push_cppr(unsigned int vec) 99static inline void xics_push_cppr(unsigned int vec)
100{ 100{
101 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 101 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
102 102
103 if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) 103 if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
104 return; 104 return;
@@ -111,7 +111,7 @@ static inline void xics_push_cppr(unsigned int vec)
111 111
112static inline unsigned char xics_pop_cppr(void) 112static inline unsigned char xics_pop_cppr(void)
113{ 113{
114 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 114 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
115 115
116 if (WARN_ON(os_cppr->index < 1)) 116 if (WARN_ON(os_cppr->index < 1))
117 return LOWEST_PRIORITY; 117 return LOWEST_PRIORITY;
@@ -121,7 +121,7 @@ static inline unsigned char xics_pop_cppr(void)
121 121
122static inline void xics_set_base_cppr(unsigned char cppr) 122static inline void xics_set_base_cppr(unsigned char cppr)
123{ 123{
124 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 124 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
125 125
126 /* we only really want to set the priority when there's 126 /* we only really want to set the priority when there's
127 * just one cppr value on the stack 127 * just one cppr value on the stack
@@ -133,7 +133,7 @@ static inline void xics_set_base_cppr(unsigned char cppr)
133 133
134static inline unsigned char xics_cppr_top(void) 134static inline unsigned char xics_cppr_top(void)
135{ 135{
136 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 136 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
137 137
138 return os_cppr->stack[os_cppr->index]; 138 return os_cppr->stack[os_cppr->index];
139} 139}
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index d55c76c571f3..f4217819cc31 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs)
41 41
42 may_hard_irq_enable(); 42 may_hard_irq_enable();
43 43
44 __get_cpu_var(irq_stat).doorbell_irqs++; 44 __this_cpu_inc(irq_stat.doorbell_irqs);
45 45
46 smp_ipi_demux(); 46 smp_ipi_demux();
47 47
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 1f7d84e2e8b2..05e804cdecaa 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type)
63int arch_install_hw_breakpoint(struct perf_event *bp) 63int arch_install_hw_breakpoint(struct perf_event *bp)
64{ 64{
65 struct arch_hw_breakpoint *info = counter_arch_bp(bp); 65 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
66 struct perf_event **slot = &__get_cpu_var(bp_per_reg); 66 struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
67 67
68 *slot = bp; 68 *slot = bp;
69 69
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
88 */ 88 */
89void arch_uninstall_hw_breakpoint(struct perf_event *bp) 89void arch_uninstall_hw_breakpoint(struct perf_event *bp)
90{ 90{
91 struct perf_event **slot = &__get_cpu_var(bp_per_reg); 91 struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
92 92
93 if (*slot != bp) { 93 if (*slot != bp) {
94 WARN_ONCE(1, "Can't find the breakpoint"); 94 WARN_ONCE(1, "Can't find the breakpoint");
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
226 */ 226 */
227 rcu_read_lock(); 227 rcu_read_lock();
228 228
229 bp = __get_cpu_var(bp_per_reg); 229 bp = __this_cpu_read(bp_per_reg);
230 if (!bp) 230 if (!bp)
231 goto out; 231 goto out;
232 info = counter_arch_bp(bp); 232 info = counter_arch_bp(bp);
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a10642a0d861..71e60bfb89e2 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
208 * We don't need to disable preemption here because any CPU can 208 * We don't need to disable preemption here because any CPU can
209 * safely use any IOMMU pool. 209 * safely use any IOMMU pool.
210 */ 210 */
211 pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1); 211 pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
212 212
213 if (largealloc) 213 if (largealloc)
214 pool = &(tbl->large_pool); 214 pool = &(tbl->large_pool);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c14383575fe8..8d87bb162ecd 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -114,7 +114,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
114static inline notrace int decrementer_check_overflow(void) 114static inline notrace int decrementer_check_overflow(void)
115{ 115{
116 u64 now = get_tb_or_rtc(); 116 u64 now = get_tb_or_rtc();
117 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 117 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
118 118
119 return now >= *next_tb; 119 return now >= *next_tb;
120} 120}
@@ -499,7 +499,7 @@ void __do_irq(struct pt_regs *regs)
499 499
500 /* And finally process it */ 500 /* And finally process it */
501 if (unlikely(irq == NO_IRQ)) 501 if (unlikely(irq == NO_IRQ))
502 __get_cpu_var(irq_stat).spurious_irqs++; 502 __this_cpu_inc(irq_stat.spurious_irqs);
503 else 503 else
504 generic_handle_irq(irq); 504 generic_handle_irq(irq);
505 505
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 8504657379f1..e77c3ccf8dcf 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
155{ 155{
156 struct thread_info *thread_info, *exception_thread_info; 156 struct thread_info *thread_info, *exception_thread_info;
157 struct thread_info *backup_current_thread_info = 157 struct thread_info *backup_current_thread_info =
158 &__get_cpu_var(kgdb_thread_info); 158 this_cpu_ptr(&kgdb_thread_info);
159 159
160 if (user_mode(regs)) 160 if (user_mode(regs))
161 return 0; 161 return 0;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2f72af82513c..7c053f281406 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
119 119
120static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 120static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
121{ 121{
122 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; 122 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
123 kcb->kprobe_status = kcb->prev_kprobe.status; 123 kcb->kprobe_status = kcb->prev_kprobe.status;
124 kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; 124 kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
125} 125}
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
127static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, 127static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
128 struct kprobe_ctlblk *kcb) 128 struct kprobe_ctlblk *kcb)
129{ 129{
130 __get_cpu_var(current_kprobe) = p; 130 __this_cpu_write(current_kprobe, p);
131 kcb->kprobe_saved_msr = regs->msr; 131 kcb->kprobe_saved_msr = regs->msr;
132} 132}
133 133
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
192 ret = 1; 192 ret = 1;
193 goto no_kprobe; 193 goto no_kprobe;
194 } 194 }
195 p = __get_cpu_var(current_kprobe); 195 p = __this_cpu_read(current_kprobe);
196 if (p->break_handler && p->break_handler(p, regs)) { 196 if (p->break_handler && p->break_handler(p, regs)) {
197 goto ss_probe; 197 goto ss_probe;
198 } 198 }
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index a7fd4cb78b78..15c99b649b04 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
73 uint64_t nip, uint64_t addr) 73 uint64_t nip, uint64_t addr)
74{ 74{
75 uint64_t srr1; 75 uint64_t srr1;
76 int index = __get_cpu_var(mce_nest_count)++; 76 int index = __this_cpu_inc_return(mce_nest_count);
77 struct machine_check_event *mce = &__get_cpu_var(mce_event[index]); 77 struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
78 78
79 /* 79 /*
80 * Return if we don't have enough space to log mce event. 80 * Return if we don't have enough space to log mce event.
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
143 */ 143 */
144int get_mce_event(struct machine_check_event *mce, bool release) 144int get_mce_event(struct machine_check_event *mce, bool release)
145{ 145{
146 int index = __get_cpu_var(mce_nest_count) - 1; 146 int index = __this_cpu_read(mce_nest_count) - 1;
147 struct machine_check_event *mc_evt; 147 struct machine_check_event *mc_evt;
148 int ret = 0; 148 int ret = 0;
149 149
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
153 153
154 /* Check if we have MCE info to process. */ 154 /* Check if we have MCE info to process. */
155 if (index < MAX_MC_EVT) { 155 if (index < MAX_MC_EVT) {
156 mc_evt = &__get_cpu_var(mce_event[index]); 156 mc_evt = this_cpu_ptr(&mce_event[index]);
157 /* Copy the event structure and release the original */ 157 /* Copy the event structure and release the original */
158 if (mce) 158 if (mce)
159 *mce = *mc_evt; 159 *mce = *mc_evt;
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
163 } 163 }
164 /* Decrement the count to free the slot. */ 164 /* Decrement the count to free the slot. */
165 if (release) 165 if (release)
166 __get_cpu_var(mce_nest_count)--; 166 __this_cpu_dec(mce_nest_count);
167 167
168 return ret; 168 return ret;
169} 169}
@@ -184,13 +184,13 @@ void machine_check_queue_event(void)
184 if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) 184 if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
185 return; 185 return;
186 186
187 index = __get_cpu_var(mce_queue_count)++; 187 index = __this_cpu_inc_return(mce_queue_count);
188 /* If queue is full, just return for now. */ 188 /* If queue is full, just return for now. */
189 if (index >= MAX_MC_EVT) { 189 if (index >= MAX_MC_EVT) {
190 __get_cpu_var(mce_queue_count)--; 190 __this_cpu_dec(mce_queue_count);
191 return; 191 return;
192 } 192 }
193 __get_cpu_var(mce_event_queue[index]) = evt; 193 memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
194 194
195 /* Queue irq work to process this event later. */ 195 /* Queue irq work to process this event later. */
196 irq_work_queue(&mce_event_process_work); 196 irq_work_queue(&mce_event_process_work);
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work)
208 * For now just print it to console. 208 * For now just print it to console.
209 * TODO: log this error event to FSP or nvram. 209 * TODO: log this error event to FSP or nvram.
210 */ 210 */
211 while (__get_cpu_var(mce_queue_count) > 0) { 211 while (__this_cpu_read(mce_queue_count) > 0) {
212 index = __get_cpu_var(mce_queue_count) - 1; 212 index = __this_cpu_read(mce_queue_count) - 1;
213 machine_check_print_event_info( 213 machine_check_print_event_info(
214 &__get_cpu_var(mce_event_queue[index])); 214 this_cpu_ptr(&mce_event_queue[index]));
215 __get_cpu_var(mce_queue_count)--; 215 __this_cpu_dec(mce_queue_count);
216 } 216 }
217} 217}
218 218
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 923cd2daba89..91e132b495bf 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -499,7 +499,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk)
499 499
500void __set_breakpoint(struct arch_hw_breakpoint *brk) 500void __set_breakpoint(struct arch_hw_breakpoint *brk)
501{ 501{
502 __get_cpu_var(current_brk) = *brk; 502 memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
503 503
504 if (cpu_has_feature(CPU_FTR_DAWR)) 504 if (cpu_has_feature(CPU_FTR_DAWR))
505 set_dawr(brk); 505 set_dawr(brk);
@@ -842,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
842 * schedule DABR 842 * schedule DABR
843 */ 843 */
844#ifndef CONFIG_HAVE_HW_BREAKPOINT 844#ifndef CONFIG_HAVE_HW_BREAKPOINT
845 if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) 845 if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
846 __set_breakpoint(&new->thread.hw_brk); 846 __set_breakpoint(&new->thread.hw_brk);
847#endif /* CONFIG_HAVE_HW_BREAKPOINT */ 847#endif /* CONFIG_HAVE_HW_BREAKPOINT */
848#endif 848#endif
@@ -856,7 +856,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
856 * Collect processor utilization data per process 856 * Collect processor utilization data per process
857 */ 857 */
858 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 858 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
859 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 859 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
860 long unsigned start_tb, current_tb; 860 long unsigned start_tb, current_tb;
861 start_tb = old_thread->start_tb; 861 start_tb = old_thread->start_tb;
862 cu->current_tb = current_tb = mfspr(SPRN_PURR); 862 cu->current_tb = current_tb = mfspr(SPRN_PURR);
@@ -866,7 +866,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
866#endif /* CONFIG_PPC64 */ 866#endif /* CONFIG_PPC64 */
867 867
868#ifdef CONFIG_PPC_BOOK3S_64 868#ifdef CONFIG_PPC_BOOK3S_64
869 batch = &__get_cpu_var(ppc64_tlb_batch); 869 batch = this_cpu_ptr(&ppc64_tlb_batch);
870 if (batch->active) { 870 if (batch->active) {
871 current_thread_info()->local_flags |= _TLF_LAZY_MMU; 871 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
872 if (batch->index) 872 if (batch->index)
@@ -889,7 +889,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
889#ifdef CONFIG_PPC_BOOK3S_64 889#ifdef CONFIG_PPC_BOOK3S_64
890 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { 890 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
891 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; 891 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
892 batch = &__get_cpu_var(ppc64_tlb_batch); 892 batch = this_cpu_ptr(&ppc64_tlb_batch);
893 batch->active = 1; 893 batch->active = 1;
894 } 894 }
895#endif /* CONFIG_PPC_BOOK3S_64 */ 895#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 71e186d5f331..8b2d2dc8ef10 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,7 +243,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
243 243
244irqreturn_t smp_ipi_demux(void) 244irqreturn_t smp_ipi_demux(void)
245{ 245{
246 struct cpu_messages *info = &__get_cpu_var(ipi_message); 246 struct cpu_messages *info = this_cpu_ptr(&ipi_message);
247 unsigned int all; 247 unsigned int all;
248 248
249 mb(); /* order any irq clear */ 249 mb(); /* order any irq clear */
@@ -442,9 +442,9 @@ void generic_mach_cpu_die(void)
442 idle_task_exit(); 442 idle_task_exit();
443 cpu = smp_processor_id(); 443 cpu = smp_processor_id();
444 printk(KERN_DEBUG "CPU%d offline\n", cpu); 444 printk(KERN_DEBUG "CPU%d offline\n", cpu);
445 __get_cpu_var(cpu_state) = CPU_DEAD; 445 __this_cpu_write(cpu_state, CPU_DEAD);
446 smp_wmb(); 446 smp_wmb();
447 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) 447 while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
448 cpu_relax(); 448 cpu_relax();
449} 449}
450 450
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 67fd2fd2620a..fa1fd8a0c867 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void)
394 ppc_set_pmu_inuse(1); 394 ppc_set_pmu_inuse(1);
395 395
396 /* Only need to enable them once */ 396 /* Only need to enable them once */
397 if (__get_cpu_var(pmcs_enabled)) 397 if (__this_cpu_read(pmcs_enabled))
398 return; 398 return;
399 399
400 __get_cpu_var(pmcs_enabled) = 1; 400 __this_cpu_write(pmcs_enabled, 1);
401 401
402 if (ppc_md.enable_pmcs) 402 if (ppc_md.enable_pmcs)
403 ppc_md.enable_pmcs(); 403 ppc_md.enable_pmcs();
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7505599c2593..9f8ea617ff2c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void)
458 458
459DEFINE_PER_CPU(u8, irq_work_pending); 459DEFINE_PER_CPU(u8, irq_work_pending);
460 460
461#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 461#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
462#define test_irq_work_pending() __get_cpu_var(irq_work_pending) 462#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
463#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 463#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
464 464
465#endif /* 32 vs 64 bit */ 465#endif /* 32 vs 64 bit */
466 466
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void)
482static void __timer_interrupt(void) 482static void __timer_interrupt(void)
483{ 483{
484 struct pt_regs *regs = get_irq_regs(); 484 struct pt_regs *regs = get_irq_regs();
485 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 485 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
486 struct clock_event_device *evt = &__get_cpu_var(decrementers); 486 struct clock_event_device *evt = this_cpu_ptr(&decrementers);
487 u64 now; 487 u64 now;
488 488
489 trace_timer_interrupt_entry(regs); 489 trace_timer_interrupt_entry(regs);
@@ -498,7 +498,7 @@ static void __timer_interrupt(void)
498 *next_tb = ~(u64)0; 498 *next_tb = ~(u64)0;
499 if (evt->event_handler) 499 if (evt->event_handler)
500 evt->event_handler(evt); 500 evt->event_handler(evt);
501 __get_cpu_var(irq_stat).timer_irqs_event++; 501 __this_cpu_inc(irq_stat.timer_irqs_event);
502 } else { 502 } else {
503 now = *next_tb - now; 503 now = *next_tb - now;
504 if (now <= DECREMENTER_MAX) 504 if (now <= DECREMENTER_MAX)
@@ -506,13 +506,13 @@ static void __timer_interrupt(void)
506 /* We may have raced with new irq work */ 506 /* We may have raced with new irq work */
507 if (test_irq_work_pending()) 507 if (test_irq_work_pending())
508 set_dec(1); 508 set_dec(1);
509 __get_cpu_var(irq_stat).timer_irqs_others++; 509 __this_cpu_inc(irq_stat.timer_irqs_others);
510 } 510 }
511 511
512#ifdef CONFIG_PPC64 512#ifdef CONFIG_PPC64
513 /* collect purr register values often, for accurate calculations */ 513 /* collect purr register values often, for accurate calculations */
514 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 514 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
515 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 515 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
516 cu->current_tb = mfspr(SPRN_PURR); 516 cu->current_tb = mfspr(SPRN_PURR);
517 } 517 }
518#endif 518#endif
@@ -527,7 +527,7 @@ static void __timer_interrupt(void)
527void timer_interrupt(struct pt_regs * regs) 527void timer_interrupt(struct pt_regs * regs)
528{ 528{
529 struct pt_regs *old_regs; 529 struct pt_regs *old_regs;
530 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 530 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
531 531
532 /* Ensure a positive value is written to the decrementer, or else 532 /* Ensure a positive value is written to the decrementer, or else
533 * some CPUs will continue to take decrementer exceptions. 533 * some CPUs will continue to take decrementer exceptions.
@@ -813,7 +813,7 @@ static void __init clocksource_init(void)
813static int decrementer_set_next_event(unsigned long evt, 813static int decrementer_set_next_event(unsigned long evt,
814 struct clock_event_device *dev) 814 struct clock_event_device *dev)
815{ 815{
816 __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; 816 __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
817 set_dec(evt); 817 set_dec(evt);
818 818
819 /* We may have raced with new irq work */ 819 /* We may have raced with new irq work */
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
833/* Interrupt handler for the timer broadcast IPI */ 833/* Interrupt handler for the timer broadcast IPI */
834void tick_broadcast_ipi_handler(void) 834void tick_broadcast_ipi_handler(void)
835{ 835{
836 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 836 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
837 837
838 *next_tb = get_tb_or_rtc(); 838 *next_tb = get_tb_or_rtc();
839 __timer_interrupt(); 839 __timer_interrupt();
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0dc43f9932cf..e6595b72269b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs)
295{ 295{
296 long handled = 0; 296 long handled = 0;
297 297
298 __get_cpu_var(irq_stat).mce_exceptions++; 298 __this_cpu_inc(irq_stat.mce_exceptions);
299 299
300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
301 handled = cur_cpu_spec->machine_check_early(regs); 301 handled = cur_cpu_spec->machine_check_early(regs);
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs)
304 304
305long hmi_exception_realmode(struct pt_regs *regs) 305long hmi_exception_realmode(struct pt_regs *regs)
306{ 306{
307 __get_cpu_var(irq_stat).hmi_exceptions++; 307 __this_cpu_inc(irq_stat.hmi_exceptions);
308 308
309 if (ppc_md.hmi_exception_early) 309 if (ppc_md.hmi_exception_early)
310 ppc_md.hmi_exception_early(regs); 310 ppc_md.hmi_exception_early(regs);
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs)
700 enum ctx_state prev_state = exception_enter(); 700 enum ctx_state prev_state = exception_enter();
701 int recover = 0; 701 int recover = 0;
702 702
703 __get_cpu_var(irq_stat).mce_exceptions++; 703 __this_cpu_inc(irq_stat.mce_exceptions);
704 704
705 /* See if any machine dependent calls. In theory, we would want 705 /* See if any machine dependent calls. In theory, we would want
706 * to call the CPU first, and call the ppc_md. one if the CPU 706 * to call the CPU first, and call the ppc_md. one if the CPU
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs)
1519 1519
1520void performance_monitor_exception(struct pt_regs *regs) 1520void performance_monitor_exception(struct pt_regs *regs)
1521{ 1521{
1522 __get_cpu_var(irq_stat).pmu_irqs++; 1522 __this_cpu_inc(irq_stat.pmu_irqs);
1523 1523
1524 perf_irq(regs); 1524 perf_irq(regs);
1525} 1525}
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 2e02ed849f36..16095841afe1 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -76,11 +76,11 @@ static inline int local_sid_setup_one(struct id *entry)
76 unsigned long sid; 76 unsigned long sid;
77 int ret = -1; 77 int ret = -1;
78 78
79 sid = ++(__get_cpu_var(pcpu_last_used_sid)); 79 sid = __this_cpu_inc_return(pcpu_last_used_sid);
80 if (sid < NUM_TIDS) { 80 if (sid < NUM_TIDS) {
81 __get_cpu_var(pcpu_sids).entry[sid] = entry; 81 __this_cpu_write(pcpu_sids.entry[sid], entry);
82 entry->val = sid; 82 entry->val = sid;
83 entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; 83 entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
84 ret = sid; 84 ret = sid;
85 } 85 }
86 86
@@ -108,8 +108,8 @@ static inline int local_sid_setup_one(struct id *entry)
108static inline int local_sid_lookup(struct id *entry) 108static inline int local_sid_lookup(struct id *entry)
109{ 109{
110 if (entry && entry->val != 0 && 110 if (entry && entry->val != 0 &&
111 __get_cpu_var(pcpu_sids).entry[entry->val] == entry && 111 __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
112 entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) 112 entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
113 return entry->val; 113 return entry->val;
114 return -1; 114 return -1;
115} 115}
@@ -117,8 +117,8 @@ static inline int local_sid_lookup(struct id *entry)
117/* Invalidate all id mappings on local core -- call with preempt disabled */ 117/* Invalidate all id mappings on local core -- call with preempt disabled */
118static inline void local_sid_destroy_all(void) 118static inline void local_sid_destroy_all(void)
119{ 119{
120 __get_cpu_var(pcpu_last_used_sid) = 0; 120 __this_cpu_write(pcpu_last_used_sid, 0);
121 memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); 121 memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
122} 122}
123 123
124static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) 124static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 2fdc8722e324..cda695de8aa7 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -144,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
144 mtspr(SPRN_GESR, vcpu->arch.shared->esr); 144 mtspr(SPRN_GESR, vcpu->arch.shared->esr);
145 145
146 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || 146 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
147 __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) { 147 __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
148 kvmppc_e500_tlbil_all(vcpu_e500); 148 kvmppc_e500_tlbil_all(vcpu_e500);
149 __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu; 149 __this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
150 } 150 }
151} 151}
152 152
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index ae4962a06476..d53288a08c37 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -629,7 +629,7 @@ static void native_flush_hash_range(unsigned long number, int local)
629 unsigned long want_v; 629 unsigned long want_v;
630 unsigned long flags; 630 unsigned long flags;
631 real_pte_t pte; 631 real_pte_t pte;
632 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 632 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
633 unsigned long psize = batch->psize; 633 unsigned long psize = batch->psize;
634 int ssize = batch->ssize; 634 int ssize = batch->ssize;
635 int i; 635 int i;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d5339a3b9945..f01027731e23 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1322,7 +1322,7 @@ void flush_hash_range(unsigned long number, int local)
1322 else { 1322 else {
1323 int i; 1323 int i;
1324 struct ppc64_tlb_batch *batch = 1324 struct ppc64_tlb_batch *batch =
1325 &__get_cpu_var(ppc64_tlb_batch); 1325 this_cpu_ptr(&ppc64_tlb_batch);
1326 1326
1327 for (i = 0; i < number; i++) 1327 for (i = 0; i < number; i++)
1328 flush_hash_page(batch->vpn[i], batch->pte[i], 1328 flush_hash_page(batch->vpn[i], batch->pte[i],
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 5e4ee2573903..ba47aaf33a4b 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -33,13 +33,13 @@ static inline int tlb1_next(void)
33 33
34 ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; 34 ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
35 35
36 index = __get_cpu_var(next_tlbcam_idx); 36 index = this_cpu_read(next_tlbcam_idx);
37 37
38 /* Just round-robin the entries and wrap when we hit the end */ 38 /* Just round-robin the entries and wrap when we hit the end */
39 if (unlikely(index == ncams - 1)) 39 if (unlikely(index == ncams - 1))
40 __get_cpu_var(next_tlbcam_idx) = tlbcam_index; 40 __this_cpu_write(next_tlbcam_idx, tlbcam_index);
41 else 41 else
42 __get_cpu_var(next_tlbcam_idx)++; 42 __this_cpu_inc(next_tlbcam_idx);
43 43
44 return index; 44 return index;
45} 45}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e70ae968e5f..8aa04f03fd31 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -462,7 +462,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
462{ 462{
463 struct hugepd_freelist **batchp; 463 struct hugepd_freelist **batchp;
464 464
465 batchp = &get_cpu_var(hugepd_freelist_cur); 465 batchp = this_cpu_ptr(&hugepd_freelist_cur);
466 466
467 if (atomic_read(&tlb->mm->mm_users) < 2 || 467 if (atomic_read(&tlb->mm->mm_users) < 2 ||
468 cpumask_equal(mm_cpumask(tlb->mm), 468 cpumask_equal(mm_cpumask(tlb->mm),
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index a6995d4e93d4..7c4f6690533a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -339,7 +339,7 @@ static void power_pmu_bhrb_reset(void)
339 339
340static void power_pmu_bhrb_enable(struct perf_event *event) 340static void power_pmu_bhrb_enable(struct perf_event *event)
341{ 341{
342 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 342 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
343 343
344 if (!ppmu->bhrb_nr) 344 if (!ppmu->bhrb_nr)
345 return; 345 return;
@@ -354,7 +354,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
354 354
355static void power_pmu_bhrb_disable(struct perf_event *event) 355static void power_pmu_bhrb_disable(struct perf_event *event)
356{ 356{
357 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 357 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
358 358
359 if (!ppmu->bhrb_nr) 359 if (!ppmu->bhrb_nr)
360 return; 360 return;
@@ -1144,7 +1144,7 @@ static void power_pmu_disable(struct pmu *pmu)
1144 if (!ppmu) 1144 if (!ppmu)
1145 return; 1145 return;
1146 local_irq_save(flags); 1146 local_irq_save(flags);
1147 cpuhw = &__get_cpu_var(cpu_hw_events); 1147 cpuhw = this_cpu_ptr(&cpu_hw_events);
1148 1148
1149 if (!cpuhw->disabled) { 1149 if (!cpuhw->disabled) {
1150 /* 1150 /*
@@ -1211,7 +1211,7 @@ static void power_pmu_enable(struct pmu *pmu)
1211 return; 1211 return;
1212 local_irq_save(flags); 1212 local_irq_save(flags);
1213 1213
1214 cpuhw = &__get_cpu_var(cpu_hw_events); 1214 cpuhw = this_cpu_ptr(&cpu_hw_events);
1215 if (!cpuhw->disabled) 1215 if (!cpuhw->disabled)
1216 goto out; 1216 goto out;
1217 1217
@@ -1403,7 +1403,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
1403 * Add the event to the list (if there is room) 1403 * Add the event to the list (if there is room)
1404 * and check whether the total set is still feasible. 1404 * and check whether the total set is still feasible.
1405 */ 1405 */
1406 cpuhw = &__get_cpu_var(cpu_hw_events); 1406 cpuhw = this_cpu_ptr(&cpu_hw_events);
1407 n0 = cpuhw->n_events; 1407 n0 = cpuhw->n_events;
1408 if (n0 >= ppmu->n_counter) 1408 if (n0 >= ppmu->n_counter)
1409 goto out; 1409 goto out;
@@ -1469,7 +1469,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
1469 1469
1470 power_pmu_read(event); 1470 power_pmu_read(event);
1471 1471
1472 cpuhw = &__get_cpu_var(cpu_hw_events); 1472 cpuhw = this_cpu_ptr(&cpu_hw_events);
1473 for (i = 0; i < cpuhw->n_events; ++i) { 1473 for (i = 0; i < cpuhw->n_events; ++i) {
1474 if (event == cpuhw->event[i]) { 1474 if (event == cpuhw->event[i]) {
1475 while (++i < cpuhw->n_events) { 1475 while (++i < cpuhw->n_events) {
@@ -1575,7 +1575,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
1575 */ 1575 */
1576static void power_pmu_start_txn(struct pmu *pmu) 1576static void power_pmu_start_txn(struct pmu *pmu)
1577{ 1577{
1578 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1578 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1579 1579
1580 perf_pmu_disable(pmu); 1580 perf_pmu_disable(pmu);
1581 cpuhw->group_flag |= PERF_EVENT_TXN; 1581 cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1589,7 +1589,7 @@ static void power_pmu_start_txn(struct pmu *pmu)
1589 */ 1589 */
1590static void power_pmu_cancel_txn(struct pmu *pmu) 1590static void power_pmu_cancel_txn(struct pmu *pmu)
1591{ 1591{
1592 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1592 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1593 1593
1594 cpuhw->group_flag &= ~PERF_EVENT_TXN; 1594 cpuhw->group_flag &= ~PERF_EVENT_TXN;
1595 perf_pmu_enable(pmu); 1595 perf_pmu_enable(pmu);
@@ -1607,7 +1607,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
1607 1607
1608 if (!ppmu) 1608 if (!ppmu)
1609 return -EAGAIN; 1609 return -EAGAIN;
1610 cpuhw = &__get_cpu_var(cpu_hw_events); 1610 cpuhw = this_cpu_ptr(&cpu_hw_events);
1611 n = cpuhw->n_events; 1611 n = cpuhw->n_events;
1612 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) 1612 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
1613 return -EAGAIN; 1613 return -EAGAIN;
@@ -1964,7 +1964,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
1964 1964
1965 if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { 1965 if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
1966 struct cpu_hw_events *cpuhw; 1966 struct cpu_hw_events *cpuhw;
1967 cpuhw = &__get_cpu_var(cpu_hw_events); 1967 cpuhw = this_cpu_ptr(&cpu_hw_events);
1968 power_pmu_bhrb_read(cpuhw); 1968 power_pmu_bhrb_read(cpuhw);
1969 data.br_stack = &cpuhw->bhrb_stack; 1969 data.br_stack = &cpuhw->bhrb_stack;
1970 } 1970 }
@@ -2037,7 +2037,7 @@ static bool pmc_overflow(unsigned long val)
2037static void perf_event_interrupt(struct pt_regs *regs) 2037static void perf_event_interrupt(struct pt_regs *regs)
2038{ 2038{
2039 int i, j; 2039 int i, j;
2040 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 2040 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
2041 struct perf_event *event; 2041 struct perf_event *event;
2042 unsigned long val[8]; 2042 unsigned long val[8];
2043 int found, active; 2043 int found, active;
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index d35ae52c69dc..4acaea01fe03 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -210,7 +210,7 @@ static void fsl_emb_pmu_disable(struct pmu *pmu)
210 unsigned long flags; 210 unsigned long flags;
211 211
212 local_irq_save(flags); 212 local_irq_save(flags);
213 cpuhw = &__get_cpu_var(cpu_hw_events); 213 cpuhw = this_cpu_ptr(&cpu_hw_events);
214 214
215 if (!cpuhw->disabled) { 215 if (!cpuhw->disabled) {
216 cpuhw->disabled = 1; 216 cpuhw->disabled = 1;
@@ -249,7 +249,7 @@ static void fsl_emb_pmu_enable(struct pmu *pmu)
249 unsigned long flags; 249 unsigned long flags;
250 250
251 local_irq_save(flags); 251 local_irq_save(flags);
252 cpuhw = &__get_cpu_var(cpu_hw_events); 252 cpuhw = this_cpu_ptr(&cpu_hw_events);
253 if (!cpuhw->disabled) 253 if (!cpuhw->disabled)
254 goto out; 254 goto out;
255 255
@@ -653,7 +653,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
653static void perf_event_interrupt(struct pt_regs *regs) 653static void perf_event_interrupt(struct pt_regs *regs)
654{ 654{
655 int i; 655 int i;
656 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 656 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
657 struct perf_event *event; 657 struct perf_event *event;
658 unsigned long val; 658 unsigned long val;
659 int found = 0; 659 int found = 0;
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 8a106b4172e0..4c11421847be 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -82,7 +82,7 @@ static void iic_unmask(struct irq_data *d)
82 82
83static void iic_eoi(struct irq_data *d) 83static void iic_eoi(struct irq_data *d)
84{ 84{
85 struct iic *iic = &__get_cpu_var(cpu_iic); 85 struct iic *iic = this_cpu_ptr(&cpu_iic);
86 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); 86 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
87 BUG_ON(iic->eoi_ptr < 0); 87 BUG_ON(iic->eoi_ptr < 0);
88} 88}
@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void)
148 struct iic *iic; 148 struct iic *iic;
149 unsigned int virq; 149 unsigned int virq;
150 150
151 iic = &__get_cpu_var(cpu_iic); 151 iic = this_cpu_ptr(&cpu_iic);
152 *(unsigned long *) &pending = 152 *(unsigned long *) &pending =
153 in_be64((u64 __iomem *) &iic->regs->pending_destr); 153 in_be64((u64 __iomem *) &iic->regs->pending_destr);
154 if (!(pending.flags & CBE_IIC_IRQ_VALID)) 154 if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
163 163
164void iic_setup_cpu(void) 164void iic_setup_cpu(void)
165{ 165{
166 out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff); 166 out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
167} 167}
168 168
169u8 iic_get_target_id(int cpu) 169u8 iic_get_target_id(int cpu)
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index ae14c40b4b1c..e11273b2386d 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -48,7 +48,7 @@ void __trace_opal_entry(unsigned long opcode, unsigned long *args)
48 48
49 local_irq_save(flags); 49 local_irq_save(flags);
50 50
51 depth = &__get_cpu_var(opal_trace_depth); 51 depth = this_cpu_ptr(&opal_trace_depth);
52 52
53 if (*depth) 53 if (*depth)
54 goto out; 54 goto out;
@@ -69,7 +69,7 @@ void __trace_opal_exit(long opcode, unsigned long retval)
69 69
70 local_irq_save(flags); 70 local_irq_save(flags);
71 71
72 depth = &__get_cpu_var(opal_trace_depth); 72 depth = this_cpu_ptr(&opal_trace_depth);
73 73
74 if (*depth) 74 if (*depth)
75 goto out; 75 goto out;
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 5f3b23220b8e..a6c42f34303a 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -711,7 +711,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
711 711
712static unsigned int ps3_get_irq(void) 712static unsigned int ps3_get_irq(void)
713{ 713{
714 struct ps3_private *pd = &__get_cpu_var(ps3_private); 714 struct ps3_private *pd = this_cpu_ptr(&ps3_private);
715 u64 x = (pd->bmp.status & pd->bmp.mask); 715 u64 x = (pd->bmp.status & pd->bmp.mask);
716 unsigned int plug; 716 unsigned int plug;
717 717
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 1062f71f5a85..39049e4884fb 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -75,7 +75,7 @@ static atomic_t dtl_count;
75 */ 75 */
76static void consume_dtle(struct dtl_entry *dtle, u64 index) 76static void consume_dtle(struct dtl_entry *dtle, u64 index)
77{ 77{
78 struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings); 78 struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
79 struct dtl_entry *wp = dtlr->write_ptr; 79 struct dtl_entry *wp = dtlr->write_ptr;
80 struct lppaca *vpa = local_paca->lppaca_ptr; 80 struct lppaca *vpa = local_paca->lppaca_ptr;
81 81
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 4575f0c9e521..f02ec3ab428c 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -110,7 +110,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
110 if (opcode > MAX_HCALL_OPCODE) 110 if (opcode > MAX_HCALL_OPCODE)
111 return; 111 return;
112 112
113 h = &__get_cpu_var(hcall_stats)[opcode / 4]; 113 h = this_cpu_ptr(&hcall_stats[opcode / 4]);
114 h->tb_start = mftb(); 114 h->tb_start = mftb();
115 h->purr_start = mfspr(SPRN_PURR); 115 h->purr_start = mfspr(SPRN_PURR);
116} 116}
@@ -123,7 +123,7 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
123 if (opcode > MAX_HCALL_OPCODE) 123 if (opcode > MAX_HCALL_OPCODE)
124 return; 124 return;
125 125
126 h = &__get_cpu_var(hcall_stats)[opcode / 4]; 126 h = this_cpu_ptr(&hcall_stats[opcode / 4]);
127 h->num_calls++; 127 h->num_calls++;
128 h->tb_total += mftb() - h->tb_start; 128 h->tb_total += mftb() - h->tb_start;
129 h->purr_total += mfspr(SPRN_PURR) - h->purr_start; 129 h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index e32e00976a94..49bcf7c09c49 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -199,7 +199,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
199 199
200 local_irq_save(flags); /* to protect tcep and the page behind it */ 200 local_irq_save(flags); /* to protect tcep and the page behind it */
201 201
202 tcep = __get_cpu_var(tce_page); 202 tcep = __this_cpu_read(tce_page);
203 203
204 /* This is safe to do since interrupts are off when we're called 204 /* This is safe to do since interrupts are off when we're called
205 * from iommu_alloc{,_sg}() 205 * from iommu_alloc{,_sg}()
@@ -212,7 +212,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
212 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, 212 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
213 direction, attrs); 213 direction, attrs);
214 } 214 }
215 __get_cpu_var(tce_page) = tcep; 215 __this_cpu_write(tce_page, tcep);
216 } 216 }
217 217
218 rpn = __pa(uaddr) >> TCE_SHIFT; 218 rpn = __pa(uaddr) >> TCE_SHIFT;
@@ -398,7 +398,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
398 long l, limit; 398 long l, limit;
399 399
400 local_irq_disable(); /* to protect tcep and the page behind it */ 400 local_irq_disable(); /* to protect tcep and the page behind it */
401 tcep = __get_cpu_var(tce_page); 401 tcep = __this_cpu_read(tce_page);
402 402
403 if (!tcep) { 403 if (!tcep) {
404 tcep = (__be64 *)__get_free_page(GFP_ATOMIC); 404 tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
@@ -406,7 +406,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
406 local_irq_enable(); 406 local_irq_enable();
407 return -ENOMEM; 407 return -ENOMEM;
408 } 408 }
409 __get_cpu_var(tce_page) = tcep; 409 __this_cpu_write(tce_page, tcep);
410 } 410 }
411 411
412 proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; 412 proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8c509d5397c6..87d8792f906c 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -505,7 +505,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
505 unsigned long vpn; 505 unsigned long vpn;
506 unsigned long i, pix, rc; 506 unsigned long i, pix, rc;
507 unsigned long flags = 0; 507 unsigned long flags = 0;
508 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 508 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
509 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 509 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
510 unsigned long param[9]; 510 unsigned long param[9];
511 unsigned long hash, index, shift, hidx, slot; 511 unsigned long hash, index, shift, hidx, slot;
@@ -695,7 +695,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
695 695
696 local_irq_save(flags); 696 local_irq_save(flags);
697 697
698 depth = &__get_cpu_var(hcall_trace_depth); 698 depth = this_cpu_ptr(&hcall_trace_depth);
699 699
700 if (*depth) 700 if (*depth)
701 goto out; 701 goto out;
@@ -720,7 +720,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
720 720
721 local_irq_save(flags); 721 local_irq_save(flags);
722 722
723 depth = &__get_cpu_var(hcall_trace_depth); 723 depth = this_cpu_ptr(&hcall_trace_depth);
724 724
725 if (*depth) 725 if (*depth)
726 goto out; 726 goto out;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 5a4d0fc03b03..c3b2a7e81ddb 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -302,8 +302,8 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
302 /* If it isn't an extended log we can use the per cpu 64bit buffer */ 302 /* If it isn't an extended log we can use the per cpu 64bit buffer */
303 h = (struct rtas_error_log *)&savep[1]; 303 h = (struct rtas_error_log *)&savep[1];
304 if (!rtas_error_extended(h)) { 304 if (!rtas_error_extended(h)) {
305 memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64)); 305 memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
306 errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf); 306 errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
307 } else { 307 } else {
308 int len, error_log_length; 308 int len, error_log_length;
309 309
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index fe0cca477164..365249cd346b 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -155,7 +155,7 @@ int __init xics_smp_probe(void)
155 155
156void xics_teardown_cpu(void) 156void xics_teardown_cpu(void)
157{ 157{
158 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 158 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
159 159
160 /* 160 /*
161 * we have to reset the cppr index to 0 because we're 161 * we have to reset the cppr index to 0 because we're