author		Christoph Lameter <cl@linux.com>	2014-10-21 16:23:25 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2014-11-02 20:12:32 -0500
commit		69111bac42f5ceacdd22e30947837ceb2c4493ed
tree		717c5e3c231b078f970aeb5d895bd96f9efc8fdd
parent		0df1f2487d2f0d04703f142813d53615d62a1da4
powerpc: Replace __get_cpu_var uses
This still has not been merged and now powerpc is the only arch that does
not have this change. Sorry about missing linuxppc-dev before.

V2->V2
  - Fix up to work against 3.18-rc1

__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.

Other use cases are for storing and retrieving data from the current
processor's percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.

__get_cpu_var() is defined as:

	#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))

__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.

this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.

This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and fewer registers
are used when code is generated.

At the end of the patch set all uses of __get_cpu_var have been removed, so
the macro is removed too.

The patch set includes passes over all arches as well. Once these operations
are used throughout, then specialized macros can be defined in non-x86
arches as well in order to optimize per cpu access by e.g. using a global
register that may be set to the per cpu base.

Transformations done to __get_cpu_var()

1. Determine the address of the percpu instance of the current processor.

	DEFINE_PER_CPU(int, y);
	int *x = &__get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(&y);

2. Same as #1 but this time an array structure is involved.

	DEFINE_PER_CPU(int, y[20]);
	int *x = __get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(y);

3. Retrieve the content of the current processor's instance of a per cpu
   variable.

	DEFINE_PER_CPU(int, y);
	int x = __get_cpu_var(y);

   Converts to

	int x = __this_cpu_read(y);

4. Retrieve the content of a percpu struct.

	DEFINE_PER_CPU(struct mystruct, y);
	struct mystruct x = __get_cpu_var(y);

   Converts to

	memcpy(&x, this_cpu_ptr(&y), sizeof(x));

5. Assignment to a per cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y) = x;

   Converts to

	__this_cpu_write(y, x);

6. Increment/decrement etc. of a per cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y)++;

   Converts to

	__this_cpu_inc(y);

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
[mpe: Fix build errors caused by set/or_softirq_pending(), and rework
 assignment in __set_breakpoint() to use memcpy().]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
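As a compact companion to the six transformations above, the sketch below exercises each converted form in one place. It is illustrative only: example_counter, example_buf, example_state and example_conversions() are hypothetical names, not symbols touched by this patch, and the __this_cpu_* forms assume the caller already runs with preemption disabled.

	#include <linux/percpu.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct example_state {
		u64 last_tb;
		int active;
	};

	static DEFINE_PER_CPU(int, example_counter);
	static DEFINE_PER_CPU(int, example_buf[20]);
	static DEFINE_PER_CPU(struct example_state, example_state);

	/* Assumes preemption is already disabled for the __this_cpu_* ops. */
	static void example_conversions(void)
	{
		struct example_state snapshot;
		int *p, *arr, val;

		p = this_cpu_ptr(&example_counter);		/* pattern 1 */
		arr = this_cpu_ptr(example_buf);		/* pattern 2 */
		val = __this_cpu_read(example_counter);		/* pattern 3 */
		memcpy(&snapshot, this_cpu_ptr(&example_state),
		       sizeof(snapshot));			/* pattern 4 */
		__this_cpu_write(example_counter, val + 1);	/* pattern 5 */
		__this_cpu_inc(example_counter);		/* pattern 6 */

		(void)p;
		(void)arr;
	}

The this_cpu_ptr() forms keep an explicit address where one is still needed (patterns 1, 2 and 4), while the read/write/inc forms hand the per-cpu offset straight to the accessor so no intermediate address has to be computed.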
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/dbell.c		2
-rw-r--r--	arch/powerpc/kernel/hw_breakpoint.c	6
-rw-r--r--	arch/powerpc/kernel/iommu.c		2
-rw-r--r--	arch/powerpc/kernel/irq.c		4
-rw-r--r--	arch/powerpc/kernel/kgdb.c		2
-rw-r--r--	arch/powerpc/kernel/kprobes.c		6
-rw-r--r--	arch/powerpc/kernel/mce.c		24
-rw-r--r--	arch/powerpc/kernel/process.c		10
-rw-r--r--	arch/powerpc/kernel/smp.c		6
-rw-r--r--	arch/powerpc/kernel/sysfs.c		4
-rw-r--r--	arch/powerpc/kernel/time.c		22
-rw-r--r--	arch/powerpc/kernel/traps.c		8
12 files changed, 48 insertions, 48 deletions
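Many of the hunks below apply pattern 6 to a field of a per-cpu struct (the irq_stat interrupt counters). A minimal sketch of that specific shape, using a hypothetical example_irq_stat stand-in rather than the real irq_cpustat_t:

	#include <linux/percpu.h>

	/* Hypothetical stand-in for the per-cpu interrupt statistics. */
	struct example_irq_stat {
		unsigned int doorbell_irqs;
		unsigned int spurious_irqs;
	};

	static DEFINE_PER_CPU(struct example_irq_stat, example_irq_stat);

	/* Called with interrupts (and thus preemption) off, as in the handlers below. */
	static void example_count_doorbell(void)
	{
		/*
		 * The old form, __get_cpu_var(example_irq_stat).doorbell_irqs++,
		 * first computed the address of this CPU's struct and then
		 * incremented through it.  The this_cpu op takes the member
		 * expression directly, so the per-cpu offset is applied as part
		 * of the increment itself.
		 */
		__this_cpu_inc(example_irq_stat.doorbell_irqs);
	}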
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index d55c76c571f3..f4217819cc31 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs)
 
 	may_hard_irq_enable();
 
-	__get_cpu_var(irq_stat).doorbell_irqs++;
+	__this_cpu_inc(irq_stat.doorbell_irqs);
 
 	smp_ipi_demux();
 
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 1f7d84e2e8b2..05e804cdecaa 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type)
 int arch_install_hw_breakpoint(struct perf_event *bp)
 {
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
 	*slot = bp;
 
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
  */
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
-	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
 	if (*slot != bp) {
 		WARN_ONCE(1, "Can't find the breakpoint");
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
 	 */
 	rcu_read_lock();
 
-	bp = __get_cpu_var(bp_per_reg);
+	bp = __this_cpu_read(bp_per_reg);
 	if (!bp)
 		goto out;
 	info = counter_arch_bp(bp);
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a10642a0d861..71e60bfb89e2 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
 	 * We don't need to disable preemption here because any CPU can
 	 * safely use any IOMMU pool.
 	 */
-	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);
+	pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 
 	if (largealloc)
 		pool = &(tbl->large_pool);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c14383575fe8..8d87bb162ecd 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -114,7 +114,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 static inline notrace int decrementer_check_overflow(void)
 {
 	u64 now = get_tb_or_rtc();
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
 	return now >= *next_tb;
 }
@@ -499,7 +499,7 @@ void __do_irq(struct pt_regs *regs)
 
 	/* And finally process it */
 	if (unlikely(irq == NO_IRQ))
-		__get_cpu_var(irq_stat).spurious_irqs++;
+		__this_cpu_inc(irq_stat.spurious_irqs);
 	else
 		generic_handle_irq(irq);
 
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 8504657379f1..e77c3ccf8dcf 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
 {
 	struct thread_info *thread_info, *exception_thread_info;
 	struct thread_info *backup_current_thread_info =
-		&__get_cpu_var(kgdb_thread_info);
+		this_cpu_ptr(&kgdb_thread_info);
 
 	if (user_mode(regs))
 		return 0;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2f72af82513c..7c053f281406 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
 }
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_msr = regs->msr;
 }
 
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		ret = 1;
 		goto no_kprobe;
 	}
-	p = __get_cpu_var(current_kprobe);
+	p = __this_cpu_read(current_kprobe);
 	if (p->break_handler && p->break_handler(p, regs)) {
 		goto ss_probe;
 	}
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index a7fd4cb78b78..15c99b649b04 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
 		    uint64_t nip, uint64_t addr)
 {
 	uint64_t srr1;
-	int index = __get_cpu_var(mce_nest_count)++;
-	struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
+	int index = __this_cpu_inc_return(mce_nest_count);
+	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
 	/*
 	 * Return if we don't have enough space to log mce event.
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
  */
 int get_mce_event(struct machine_check_event *mce, bool release)
 {
-	int index = __get_cpu_var(mce_nest_count) - 1;
+	int index = __this_cpu_read(mce_nest_count) - 1;
 	struct machine_check_event *mc_evt;
 	int ret = 0;
 
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
 
 	/* Check if we have MCE info to process. */
 	if (index < MAX_MC_EVT) {
-		mc_evt = &__get_cpu_var(mce_event[index]);
+		mc_evt = this_cpu_ptr(&mce_event[index]);
 		/* Copy the event structure and release the original */
 		if (mce)
 			*mce = *mc_evt;
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
 	}
 	/* Decrement the count to free the slot. */
 	if (release)
-		__get_cpu_var(mce_nest_count)--;
+		__this_cpu_dec(mce_nest_count);
 
 	return ret;
 }
@@ -184,13 +184,13 @@ void machine_check_queue_event(void)
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return;
 
-	index = __get_cpu_var(mce_queue_count)++;
+	index = __this_cpu_inc_return(mce_queue_count);
 	/* If queue is full, just return for now. */
 	if (index >= MAX_MC_EVT) {
-		__get_cpu_var(mce_queue_count)--;
+		__this_cpu_dec(mce_queue_count);
 		return;
 	}
-	__get_cpu_var(mce_event_queue[index]) = evt;
+	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
 
 	/* Queue irq work to process this event later. */
 	irq_work_queue(&mce_event_process_work);
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work)
 	 * For now just print it to console.
 	 * TODO: log this error event to FSP or nvram.
 	 */
-	while (__get_cpu_var(mce_queue_count) > 0) {
-		index = __get_cpu_var(mce_queue_count) - 1;
+	while (__this_cpu_read(mce_queue_count) > 0) {
+		index = __this_cpu_read(mce_queue_count) - 1;
 		machine_check_print_event_info(
-				&__get_cpu_var(mce_event_queue[index]));
-		__get_cpu_var(mce_queue_count)--;
+				this_cpu_ptr(&mce_event_queue[index]));
+		__this_cpu_dec(mce_queue_count);
 	}
 }
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 923cd2daba89..91e132b495bf 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -499,7 +499,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk)
 
 void __set_breakpoint(struct arch_hw_breakpoint *brk)
 {
-	__get_cpu_var(current_brk) = *brk;
+	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
 
 	if (cpu_has_feature(CPU_FTR_DAWR))
 		set_dawr(brk);
@@ -842,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 * schedule DABR
 	 */
 #ifndef CONFIG_HAVE_HW_BREAKPOINT
-	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
 		__set_breakpoint(&new->thread.hw_brk);
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
@@ -856,7 +856,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 * Collect processor utilization data per process
 	 */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
 		long unsigned start_tb, current_tb;
 		start_tb = old_thread->start_tb;
 		cu->current_tb = current_tb = mfspr(SPRN_PURR);
@@ -866,7 +866,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	batch = &__get_cpu_var(ppc64_tlb_batch);
+	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	if (batch->active) {
 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
 		if (batch->index)
@@ -889,7 +889,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
-		batch = &__get_cpu_var(ppc64_tlb_batch);
+		batch = this_cpu_ptr(&ppc64_tlb_batch);
 		batch->active = 1;
 	}
 #endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 71e186d5f331..8b2d2dc8ef10 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,7 +243,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 
 irqreturn_t smp_ipi_demux(void)
 {
-	struct cpu_messages *info = &__get_cpu_var(ipi_message);
+	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
 	unsigned int all;
 
 	mb();	/* order any irq clear */
@@ -442,9 +442,9 @@ void generic_mach_cpu_die(void)
 	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
-	__get_cpu_var(cpu_state) = CPU_DEAD;
+	__this_cpu_write(cpu_state, CPU_DEAD);
 	smp_wmb();
-	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+	while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
 }
 
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 67fd2fd2620a..fa1fd8a0c867 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void)
 	ppc_set_pmu_inuse(1);
 
 	/* Only need to enable them once */
-	if (__get_cpu_var(pmcs_enabled))
+	if (__this_cpu_read(pmcs_enabled))
 		return;
 
-	__get_cpu_var(pmcs_enabled) = 1;
+	__this_cpu_write(pmcs_enabled, 1);
 
 	if (ppc_md.enable_pmcs)
 		ppc_md.enable_pmcs();
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7505599c2593..9f8ea617ff2c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void)
 
 DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
+#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
+#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
+#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
 
 #endif /* 32 vs 64 bit */
 
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void)
 static void __timer_interrupt(void)
 {
 	struct pt_regs *regs = get_irq_regs();
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
-	struct clock_event_device *evt = &__get_cpu_var(decrementers);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
 	u64 now;
 
 	trace_timer_interrupt_entry(regs);
@@ -498,7 +498,7 @@ static void __timer_interrupt(void)
 		*next_tb = ~(u64)0;
 		if (evt->event_handler)
 			evt->event_handler(evt);
-		__get_cpu_var(irq_stat).timer_irqs_event++;
+		__this_cpu_inc(irq_stat.timer_irqs_event);
 	} else {
 		now = *next_tb - now;
 		if (now <= DECREMENTER_MAX)
@@ -506,13 +506,13 @@ static void __timer_interrupt(void)
 		/* We may have raced with new irq work */
 		if (test_irq_work_pending())
 			set_dec(1);
-		__get_cpu_var(irq_stat).timer_irqs_others++;
+		__this_cpu_inc(irq_stat.timer_irqs_others);
 	}
 
 #ifdef CONFIG_PPC64
 	/* collect purr register values often, for accurate calculations */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
 		cu->current_tb = mfspr(SPRN_PURR);
 	}
 #endif
@@ -527,7 +527,7 @@ static void __timer_interrupt(void)
 void timer_interrupt(struct pt_regs * regs)
 {
 	struct pt_regs *old_regs;
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continue to take decrementer exceptions.
@@ -813,7 +813,7 @@ static void __init clocksource_init(void)
 static int decrementer_set_next_event(unsigned long evt,
 				      struct clock_event_device *dev)
 {
-	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
+	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
 	set_dec(evt);
 
 	/* We may have raced with new irq work */
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
 /* Interrupt handler for the timer broadcast IPI */
 void tick_broadcast_ipi_handler(void)
 {
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
 	*next_tb = get_tb_or_rtc();
 	__timer_interrupt();
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0dc43f9932cf..e6595b72269b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs)
 {
 	long handled = 0;
 
-	__get_cpu_var(irq_stat).mce_exceptions++;
+	__this_cpu_inc(irq_stat.mce_exceptions);
 
 	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 		handled = cur_cpu_spec->machine_check_early(regs);
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs)
 
 long hmi_exception_realmode(struct pt_regs *regs)
 {
-	__get_cpu_var(irq_stat).hmi_exceptions++;
+	__this_cpu_inc(irq_stat.hmi_exceptions);
 
 	if (ppc_md.hmi_exception_early)
 		ppc_md.hmi_exception_early(regs);
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs)
 	enum ctx_state prev_state = exception_enter();
 	int recover = 0;
 
-	__get_cpu_var(irq_stat).mce_exceptions++;
+	__this_cpu_inc(irq_stat.mce_exceptions);
 
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 
 void performance_monitor_exception(struct pt_regs *regs)
 {
-	__get_cpu_var(irq_stat).pmu_irqs++;
+	__this_cpu_inc(irq_stat.pmu_irqs);
 
 	perf_irq(regs);
 }