aboutsummaryrefslogtreecommitdiffstats
path: root/arch/tile
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-15 01:48:18 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-15 01:48:18 -0400
commit0429fbc0bdc297d64188483ba029a23773ae07b0 (patch)
tree67de46978c90f37540dd6ded1db20eb53a569030 /arch/tile
parent6929c358972facf2999f8768815c40dd88514fc2 (diff)
parent513d1a2884a49654f368b5fa25ef186e976bdada (diff)
Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu consistent-ops changes from Tejun Heo: "Way back, before the current percpu allocator was implemented, static and dynamic percpu memory areas were allocated and handled separately and had their own accessors. The distinction has been gone for many years now; however, the now duplicate two sets of accessors remained with the pointer based ones - this_cpu_*() - evolving various other operations over time. During the process, we also accumulated other inconsistent operations. This pull request contains Christoph's patches to clean up the duplicate accessor situation. __get_cpu_var() uses are replaced with this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr(). Unfortunately, the former sometimes is tricky thanks to C being a bit messy with the distinction between lvalues and pointers, which led to a rather ugly solution for cpumask_var_t involving the introduction of this_cpu_cpumask_var_ptr(). This converts most of the uses but not all. Christoph will follow up with the remaining conversions in this merge window and hopefully remove the obsolete accessors" * 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits) irqchip: Properly fetch the per cpu offset percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write. 
percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t Revert "powerpc: Replace __get_cpu_var uses" percpu: Remove __this_cpu_ptr clocksource: Replace __this_cpu_ptr with raw_cpu_ptr sparc: Replace __get_cpu_var uses avr32: Replace __get_cpu_var with __this_cpu_write blackfin: Replace __get_cpu_var uses tile: Use this_cpu_ptr() for hardware counters tile: Replace __get_cpu_var uses powerpc: Replace __get_cpu_var uses alpha: Replace __get_cpu_var ia64: Replace __get_cpu_var uses s390: cio driver &__get_cpu_var replacements s390: Replace __get_cpu_var uses mips: Replace __get_cpu_var uses MIPS: Replace __get_cpu_var uses in FPU emulator. arm: Replace __this_cpu_ptr with raw_cpu_ptr ...
Diffstat (limited to 'arch/tile')
-rw-r--r--arch/tile/include/asm/irqflags.h4
-rw-r--r--arch/tile/include/asm/mmu_context.h6
-rw-r--r--arch/tile/kernel/irq.c14
-rw-r--r--arch/tile/kernel/messaging.c4
-rw-r--r--arch/tile/kernel/perf_event.c12
-rw-r--r--arch/tile/kernel/process.c2
-rw-r--r--arch/tile/kernel/setup.c3
-rw-r--r--arch/tile/kernel/single_step.c4
-rw-r--r--arch/tile/kernel/smp.c2
-rw-r--r--arch/tile/kernel/smpboot.c6
-rw-r--r--arch/tile/kernel/time.c8
-rw-r--r--arch/tile/mm/highmem.c2
-rw-r--r--arch/tile/mm/init.c4
13 files changed, 36 insertions, 35 deletions
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 71af5747874d..60d62a292fce 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -140,12 +140,12 @@ extern unsigned int debug_smp_processor_id(void);
140 140
141/* 141/*
142 * Read the set of maskable interrupts. 142 * Read the set of maskable interrupts.
143 * We avoid the preemption warning here via __this_cpu_ptr since even 143 * We avoid the preemption warning here via raw_cpu_ptr since even
144 * if irqs are already enabled, it's harmless to read the wrong cpu's 144 * if irqs are already enabled, it's harmless to read the wrong cpu's
145 * enabled mask. 145 * enabled mask.
146 */ 146 */
147#define arch_local_irqs_enabled() \ 147#define arch_local_irqs_enabled() \
148 (*__this_cpu_ptr(&interrupts_enabled_mask)) 148 (*raw_cpu_ptr(&interrupts_enabled_mask))
149 149
150/* Re-enable all maskable interrupts. */ 150/* Re-enable all maskable interrupts. */
151#define arch_local_irq_enable() \ 151#define arch_local_irq_enable() \
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 4734215e2ad4..f67753db1f78 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -84,7 +84,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
84 * clear any pending DMA interrupts. 84 * clear any pending DMA interrupts.
85 */ 85 */
86 if (current->thread.tile_dma_state.enabled) 86 if (current->thread.tile_dma_state.enabled)
87 install_page_table(mm->pgd, __get_cpu_var(current_asid)); 87 install_page_table(mm->pgd, __this_cpu_read(current_asid));
88#endif 88#endif
89} 89}
90 90
@@ -96,12 +96,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
96 int cpu = smp_processor_id(); 96 int cpu = smp_processor_id();
97 97
98 /* Pick new ASID. */ 98 /* Pick new ASID. */
99 int asid = __get_cpu_var(current_asid) + 1; 99 int asid = __this_cpu_read(current_asid) + 1;
100 if (asid > max_asid) { 100 if (asid > max_asid) {
101 asid = min_asid; 101 asid = min_asid;
102 local_flush_tlb(); 102 local_flush_tlb();
103 } 103 }
104 __get_cpu_var(current_asid) = asid; 104 __this_cpu_write(current_asid, asid);
105 105
106 /* Clear cpu from the old mm, and set it in the new one. */ 106 /* Clear cpu from the old mm, and set it in the new one. */
107 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 107 cpumask_clear_cpu(cpu, mm_cpumask(prev));
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 637f2ffaa5f5..ba85765e1436 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -73,7 +73,7 @@ static DEFINE_PER_CPU(int, irq_depth);
73 */ 73 */
74void tile_dev_intr(struct pt_regs *regs, int intnum) 74void tile_dev_intr(struct pt_regs *regs, int intnum)
75{ 75{
76 int depth = __get_cpu_var(irq_depth)++; 76 int depth = __this_cpu_inc_return(irq_depth);
77 unsigned long original_irqs; 77 unsigned long original_irqs;
78 unsigned long remaining_irqs; 78 unsigned long remaining_irqs;
79 struct pt_regs *old_regs; 79 struct pt_regs *old_regs;
@@ -120,7 +120,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
120 120
121 /* Count device irqs; Linux IPIs are counted elsewhere. */ 121 /* Count device irqs; Linux IPIs are counted elsewhere. */
122 if (irq != IRQ_RESCHEDULE) 122 if (irq != IRQ_RESCHEDULE)
123 __get_cpu_var(irq_stat).irq_dev_intr_count++; 123 __this_cpu_inc(irq_stat.irq_dev_intr_count);
124 124
125 generic_handle_irq(irq); 125 generic_handle_irq(irq);
126 } 126 }
@@ -130,10 +130,10 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
130 * including any that were reenabled during interrupt 130 * including any that were reenabled during interrupt
131 * handling. 131 * handling.
132 */ 132 */
133 if (depth == 0) 133 if (depth == 1)
134 unmask_irqs(~__get_cpu_var(irq_disable_mask)); 134 unmask_irqs(~__this_cpu_read(irq_disable_mask));
135 135
136 __get_cpu_var(irq_depth)--; 136 __this_cpu_dec(irq_depth);
137 137
138 /* 138 /*
139 * Track time spent against the current process again and 139 * Track time spent against the current process again and
@@ -151,7 +151,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
151static void tile_irq_chip_enable(struct irq_data *d) 151static void tile_irq_chip_enable(struct irq_data *d)
152{ 152{
153 get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq); 153 get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
154 if (__get_cpu_var(irq_depth) == 0) 154 if (__this_cpu_read(irq_depth) == 0)
155 unmask_irqs(1UL << d->irq); 155 unmask_irqs(1UL << d->irq);
156 put_cpu_var(irq_disable_mask); 156 put_cpu_var(irq_disable_mask);
157} 157}
@@ -197,7 +197,7 @@ static void tile_irq_chip_ack(struct irq_data *d)
197 */ 197 */
198static void tile_irq_chip_eoi(struct irq_data *d) 198static void tile_irq_chip_eoi(struct irq_data *d)
199{ 199{
200 if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq))) 200 if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
201 unmask_irqs(1UL << d->irq); 201 unmask_irqs(1UL << d->irq);
202} 202}
203 203
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 7867266f9716..ac950be1318e 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(HV_MsgState, msg_state);
28void init_messaging(void) 28void init_messaging(void)
29{ 29{
30 /* Allocate storage for messages in kernel space */ 30 /* Allocate storage for messages in kernel space */
31 HV_MsgState *state = &__get_cpu_var(msg_state); 31 HV_MsgState *state = this_cpu_ptr(&msg_state);
32 int rc = hv_register_message_state(state); 32 int rc = hv_register_message_state(state);
33 if (rc != HV_OK) 33 if (rc != HV_OK)
34 panic("hv_register_message_state: error %d", rc); 34 panic("hv_register_message_state: error %d", rc);
@@ -96,7 +96,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
96 struct hv_driver_cb *cb = 96 struct hv_driver_cb *cb =
97 (struct hv_driver_cb *)him->intarg; 97 (struct hv_driver_cb *)him->intarg;
98 cb->callback(cb, him->intdata); 98 cb->callback(cb, him->intdata);
99 __get_cpu_var(irq_stat).irq_hv_msg_count++; 99 __this_cpu_inc(irq_stat.irq_hv_msg_count);
100 } 100 }
101 } 101 }
102 102
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 2bf6c9c135c1..bb509cee3b59 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -590,7 +590,7 @@ static int tile_event_set_period(struct perf_event *event)
590 */ 590 */
591static void tile_pmu_stop(struct perf_event *event, int flags) 591static void tile_pmu_stop(struct perf_event *event, int flags)
592{ 592{
593 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 593 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
594 struct hw_perf_event *hwc = &event->hw; 594 struct hw_perf_event *hwc = &event->hw;
595 int idx = hwc->idx; 595 int idx = hwc->idx;
596 596
@@ -616,7 +616,7 @@ static void tile_pmu_stop(struct perf_event *event, int flags)
616 */ 616 */
617static void tile_pmu_start(struct perf_event *event, int flags) 617static void tile_pmu_start(struct perf_event *event, int flags)
618{ 618{
619 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 619 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
620 int idx = event->hw.idx; 620 int idx = event->hw.idx;
621 621
622 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) 622 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -650,7 +650,7 @@ static void tile_pmu_start(struct perf_event *event, int flags)
650 */ 650 */
651static int tile_pmu_add(struct perf_event *event, int flags) 651static int tile_pmu_add(struct perf_event *event, int flags)
652{ 652{
653 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 653 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
654 struct hw_perf_event *hwc; 654 struct hw_perf_event *hwc;
655 unsigned long mask; 655 unsigned long mask;
656 int b, max_cnt; 656 int b, max_cnt;
@@ -706,7 +706,7 @@ static int tile_pmu_add(struct perf_event *event, int flags)
706 */ 706 */
707static void tile_pmu_del(struct perf_event *event, int flags) 707static void tile_pmu_del(struct perf_event *event, int flags)
708{ 708{
709 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 709 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
710 int i; 710 int i;
711 711
712 /* 712 /*
@@ -880,14 +880,14 @@ static struct pmu tilera_pmu = {
880int tile_pmu_handle_irq(struct pt_regs *regs, int fault) 880int tile_pmu_handle_irq(struct pt_regs *regs, int fault)
881{ 881{
882 struct perf_sample_data data; 882 struct perf_sample_data data;
883 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 883 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
884 struct perf_event *event; 884 struct perf_event *event;
885 struct hw_perf_event *hwc; 885 struct hw_perf_event *hwc;
886 u64 val; 886 u64 val;
887 unsigned long status; 887 unsigned long status;
888 int bit; 888 int bit;
889 889
890 __get_cpu_var(perf_irqs)++; 890 __this_cpu_inc(perf_irqs);
891 891
892 if (!atomic_read(&tile_active_events)) 892 if (!atomic_read(&tile_active_events))
893 return 0; 893 return 0;
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 16ed58948757..0050cbc1d9de 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -64,7 +64,7 @@ early_param("idle", idle_setup);
64 64
65void arch_cpu_idle(void) 65void arch_cpu_idle(void)
66{ 66{
67 __get_cpu_var(irq_stat).idle_timestamp = jiffies; 67 __this_cpu_write(irq_stat.idle_timestamp, jiffies);
68 _cpu_idle(); 68 _cpu_idle();
69} 69}
70 70
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 112ababa9e55..b9736ded06f2 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1218,7 +1218,8 @@ static void __init validate_hv(void)
1218 * various asid variables to their appropriate initial states. 1218 * various asid variables to their appropriate initial states.
1219 */ 1219 */
1220 asid_range = hv_inquire_asid(0); 1220 asid_range = hv_inquire_asid(0);
1221 __get_cpu_var(current_asid) = min_asid = asid_range.start; 1221 min_asid = asid_range.start;
1222 __this_cpu_write(current_asid, min_asid);
1222 max_asid = asid_range.start + asid_range.size - 1; 1223 max_asid = asid_range.start + asid_range.size - 1;
1223 1224
1224 if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model, 1225 if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index de07fa7d1315..6cb2ce31b5a2 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -740,7 +740,7 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
740 740
741void gx_singlestep_handle(struct pt_regs *regs, int fault_num) 741void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
742{ 742{
743 unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc); 743 unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
744 struct thread_info *info = (void *)current_thread_info(); 744 struct thread_info *info = (void *)current_thread_info();
745 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP); 745 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
746 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K); 746 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
@@ -766,7 +766,7 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
766 766
767void single_step_once(struct pt_regs *regs) 767void single_step_once(struct pt_regs *regs)
768{ 768{
769 unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc); 769 unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
770 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K); 770 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
771 771
772 *ss_pc = regs->pc; 772 *ss_pc = regs->pc;
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 19eaa62d456a..d3c4ed780ce2 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL(flush_icache_range);
189/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ 189/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
190static irqreturn_t handle_reschedule_ipi(int irq, void *token) 190static irqreturn_t handle_reschedule_ipi(int irq, void *token)
191{ 191{
192 __get_cpu_var(irq_stat).irq_resched_count++; 192 __this_cpu_inc(irq_stat.irq_resched_count);
193 scheduler_ipi(); 193 scheduler_ipi();
194 194
195 return IRQ_HANDLED; 195 return IRQ_HANDLED;
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 732e9d138661..0d59a1b60c74 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -41,7 +41,7 @@ void __init smp_prepare_boot_cpu(void)
41 int cpu = smp_processor_id(); 41 int cpu = smp_processor_id();
42 set_cpu_online(cpu, 1); 42 set_cpu_online(cpu, 1);
43 set_cpu_present(cpu, 1); 43 set_cpu_present(cpu, 1);
44 __get_cpu_var(cpu_state) = CPU_ONLINE; 44 __this_cpu_write(cpu_state, CPU_ONLINE);
45 45
46 init_messaging(); 46 init_messaging();
47} 47}
@@ -158,7 +158,7 @@ static void start_secondary(void)
158 /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */ 158 /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */
159 159
160 /* Initialize the current asid for our first page table. */ 160 /* Initialize the current asid for our first page table. */
161 __get_cpu_var(current_asid) = min_asid; 161 __this_cpu_write(current_asid, min_asid);
162 162
163 /* Set up this thread as another owner of the init_mm */ 163 /* Set up this thread as another owner of the init_mm */
164 atomic_inc(&init_mm.mm_count); 164 atomic_inc(&init_mm.mm_count);
@@ -201,7 +201,7 @@ void online_secondary(void)
201 notify_cpu_starting(smp_processor_id()); 201 notify_cpu_starting(smp_processor_id());
202 202
203 set_cpu_online(smp_processor_id(), 1); 203 set_cpu_online(smp_processor_id(), 1);
204 __get_cpu_var(cpu_state) = CPU_ONLINE; 204 __this_cpu_write(cpu_state, CPU_ONLINE);
205 205
206 /* Set up tile-specific state for this cpu. */ 206 /* Set up tile-specific state for this cpu. */
207 setup_cpu(0); 207 setup_cpu(0);
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index c1b362277fb7..b854a1cd0079 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -162,7 +162,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
162 162
163void setup_tile_timer(void) 163void setup_tile_timer(void)
164{ 164{
165 struct clock_event_device *evt = &__get_cpu_var(tile_timer); 165 struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
166 166
167 /* Fill in fields that are speed-specific. */ 167 /* Fill in fields that are speed-specific. */
168 clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC); 168 clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
@@ -182,7 +182,7 @@ void setup_tile_timer(void)
182void do_timer_interrupt(struct pt_regs *regs, int fault_num) 182void do_timer_interrupt(struct pt_regs *regs, int fault_num)
183{ 183{
184 struct pt_regs *old_regs = set_irq_regs(regs); 184 struct pt_regs *old_regs = set_irq_regs(regs);
185 struct clock_event_device *evt = &__get_cpu_var(tile_timer); 185 struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
186 186
187 /* 187 /*
188 * Mask the timer interrupt here, since we are a oneshot timer 188 * Mask the timer interrupt here, since we are a oneshot timer
@@ -194,7 +194,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
194 irq_enter(); 194 irq_enter();
195 195
196 /* Track interrupt count. */ 196 /* Track interrupt count. */
197 __get_cpu_var(irq_stat).irq_timer_count++; 197 __this_cpu_inc(irq_stat.irq_timer_count);
198 198
199 /* Call the generic timer handler */ 199 /* Call the generic timer handler */
200 evt->event_handler(evt); 200 evt->event_handler(evt);
@@ -235,7 +235,7 @@ cycles_t ns2cycles(unsigned long nsecs)
235 * We do not have to disable preemption here as each core has the same 235 * We do not have to disable preemption here as each core has the same
236 * clock frequency. 236 * clock frequency.
237 */ 237 */
238 struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer); 238 struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
239 239
240 /* 240 /*
241 * as in clocksource.h and x86's timer.h, we split the calculation 241 * as in clocksource.h and x86's timer.h, we split the calculation
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 0dc218294770..6aa2f2625447 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -103,7 +103,7 @@ static void kmap_atomic_register(struct page *page, int type,
103 spin_lock(&amp_lock); 103 spin_lock(&amp_lock);
104 104
105 /* With interrupts disabled, now fill in the per-cpu info. */ 105 /* With interrupts disabled, now fill in the per-cpu info. */
106 amp = &__get_cpu_var(amps).per_type[type]; 106 amp = this_cpu_ptr(&amps.per_type[type]);
107 amp->page = page; 107 amp->page = page;
108 amp->cpu = smp_processor_id(); 108 amp->cpu = smp_processor_id();
109 amp->va = va; 109 amp->va = va;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index a092e393bd20..caa270165f86 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -593,14 +593,14 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
593 interrupt_mask_set_mask(-1ULL); 593 interrupt_mask_set_mask(-1ULL);
594 rc = flush_and_install_context(__pa(pgtables), 594 rc = flush_and_install_context(__pa(pgtables),
595 init_pgprot((unsigned long)pgtables), 595 init_pgprot((unsigned long)pgtables),
596 __get_cpu_var(current_asid), 596 __this_cpu_read(current_asid),
597 cpumask_bits(my_cpu_mask)); 597 cpumask_bits(my_cpu_mask));
598 interrupt_mask_restore_mask(irqmask); 598 interrupt_mask_restore_mask(irqmask);
599 BUG_ON(rc != 0); 599 BUG_ON(rc != 0);
600 600
601 /* Copy the page table back to the normal swapper_pg_dir. */ 601 /* Copy the page table back to the normal swapper_pg_dir. */
602 memcpy(pgd_base, pgtables, sizeof(pgtables)); 602 memcpy(pgd_base, pgtables, sizeof(pgtables));
603 __install_page_table(pgd_base, __get_cpu_var(current_asid), 603 __install_page_table(pgd_base, __this_cpu_read(current_asid),
604 swapper_pgprot); 604 swapper_pgprot);
605 605
606 /* 606 /*