author     Alex Shi <alex.shi@intel.com>  2012-05-11 03:35:27 -0400
committer  Tejun Heo <tj@kernel.org>      2012-05-14 17:15:31 -0400
commit     c6ae41e7d469f00d9c92a2b2887c7235d121c009 (patch)
tree       b16d65641c1e99a622ab460aee9f5b13e1948a25 /arch
parent     19e8d69c543f8f62050099892b138e981db952cc (diff)
x86: replace percpu_xxx funcs with this_cpu_xxx
Since the percpu_xxx() functions duplicate this_cpu_xxx(), remove the
percpu_xxx() definitions and replace their users with this_cpu_xxx().
There is no functional change in this patch; it is just preparation for
removing the percpu_xxx() family later. On x86 the this_cpu_xxx()
functions are the same as __this_cpu_xxx(), with no unnecessary
preempt enable/disable.

Thanks to Stephen Rothwell, who found and fixed an i386 build error in
the patch. Thanks also to Andrew Morton, who kept the patchset updated
against Linus' tree.

Signed-off-by: Alex Shi <alex.shi@intel.com>
Acked-by: Christoph Lameter <cl@gentwo.org>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
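The conversion pattern is mechanical; a minimal sketch of the old and
new accessors side by side (demo_count and demo() are hypothetical,
for illustration only, not variables touched by this patch):

	#include <linux/percpu.h>

	/* Hypothetical per-CPU variable, not part of this patch. */
	static DEFINE_PER_CPU(int, demo_count);

	static void demo(void)
	{
		int v;

		v = this_cpu_read(demo_count);     /* was: percpu_read(demo_count) */
		this_cpu_write(demo_count, v + 1); /* was: percpu_write(demo_count, v + 1) */
		this_cpu_inc(demo_count);          /* was: percpu_inc(demo_count) */
	}

On x86 each of these compiles to a single segment-prefixed instruction
(%gs on 64-bit, %fs on 32-bit), so no preempt_disable()/preempt_enable()
pair is needed around the access.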
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/compat.h          2
-rw-r--r--  arch/x86/include/asm/current.h         2
-rw-r--r--  arch/x86/include/asm/desc.h            1
-rw-r--r--  arch/x86/include/asm/fpu-internal.h    6
-rw-r--r--  arch/x86/include/asm/hardirq.h         9
-rw-r--r--  arch/x86/include/asm/irq_regs.h        4
-rw-r--r--  arch/x86/include/asm/mmu_context.h    12
-rw-r--r--  arch/x86/include/asm/percpu.h          8
-rw-r--r--  arch/x86/include/asm/smp.h             4
-rw-r--r--  arch/x86/include/asm/stackprotector.h  4
-rw-r--r--  arch/x86/include/asm/thread_info.h     2
-rw-r--r--  arch/x86/include/asm/tlbflush.h        4
-rw-r--r--  arch/x86/kernel/cpu/common.c           2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c       4
-rw-r--r--  arch/x86/kernel/i387.c                 2
-rw-r--r--  arch/x86/kernel/nmi_selftest.c         1
-rw-r--r--  arch/x86/kernel/paravirt.c            12
-rw-r--r--  arch/x86/kernel/process.c              2
-rw-r--r--  arch/x86/kernel/process_32.c           2
-rw-r--r--  arch/x86/kernel/process_64.c          10
-rw-r--r--  arch/x86/mm/tlb.c                     10
21 files changed, 53 insertions, 50 deletions
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index d6805798d6fc..fedf32b73e65 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -229,7 +229,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
 		sp = task_pt_regs(current)->sp;
 	} else {
 		/* -128 for the x32 ABI redzone */
-		sp = percpu_read(old_rsp) - 128;
+		sp = this_cpu_read(old_rsp) - 128;
 	}
 
 	return (void __user *)round_down(sp - len, 16);
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 4d447b732d82..9476c04ee635 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return percpu_read_stable(current_task);
+	return this_cpu_read_stable(current_task);
 }
 
 #define current get_current()
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e95822d683f4..8bf1c06070d5 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -6,6 +6,7 @@
 #include <asm/mmu.h>
 
 #include <linux/smp.h>
+#include <linux/percpu.h>
 
 static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
 {
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 4fa88154e4de..75f4c6d6a331 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -290,14 +290,14 @@ static inline int __thread_has_fpu(struct task_struct *tsk)
 static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
 	tsk->thread.fpu.has_fpu = 0;
-	percpu_write(fpu_owner_task, NULL);
+	this_cpu_write(fpu_owner_task, NULL);
 }
 
 /* Must be paired with a 'clts' before! */
 static inline void __thread_set_has_fpu(struct task_struct *tsk)
 {
 	tsk->thread.fpu.has_fpu = 1;
-	percpu_write(fpu_owner_task, tsk);
+	this_cpu_write(fpu_owner_task, tsk);
 }
 
 /*
@@ -344,7 +344,7 @@ typedef struct { int preload; } fpu_switch_t;
  */
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
-	return new == percpu_read_stable(fpu_owner_task) &&
+	return new == this_cpu_read_stable(fpu_owner_task) &&
 		cpu == new->thread.fpu.last_cpu;
 }
 
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 382f75d735f3..d3895dbf4ddb 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -35,14 +35,15 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define inc_irq_stat(member)	percpu_inc(irq_stat.member)
+#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
 
-#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)
+#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING
 
-#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
-#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))
+#define set_softirq_pending(x)	\
+		this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
 
 extern void ack_bad_irq(unsigned int irq);
 
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index 77843225b7ea..d82250b1debb 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -15,7 +15,7 @@ DECLARE_PER_CPU(struct pt_regs *, irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-	return percpu_read(irq_regs);
+	return this_cpu_read(irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
@@ -23,7 +23,7 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 	struct pt_regs *old_regs;
 
 	old_regs = get_irq_regs();
-	percpu_write(irq_regs, new_regs);
+	this_cpu_write(irq_regs, new_regs);
 
 	return old_regs;
 }
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 69021528b43c..cdbf36776106 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -25,8 +25,8 @@ void destroy_context(struct mm_struct *mm);
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
-		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
 #endif
 }
 
@@ -37,8 +37,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	if (likely(prev != next)) {
 #ifdef CONFIG_SMP
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		percpu_write(cpu_tlbstate.active_mm, next);
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		this_cpu_write(cpu_tlbstate.active_mm, next);
 #endif
 		cpumask_set_cpu(cpu, mm_cpumask(next));
 
@@ -56,8 +56,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	}
 #ifdef CONFIG_SMP
 	else {
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
 		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
 			/* We were in lazy tlb mode and leave_mm disabled
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 7a11910a63c4..967ee3be5c0a 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -46,7 +46,7 @@
 
 #ifdef CONFIG_SMP
 #define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
-#define __my_cpu_offset		percpu_read(this_cpu_off)
+#define __my_cpu_offset		this_cpu_read(this_cpu_off)
 
 /*
  * Compared to the generic __my_cpu_offset version, the following
@@ -352,15 +352,15 @@ do { \
 
 /*
  * percpu_read() makes gcc load the percpu variable every time it is
- * accessed while percpu_read_stable() allows the value to be cached.
- * percpu_read_stable() is more efficient and can be used if its value
+ * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read_stable() is more efficient and can be used if its value
  * is guaranteed to be valid across cpus.  The current users include
  * get_current() and get_thread_info() both of which are actually
  * per-thread variables implemented as per-cpu variables and thus
  * stable for the duration of the respective task.
  */
 #define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
-#define percpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
+#define this_cpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
 #define percpu_write(var, val)		percpu_to_op("mov", var, val)
 #define percpu_add(var, val)		percpu_add_op(var, val)
 #define percpu_sub(var, val)		percpu_add_op(var, -(val))
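The distinction documented in the comment above matters when a per-cpu
value is read more than once: this_cpu_read() forces a fresh load on
every access, which stays correct if the task migrates between CPUs,
while this_cpu_read_stable() lets gcc cache the result, which is safe
only for values such as current_task that are the same on whichever CPU
the task happens to run. A minimal sketch (the helper function below is
hypothetical, for illustration only):

	/* Hypothetical helper, not part of this patch. */
	static void read_vs_read_stable(void)
	{
		/* May be hoisted and cached by gcc across the function: */
		struct task_struct *t = this_cpu_read_stable(current_task);

		/* Reloaded from the per-cpu segment on every access: */
		struct pt_regs *r = this_cpu_read(irq_regs);

		(void)t;
		(void)r;
	}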
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 0434c400287c..e276f6bb6524 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -188,11 +188,11 @@ extern unsigned disabled_cpus __cpuinitdata;
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define raw_smp_processor_id() (percpu_read(cpu_number))
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
 extern int safe_smp_processor_id(void);
 
 #elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() (percpu_read(cpu_number))
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
 
 #define stack_smp_processor_id()	\
 ({	\
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index b5d9533d2c38..6a998598f172 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -75,9 +75,9 @@ static __always_inline void boot_init_stack_canary(void)
 
 	current->stack_canary = canary;
 #ifdef CONFIG_X86_64
-	percpu_write(irq_stack_union.stack_canary, canary);
+	this_cpu_write(irq_stack_union.stack_canary, canary);
 #else
-	percpu_write(stack_canary.canary, canary);
+	this_cpu_write(stack_canary.canary, canary);
 #endif
 }
 
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ad6df8ccd715..f67fd89c874b 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -222,7 +222,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	ti = (void *)(percpu_read_stable(kernel_stack) +
+	ti = (void *)(this_cpu_read_stable(kernel_stack) +
 		      KERNEL_STACK_OFFSET - THREAD_SIZE);
 	return ti;
 }
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c0e108e08079..1620d23f14d7 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -156,8 +156,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
-	percpu_write(cpu_tlbstate.state, 0);
-	percpu_write(cpu_tlbstate.active_mm, &init_mm);
+	this_cpu_write(cpu_tlbstate.state, 0);
+	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
 
 #endif	/* SMP */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cf79302198a6..82f29e70d058 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1185,7 +1185,7 @@ void __cpuinit cpu_init(void)
 	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
-	if (cpu != 0 && percpu_read(numa_node) == 0 &&
+	if (cpu != 0 && this_cpu_read(numa_node) == 0 &&
 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
 		set_numa_node(early_cpu_to_node(cpu));
 #endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d086a09c087d..c0276d5d9bd4 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -583,7 +583,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 	struct mce m;
 	int i;
 
-	percpu_inc(mce_poll_count);
+	this_cpu_inc(mce_poll_count);
 
 	mce_gather_info(&m, NULL);
 
@@ -1015,7 +1015,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
 	atomic_inc(&mce_entry);
 
-	percpu_inc(mce_exception_count);
+	this_cpu_inc(mce_exception_count);
 
 	if (!banks)
 		goto out;
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 2d6e6498c176..f250431fb505 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -88,7 +88,7 @@ void kernel_fpu_begin(void)
 		__thread_clear_has_fpu(me);
 		/* We do 'stts()' in kernel_fpu_end() */
 	} else {
-		percpu_write(fpu_owner_task, NULL);
+		this_cpu_write(fpu_owner_task, NULL);
 		clts();
 	}
 }
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index 2c39dcd510fa..ff3698625081 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -13,6 +13,7 @@
 #include <linux/cpumask.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/percpu.h>
 
 #include <asm/apic.h>
 #include <asm/nmi.h>
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ab137605e694..9ce885996fd7 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -241,16 +241,16 @@ static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LA
 
 static inline void enter_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(percpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
+	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
 
-	percpu_write(paravirt_lazy_mode, mode);
+	this_cpu_write(paravirt_lazy_mode, mode);
 }
 
 static void leave_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(percpu_read(paravirt_lazy_mode) != mode);
+	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);
 
-	percpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
+	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
 }
 
 void paravirt_enter_lazy_mmu(void)
@@ -267,7 +267,7 @@ void paravirt_start_context_switch(struct task_struct *prev)
 {
 	BUG_ON(preemptible());
 
-	if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
+	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
 		arch_leave_lazy_mmu_mode();
 		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
 	}
@@ -289,7 +289,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	if (in_interrupt())
 		return PARAVIRT_LAZY_NONE;
 
-	return percpu_read(paravirt_lazy_mode);
+	return this_cpu_read(paravirt_lazy_mode);
 }
 
 void arch_flush_lazy_mmu_mode(void)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 1d92a5ab6e8b..857adffb7080 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -377,7 +377,7 @@ static inline void play_dead(void)
 #ifdef CONFIG_X86_64
 void enter_idle(void)
 {
-	percpu_write(is_idle, 1);
+	this_cpu_write(is_idle, 1);
 	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index ae6847303e26..01d8d40ccaf6 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -302,7 +302,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	switch_fpu_finish(next_p, fpu);
 
-	percpu_write(current_task, next_p);
+	this_cpu_write(current_task, next_p);
 
 	return prev_p;
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 43d8b48b23e6..28e810255a0a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -237,7 +237,7 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
 	current->thread.usersp	= new_sp;
 	regs->ip		= new_ip;
 	regs->sp		= new_sp;
-	percpu_write(old_rsp, new_sp);
+	this_cpu_write(old_rsp, new_sp);
 	regs->cs		= _cs;
 	regs->ss		= _ss;
 	regs->flags		= X86_EFLAGS_IF;
@@ -359,11 +359,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
-	prev->usersp = percpu_read(old_rsp);
-	percpu_write(old_rsp, next->usersp);
-	percpu_write(current_task, next_p);
+	prev->usersp = this_cpu_read(old_rsp);
+	this_cpu_write(old_rsp, next->usersp);
+	this_cpu_write(current_task, next_p);
 
-	percpu_write(kernel_stack,
+	this_cpu_write(kernel_stack,
 		     (unsigned long)task_stack_page(next_p) +
 		     THREAD_SIZE - KERNEL_STACK_OFFSET);
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index d6c0418c3e47..3804471db104 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -61,10 +61,10 @@ static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
  */
 void leave_mm(int cpu)
 {
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
 	cpumask_clear_cpu(cpu,
-			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
+			  mm_cpumask(this_cpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -152,8 +152,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 	 * BUG();
 	 */
 
-	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
-		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
+		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 			if (f->flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -322,7 +322,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 static void do_flush_tlb_all(void *info)
 {
 	__flush_tlb_all();
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
 }
 