-rw-r--r--  arch/x86/include/asm/compat.h          |   2
-rw-r--r--  arch/x86/include/asm/current.h         |   2
-rw-r--r--  arch/x86/include/asm/desc.h            |   1
-rw-r--r--  arch/x86/include/asm/fpu-internal.h    |   6
-rw-r--r--  arch/x86/include/asm/hardirq.h         |   9
-rw-r--r--  arch/x86/include/asm/irq_regs.h        |   4
-rw-r--r--  arch/x86/include/asm/mmu_context.h     |  12
-rw-r--r--  arch/x86/include/asm/percpu.h          |  24
-rw-r--r--  arch/x86/include/asm/smp.h             |   4
-rw-r--r--  arch/x86/include/asm/stackprotector.h  |   4
-rw-r--r--  arch/x86/include/asm/thread_info.h     |   2
-rw-r--r--  arch/x86/include/asm/tlbflush.h        |   4
-rw-r--r--  arch/x86/kernel/cpu/common.c           |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c       |   4
-rw-r--r--  arch/x86/kernel/i387.c                 |   2
-rw-r--r--  arch/x86/kernel/nmi_selftest.c         |   1
-rw-r--r--  arch/x86/kernel/paravirt.c             |  12
-rw-r--r--  arch/x86/kernel/process.c              |   2
-rw-r--r--  arch/x86/kernel/process_32.c           |   2
-rw-r--r--  arch/x86/kernel/process_64.c           |  10
-rw-r--r--  arch/x86/mm/tlb.c                      |  10
-rw-r--r--  include/linux/percpu.h                 |  54
-rw-r--r--  include/linux/topology.h               |   4
-rw-r--r--  net/netfilter/xt_TEE.c                 |  12
-rw-r--r--  net/socket.c                           |   4
25 files changed, 69 insertions(+), 124 deletions(-)
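Every hunk below applies the same mechanical substitution: the x86-only percpu_*() accessor wrappers are replaced by the generic this_cpu_*() operations (or by the __this_cpu_*() forms where the caller already runs with preemption disabled, as in net/netfilter/xt_TEE.c further down). A minimal before/after sketch of the pattern, assembled from accessors that actually appear in this patch:

        /* before: x86-only percpu_*() wrappers, removed by this patch */
        regs = percpu_read(irq_regs);
        percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
        percpu_inc(mce_poll_count);

        /* after: the generic this_cpu_*() operations */
        regs = this_cpu_read(irq_regs);
        this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
        this_cpu_inc(mce_poll_count);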
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index d6805798d6fc..fedf32b73e65 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -229,7 +229,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
 		sp = task_pt_regs(current)->sp;
 	} else {
 		/* -128 for the x32 ABI redzone */
-		sp = percpu_read(old_rsp) - 128;
+		sp = this_cpu_read(old_rsp) - 128;
 	}
 
 	return (void __user *)round_down(sp - len, 16);
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 4d447b732d82..9476c04ee635 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return percpu_read_stable(current_task);
+	return this_cpu_read_stable(current_task);
 }
 
 #define current get_current()
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e95822d683f4..8bf1c06070d5 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -6,6 +6,7 @@
 #include <asm/mmu.h>
 
 #include <linux/smp.h>
+#include <linux/percpu.h>
 
 static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
 {
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 4fa88154e4de..75f4c6d6a331 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -290,14 +290,14 @@ static inline int __thread_has_fpu(struct task_struct *tsk)
 static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
 	tsk->thread.fpu.has_fpu = 0;
-	percpu_write(fpu_owner_task, NULL);
+	this_cpu_write(fpu_owner_task, NULL);
 }
 
 /* Must be paired with a 'clts' before! */
 static inline void __thread_set_has_fpu(struct task_struct *tsk)
 {
 	tsk->thread.fpu.has_fpu = 1;
-	percpu_write(fpu_owner_task, tsk);
+	this_cpu_write(fpu_owner_task, tsk);
 }
 
 /*
@@ -344,7 +344,7 @@ typedef struct { int preload; } fpu_switch_t;
  */
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
-	return new == percpu_read_stable(fpu_owner_task) &&
+	return new == this_cpu_read_stable(fpu_owner_task) &&
 		cpu == new->thread.fpu.last_cpu;
 }
 
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 382f75d735f3..d3895dbf4ddb 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -35,14 +35,15 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define inc_irq_stat(member)	percpu_inc(irq_stat.member)
+#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
 
-#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)
+#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING
 
-#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
-#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))
+#define set_softirq_pending(x)	\
+		this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
 
 extern void ack_bad_irq(unsigned int irq);
 
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index 77843225b7ea..d82250b1debb 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -15,7 +15,7 @@ DECLARE_PER_CPU(struct pt_regs *, irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-	return percpu_read(irq_regs);
+	return this_cpu_read(irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
@@ -23,7 +23,7 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 	struct pt_regs *old_regs;
 
 	old_regs = get_irq_regs();
-	percpu_write(irq_regs, new_regs);
+	this_cpu_write(irq_regs, new_regs);
 
 	return old_regs;
 }
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 69021528b43c..cdbf36776106 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -25,8 +25,8 @@ void destroy_context(struct mm_struct *mm);
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
-		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
 #endif
 }
 
@@ -37,8 +37,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	if (likely(prev != next)) {
 #ifdef CONFIG_SMP
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		percpu_write(cpu_tlbstate.active_mm, next);
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		this_cpu_write(cpu_tlbstate.active_mm, next);
 #endif
 		cpumask_set_cpu(cpu, mm_cpumask(next));
 
@@ -56,8 +56,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	}
 #ifdef CONFIG_SMP
 	else {
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
 		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
 			/* We were in lazy tlb mode and leave_mm disabled
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 7a11910a63c4..d9b8e3f7f42a 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -46,7 +46,7 @@
 
 #ifdef CONFIG_SMP
 #define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
-#define __my_cpu_offset		percpu_read(this_cpu_off)
+#define __my_cpu_offset		this_cpu_read(this_cpu_off)
 
 /*
  * Compared to the generic __my_cpu_offset version, the following
@@ -351,23 +351,15 @@ do {	\
 })
 
 /*
- * percpu_read() makes gcc load the percpu variable every time it is
- * accessed while percpu_read_stable() allows the value to be cached.
- * percpu_read_stable() is more efficient and can be used if its value
+ * this_cpu_read() makes gcc load the percpu variable every time it is
+ * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read_stable() is more efficient and can be used if its value
  * is guaranteed to be valid across cpus.  The current users include
  * get_current() and get_thread_info() both of which are actually
  * per-thread variables implemented as per-cpu variables and thus
  * stable for the duration of the respective task.
  */
-#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
-#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
-#define percpu_write(var, val)		percpu_to_op("mov", var, val)
-#define percpu_add(var, val)		percpu_add_op(var, val)
-#define percpu_sub(var, val)		percpu_add_op(var, -(val))
-#define percpu_and(var, val)		percpu_to_op("and", var, val)
-#define percpu_or(var, val)		percpu_to_op("or", var, val)
-#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
-#define percpu_inc(var)			percpu_unary_op("inc", var)
+#define this_cpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
 
 #define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
@@ -512,7 +504,11 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
 {
 	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
 
-	return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0;
+#ifdef CONFIG_X86_64
+	return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_8(*a)) != 0;
+#else
+	return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_4(*a)) != 0;
+#endif
 }
 
 static inline int x86_this_cpu_variable_test_bit(int nr,
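One subtlety in this file: only this_cpu_read_stable() keeps the "p" (address, non-volatile) asm constraint, while the ordinary reads use "m". A hedged sketch of the difference, reusing accessors from elsewhere in this patch (the pairing below is illustrative, not code from the patch):

        /* "m" constraint: gcc re-issues the %gs-relative load at every use */
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());

        /* "p" constraint: gcc may hoist and cache the load; safe only for
         * values fixed for the lifetime of the running task, e.g.
         * get_current(), which wraps this_cpu_read_stable(current_task) */
        struct task_struct *me = get_current();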
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index f8cbc6f20e31..f48394513c37 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -191,11 +191,11 @@ extern unsigned disabled_cpus __cpuinitdata;
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define raw_smp_processor_id() (percpu_read(cpu_number))
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
 extern int safe_smp_processor_id(void);
 
 #elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() (percpu_read(cpu_number))
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
 
 #define stack_smp_processor_id()	\
 ({	\
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index b5d9533d2c38..6a998598f172 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -75,9 +75,9 @@ static __always_inline void boot_init_stack_canary(void)
 
 	current->stack_canary = canary;
 #ifdef CONFIG_X86_64
-	percpu_write(irq_stack_union.stack_canary, canary);
+	this_cpu_write(irq_stack_union.stack_canary, canary);
 #else
-	percpu_write(stack_canary.canary, canary);
+	this_cpu_write(stack_canary.canary, canary);
 #endif
 }
 
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 73cfe0d309c9..3c9aebc00d39 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -204,7 +204,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	ti = (void *)(percpu_read_stable(kernel_stack) +
+	ti = (void *)(this_cpu_read_stable(kernel_stack) +
 		      KERNEL_STACK_OFFSET - THREAD_SIZE);
 	return ti;
 }
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c0e108e08079..1620d23f14d7 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -156,8 +156,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
-	percpu_write(cpu_tlbstate.state, 0);
-	percpu_write(cpu_tlbstate.active_mm, &init_mm);
+	this_cpu_write(cpu_tlbstate.state, 0);
+	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
 
 #endif	/* SMP */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cf79302198a6..82f29e70d058 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1185,7 +1185,7 @@ void __cpuinit cpu_init(void)
 	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
-	if (cpu != 0 && percpu_read(numa_node) == 0 &&
+	if (cpu != 0 && this_cpu_read(numa_node) == 0 &&
 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
 		set_numa_node(early_cpu_to_node(cpu));
 #endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 11c9166c3337..297edb1b1fb3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -583,7 +583,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 	struct mce m;
 	int i;
 
-	percpu_inc(mce_poll_count);
+	this_cpu_inc(mce_poll_count);
 
 	mce_gather_info(&m, NULL);
 
@@ -1017,7 +1017,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
 	atomic_inc(&mce_entry);
 
-	percpu_inc(mce_exception_count);
+	this_cpu_inc(mce_exception_count);
 
 	if (!banks)
 		goto out;
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 2d6e6498c176..f250431fb505 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -88,7 +88,7 @@ void kernel_fpu_begin(void)
 		__thread_clear_has_fpu(me);
 		/* We do 'stts()' in kernel_fpu_end() */
 	} else {
-		percpu_write(fpu_owner_task, NULL);
+		this_cpu_write(fpu_owner_task, NULL);
 		clts();
 	}
 }
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index 2c39dcd510fa..ff3698625081 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -13,6 +13,7 @@
 #include <linux/cpumask.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/percpu.h>
 
 #include <asm/apic.h>
 #include <asm/nmi.h>
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ab137605e694..9ce885996fd7 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -241,16 +241,16 @@ static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LA
 
 static inline void enter_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(percpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
+	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
 
-	percpu_write(paravirt_lazy_mode, mode);
+	this_cpu_write(paravirt_lazy_mode, mode);
 }
 
 static void leave_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(percpu_read(paravirt_lazy_mode) != mode);
+	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);
 
-	percpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
+	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
 }
 
 void paravirt_enter_lazy_mmu(void)
@@ -267,7 +267,7 @@ void paravirt_start_context_switch(struct task_struct *prev)
 {
 	BUG_ON(preemptible());
 
-	if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
+	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
 		arch_leave_lazy_mmu_mode();
 		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
 	}
@@ -289,7 +289,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	if (in_interrupt())
 		return PARAVIRT_LAZY_NONE;
 
-	return percpu_read(paravirt_lazy_mode);
+	return this_cpu_read(paravirt_lazy_mode);
 }
 
 void arch_flush_lazy_mmu_mode(void)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e8173154800d..dc8ca8ea78c4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -385,7 +385,7 @@ static inline void play_dead(void)
 #ifdef CONFIG_X86_64
 void enter_idle(void)
 {
-	percpu_write(is_idle, 1);
+	this_cpu_write(is_idle, 1);
 	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index ae6847303e26..01d8d40ccaf6 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -302,7 +302,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	switch_fpu_finish(next_p, fpu);
 
-	percpu_write(current_task, next_p);
+	this_cpu_write(current_task, next_p);
 
 	return prev_p;
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 43d8b48b23e6..28e810255a0a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -237,7 +237,7 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
 	current->thread.usersp	= new_sp;
 	regs->ip		= new_ip;
 	regs->sp		= new_sp;
-	percpu_write(old_rsp, new_sp);
+	this_cpu_write(old_rsp, new_sp);
 	regs->cs		= _cs;
 	regs->ss		= _ss;
 	regs->flags		= X86_EFLAGS_IF;
@@ -359,11 +359,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
-	prev->usersp = percpu_read(old_rsp);
-	percpu_write(old_rsp, next->usersp);
-	percpu_write(current_task, next_p);
+	prev->usersp = this_cpu_read(old_rsp);
+	this_cpu_write(old_rsp, next->usersp);
+	this_cpu_write(current_task, next_p);
 
-	percpu_write(kernel_stack,
+	this_cpu_write(kernel_stack,
 		     (unsigned long)task_stack_page(next_p) +
 		     THREAD_SIZE - KERNEL_STACK_OFFSET);
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index d6c0418c3e47..3804471db104 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -61,10 +61,10 @@ static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
  */
 void leave_mm(int cpu)
 {
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
 	cpumask_clear_cpu(cpu,
-			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
+			  mm_cpumask(this_cpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -152,8 +152,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 	 *	BUG();
 	 */
 
-	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
-		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
+		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 			if (f->flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -322,7 +322,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 static void do_flush_tlb_all(void *info)
 {
 	__flush_tlb_all();
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
 }
 
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 21638ae14e07..2b9f82c037c9 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -166,60 +166,6 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
 /*
- * Optional methods for optimized non-lvalue per-cpu variable access.
- *
- * @var can be a percpu variable or a field of it and its size should
- * equal char, int or long.  percpu_read() evaluates to a lvalue and
- * all others to void.
- *
- * These operations are guaranteed to be atomic.
- * The generic versions disable interrupts.  Archs are
- * encouraged to implement single-instruction alternatives which don't
- * require protection.
- */
-#ifndef percpu_read
-# define percpu_read(var)	\
-  ({	\
-	typeof(var) *pr_ptr__ = &(var);	\
-	typeof(var) pr_ret__;	\
-	pr_ret__ = get_cpu_var(*pr_ptr__);	\
-	put_cpu_var(*pr_ptr__);	\
-	pr_ret__;	\
-  })
-#endif
-
-#define __percpu_generic_to_op(var, val, op)	\
-do {	\
-	typeof(var) *pgto_ptr__ = &(var);	\
-	get_cpu_var(*pgto_ptr__) op val;	\
-	put_cpu_var(*pgto_ptr__);	\
-} while (0)
-
-#ifndef percpu_write
-# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
-#endif
-
-#ifndef percpu_add
-# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
-#endif
-
-#ifndef percpu_sub
-# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
-#endif
-
-#ifndef percpu_and
-# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
-#endif
-
-#ifndef percpu_or
-# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
-#endif
-
-#ifndef percpu_xor
-# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
-#endif
-
-/*
  * Branching function to split up a function into a set of functions that
  * are called for different scalar sizes of the objects handled.
  */
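For context on what the deleted generic fallbacks provided: on architectures without optimized single-instruction accessors, they made each access atomic by bracketing it in get_cpu_var()/put_cpu_var(), i.e. a preempt_disable()/preempt_enable() pair. Roughly, percpu_add(sockets_in_use, 1) (see net/socket.c below) expanded to this sketch of __percpu_generic_to_op():

        do {
                typeof(sockets_in_use) *pgto_ptr__ = &(sockets_in_use);
                get_cpu_var(*pgto_ptr__) += 1;  /* disables preemption */
                put_cpu_var(*pgto_ptr__);       /* re-enables preemption */
        } while (0);

The this_cpu_*() operations give the same per-cpu atomicity guarantee, and on x86 they compile down to a single %gs-prefixed instruction with no preemption toggle.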
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e26db031303b..9dc427cdb6ff 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -239,7 +239,7 @@ static inline int cpu_to_node(int cpu)
 #ifndef set_numa_node
 static inline void set_numa_node(int node)
 {
-	percpu_write(numa_node, node);
+	this_cpu_write(numa_node, node);
 }
 #endif
 
@@ -274,7 +274,7 @@ DECLARE_PER_CPU(int, _numa_mem_);
 #ifndef set_numa_mem
 static inline void set_numa_mem(int node)
 {
-	percpu_write(_numa_mem_, node);
+	this_cpu_write(_numa_mem_, node);
 }
 #endif
 
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 4d5057902839..ee2e5bc5a8c7 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -87,7 +87,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_tee_tginfo *info = par->targinfo;
 	struct iphdr *iph;
 
-	if (percpu_read(tee_active))
+	if (__this_cpu_read(tee_active))
 		return XT_CONTINUE;
 	/*
 	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
@@ -124,9 +124,9 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 	ip_send_check(iph);
 
 	if (tee_tg_route4(skb, info)) {
-		percpu_write(tee_active, true);
+		__this_cpu_write(tee_active, true);
 		ip_local_out(skb);
-		percpu_write(tee_active, false);
+		__this_cpu_write(tee_active, false);
 	} else {
 		kfree_skb(skb);
 	}
@@ -168,7 +168,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_tee_tginfo *info = par->targinfo;
 
-	if (percpu_read(tee_active))
+	if (__this_cpu_read(tee_active))
 		return XT_CONTINUE;
 	skb = pskb_copy(skb, GFP_ATOMIC);
 	if (skb == NULL)
@@ -186,9 +186,9 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 		--iph->hop_limit;
 	}
 	if (tee_tg_route6(skb, info)) {
-		percpu_write(tee_active, true);
+		__this_cpu_write(tee_active, true);
 		ip6_local_out(skb);
-		percpu_write(tee_active, false);
+		__this_cpu_write(tee_active, false);
 	} else {
 		kfree_skb(skb);
 	}
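Note that the xt_TEE conversion deliberately uses the double-underscore forms. As a hedged reading: the plain this_cpu_*() operations are safe to call with preemption enabled, while __this_cpu_*() assumes the caller already cannot migrate between CPUs; that holds here because these targets run from netfilter hooks in softirq context. The two idioms side by side (both taken from this patch):

        this_cpu_inc(mce_poll_count);           /* safe in any context */
        __this_cpu_write(tee_active, true);     /* caller already non-preemptible */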
diff --git a/net/socket.c b/net/socket.c
index 2a2898ce596e..6e0ccc09b313 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -479,7 +479,7 @@ static struct socket *sock_alloc(void)
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
 
-	percpu_add(sockets_in_use, 1);
+	this_cpu_add(sockets_in_use, 1);
 	return sock;
 }
 
@@ -522,7 +522,7 @@ void sock_release(struct socket *sock)
 	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
 		printk(KERN_ERR "sock_release: fasync list not empty!\n");
 
-	percpu_sub(sockets_in_use, 1);
+	this_cpu_sub(sockets_in_use, 1);
 	if (!sock->file) {
 		iput(SOCK_INODE(sock));
 		return;