author     Rusty Russell <rusty@rustcorp.com.au>    2009-10-29 09:34:15 -0400
committer  Tejun Heo <tj@kernel.org>                2009-10-29 09:34:15 -0400
commit     dd17c8f72993f9461e9c19250e3f155d6d99df22
tree       c33eedf0cf2862e9feeb796e94d49a2ccdce0149 /arch/x86/include
parent     390dfd95c5df1ab3921dd388d11b2aee332c3f2c
percpu: remove per_cpu__ prefix.
Now that the return from alloc_percpu() is compatible with the address of
per-cpu vars, it makes sense to hand around the address of per-cpu
variables.  To make this sane, we remove the per_cpu__ prefix we originally
created to stop people accidentally using these vars directly.  Now that we
have sparse, we can use that instead (next patch).

tj: * Updated to convert stuff which was missed by or added after the
      original patch.

    * Kill the per_cpu_var() macro.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
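As a hedged illustration of what the prefix removal means for callers (this
sketch is not part of the patch; nr_foo and foo_example() are made-up names),
per-cpu variables are now referred to by their plain C names in both the
static and the dynamic API:

    #include <linux/percpu.h>

    DEFINE_PER_CPU(unsigned long, nr_foo);  /* emitted as "nr_foo", not "per_cpu__nr_foo" */

    static void foo_example(int cpu)
    {
            unsigned long *dyn;

            per_cpu(nr_foo, cpu) += 1;      /* another CPU's copy, by plain name */
            get_cpu_var(nr_foo) += 1;       /* this CPU's copy, preemption disabled */
            put_cpu_var(nr_foo);

            /* dynamically allocated per-cpu memory is addressed the same way */
            dyn = alloc_percpu(unsigned long);
            if (dyn) {
                    (*per_cpu_ptr(dyn, cpu))++;
                    free_percpu(dyn);
            }
    }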
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/percpu.h  37
-rw-r--r--  arch/x86/include/asm/system.h   8
2 files changed, 21 insertions(+), 24 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0c44196b78ac..4c170ccc72ed 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -25,19 +25,18 @@
  */
 #ifdef CONFIG_SMP
 #define PER_CPU(var, reg) \
-	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
-	lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
+	__percpu_mov_op %__percpu_seg:this_cpu_off, reg; \
+	lea var(reg), reg
+#define PER_CPU_VAR(var)	%__percpu_seg:var
 #else /* ! SMP */
-#define PER_CPU(var, reg) \
-	__percpu_mov_op $per_cpu__##var, reg
-#define PER_CPU_VAR(var)	per_cpu__##var
+#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
+#define PER_CPU_VAR(var)	var
 #endif	/* SMP */
 
 #ifdef CONFIG_X86_64_SMP
 #define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
 #else
-#define INIT_PER_CPU_VAR(var)  per_cpu__##var
+#define INIT_PER_CPU_VAR(var)  var
 #endif
 
 #else	/* ...!ASSEMBLY */
@@ -60,12 +59,12 @@
  * There also must be an entry in vmlinux_64.lds.S
  */
 #define DECLARE_INIT_PER_CPU(var) \
-	extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+	extern typeof(var) init_per_cpu_var(var)
 
 #ifdef CONFIG_X86_64_SMP
 #define init_per_cpu_var(var)  init_per_cpu__##var
 #else
-#define init_per_cpu_var(var)  per_cpu_var(var)
+#define init_per_cpu_var(var)  var
 #endif
 
 /* For arch-specific code, we can use direct single-insn ops (they
@@ -142,16 +141,14 @@ do { \
  * per-thread variables implemented as per-cpu variables and thus
  * stable for the duration of the respective task.
  */
-#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var, \
-					"m" (per_cpu__##var))
-#define percpu_read_stable(var)	percpu_from_op("mov", per_cpu__##var, \
-					"p" (&per_cpu__##var))
-#define percpu_write(var, val)	percpu_to_op("mov", per_cpu__##var, val)
-#define percpu_add(var, val)	percpu_to_op("add", per_cpu__##var, val)
-#define percpu_sub(var, val)	percpu_to_op("sub", per_cpu__##var, val)
-#define percpu_and(var, val)	percpu_to_op("and", per_cpu__##var, val)
-#define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
-#define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
+#define percpu_read(var)	percpu_from_op("mov", var, "m" (var))
+#define percpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
+#define percpu_write(var, val)	percpu_to_op("mov", var, val)
+#define percpu_add(var, val)	percpu_to_op("add", var, val)
+#define percpu_sub(var, val)	percpu_to_op("sub", var, val)
+#define percpu_and(var, val)	percpu_to_op("and", var, val)
+#define percpu_or(var, val)	percpu_to_op("or", var, val)
+#define percpu_xor(var, val)	percpu_to_op("xor", var, val)
 
 #define __this_cpu_read_1(pcp)	percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_read_2(pcp)	percpu_from_op("mov", (pcp), "m"(pcp))
@@ -236,7 +233,7 @@ do { \
 ({ \
 	int old__; \
 	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
-		: "=r" (old__), "+m" (per_cpu__##var) \
+		: "=r" (old__), "+m" (var) \
 		: "dIr" (bit)); \
 	old__; \
 })
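For context, a usage sketch (not part of this diff): after the rename, the x86
fast-path accessors take the bare per-cpu variable. current_task is a real x86
per-cpu variable, but my_get_current() is a made-up stand-in that mirrors how
get_current() is implemented:

    #include <linux/sched.h>
    #include <linux/percpu.h>

    DECLARE_PER_CPU(struct task_struct *, current_task);

    static __always_inline struct task_struct *my_get_current(void)
    {
            /* compiles to a single "mov %gs:current_task, <reg>" on x86-64 */
            return percpu_read_stable(current_task);
    }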
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index f08f97374892..de10c19d9558 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
31 "movl %P[task_canary](%[next]), %%ebx\n\t" \ 31 "movl %P[task_canary](%[next]), %%ebx\n\t" \
32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" 32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
33#define __switch_canary_oparam \ 33#define __switch_canary_oparam \
34 , [stack_canary] "=m" (per_cpu_var(stack_canary.canary)) 34 , [stack_canary] "=m" (stack_canary.canary)
35#define __switch_canary_iparam \ 35#define __switch_canary_iparam \
36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
37#else /* CC_STACKPROTECTOR */ 37#else /* CC_STACKPROTECTOR */
@@ -113,7 +113,7 @@ do { \
113 "movq %P[task_canary](%%rsi),%%r8\n\t" \ 113 "movq %P[task_canary](%%rsi),%%r8\n\t" \
114 "movq %%r8,"__percpu_arg([gs_canary])"\n\t" 114 "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
115#define __switch_canary_oparam \ 115#define __switch_canary_oparam \
116 , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary)) 116 , [gs_canary] "=m" (irq_stack_union.stack_canary)
117#define __switch_canary_iparam \ 117#define __switch_canary_iparam \
118 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 118 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
119#else /* CC_STACKPROTECTOR */ 119#else /* CC_STACKPROTECTOR */
@@ -134,7 +134,7 @@ do { \
 	     __switch_canary \
 	     "movq %P[thread_info](%%rsi),%%r8\n\t" \
 	     "movq %%rax,%%rdi\n\t" \
-	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
 	     "jnz ret_from_fork\n\t" \
 	     RESTORE_CONTEXT \
 	     : "=a" (last) \
@@ -144,7 +144,7 @@ do { \
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
 	       [_tif_fork] "i" (_TIF_FORK), \
 	       [thread_info] "i" (offsetof(struct task_struct, stack)), \
-	       [current_task] "m" (per_cpu_var(current_task)) \
+	       [current_task] "m" (current_task) \
 	       __switch_canary_iparam \
 	     : "memory", "cc" __EXTRA_CLOBBER)
 #endif
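Finally, a hypothetical standalone example of the pattern the system.h hunks
rely on (my_canary and store_my_canary are invented for illustration): with
per_cpu_var() gone, an inline-asm memory operand names the per-cpu symbol
directly, and __percpu_arg() supplies the segment prefix in the asm template:

    #include <linux/percpu.h>
    #include <asm/percpu.h>

    DEFINE_PER_CPU(unsigned long, my_canary);

    static inline void store_my_canary(unsigned long val)
    {
            /* operand 0 is the bare per-cpu symbol; __percpu_arg(0) expands
             * to "%gs:my_canary" on x86-64 SMP (plain "my_canary" on UP) */
            asm volatile("mov %1, "__percpu_arg(0)
                         : "=m" (my_canary)
                         : "r" (val));
    }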