author		Rusty Russell <rusty@rustcorp.com.au>	2009-10-29 09:34:15 -0400
committer	Tejun Heo <tj@kernel.org>		2009-10-29 09:34:15 -0400
commit		dd17c8f72993f9461e9c19250e3f155d6d99df22
tree		c33eedf0cf2862e9feeb796e94d49a2ccdce0149 /arch/x86
parent		390dfd95c5df1ab3921dd388d11b2aee332c3f2c
percpu: remove per_cpu__ prefix.
Now that the return from alloc_percpu is compatible with the address
of per-cpu vars, it makes sense to hand around the address of per-cpu
variables. To make this sane, we remove the per_cpu__ prefix we had
created to stop people from accidentally using these vars directly.
Now that we have sparse, we can use that instead (next patch).
tj: * Updated to convert stuff that was missed by or added after the
      original patch.
* Kill per_cpu_var() macro.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
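
For context on the sparse remark above: the checking works by placing
per-cpu variables in their own sparse address space, so that a direct
dereference of a per-cpu symbol is flagged at check time and only the
accessor macros may touch it. A minimal sketch of the idea, in the
noderef/address_space annotation style the kernel uses for sparse
elsewhere (treat the exact macro spelling and address-space number
here as illustrative):

/* Under sparse (__CHECKER__), per-cpu variables live in a distinct
 * address space and are marked non-dereferenceable; regular compilers
 * see an empty annotation, so generated code is unchanged. */
#ifdef __CHECKER__
# define __percpu	__attribute__((noderef, address_space(3)))
#else
# define __percpu
#endif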
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/percpu.h	| 37
-rw-r--r--	arch/x86/include/asm/system.h	|  8
-rw-r--r--	arch/x86/kernel/apic/nmi.c	|  6
-rw-r--r--	arch/x86/kernel/head_32.S	|  6
-rw-r--r--	arch/x86/kernel/vmlinux.lds.S	|  4
-rw-r--r--	arch/x86/xen/xen-asm_32.S	|  4
6 files changed, 31 insertions(+), 34 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0c44196b78ac..4c170ccc72ed 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -25,19 +25,18 @@
  */
 #ifdef CONFIG_SMP
 #define PER_CPU(var, reg)						\
-	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;	\
-	lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
+	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
+	lea var(reg), reg
+#define PER_CPU_VAR(var)	%__percpu_seg:var
 #else /* ! SMP */
-#define PER_CPU(var, reg)						\
-	__percpu_mov_op $per_cpu__##var, reg
-#define PER_CPU_VAR(var)	per_cpu__##var
+#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
+#define PER_CPU_VAR(var)	var
 #endif /* SMP */
 
 #ifdef CONFIG_X86_64_SMP
 #define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
 #else
-#define INIT_PER_CPU_VAR(var)  per_cpu__##var
+#define INIT_PER_CPU_VAR(var)  var
 #endif
 
 #else /* ...!ASSEMBLY */
@@ -60,12 +59,12 @@
  * There also must be an entry in vmlinux_64.lds.S
  */
 #define DECLARE_INIT_PER_CPU(var) \
-       extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+       extern typeof(var) init_per_cpu_var(var)
 
 #ifdef CONFIG_X86_64_SMP
 #define init_per_cpu_var(var)  init_per_cpu__##var
 #else
-#define init_per_cpu_var(var)  per_cpu_var(var)
+#define init_per_cpu_var(var)  var
 #endif
 
 /* For arch-specific code, we can use direct single-insn ops (they
@@ -142,16 +141,14 @@ do {									\
  * per-thread variables implemented as per-cpu variables and thus
  * stable for the duration of the respective task.
  */
-#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var,	\
-					       "m" (per_cpu__##var))
-#define percpu_read_stable(var)	percpu_from_op("mov", per_cpu__##var,	\
-					       "p" (&per_cpu__##var))
-#define percpu_write(var, val)	percpu_to_op("mov", per_cpu__##var, val)
-#define percpu_add(var, val)	percpu_to_op("add", per_cpu__##var, val)
-#define percpu_sub(var, val)	percpu_to_op("sub", per_cpu__##var, val)
-#define percpu_and(var, val)	percpu_to_op("and", per_cpu__##var, val)
-#define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
-#define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
+#define percpu_read(var)	percpu_from_op("mov", var, "m" (var))
+#define percpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
+#define percpu_write(var, val)	percpu_to_op("mov", var, val)
+#define percpu_add(var, val)	percpu_to_op("add", var, val)
+#define percpu_sub(var, val)	percpu_to_op("sub", var, val)
+#define percpu_and(var, val)	percpu_to_op("and", var, val)
+#define percpu_or(var, val)	percpu_to_op("or", var, val)
+#define percpu_xor(var, val)	percpu_to_op("xor", var, val)
 
 #define __this_cpu_read_1(pcp)	percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_read_2(pcp)	percpu_from_op("mov", (pcp), "m"(pcp))
@@ -236,7 +233,7 @@ do {									\
 ({									\
 	int old__;							\
 	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
-		     : "=r" (old__), "+m" (per_cpu__##var)		\
+		     : "=r" (old__), "+m" (var)				\
 		     : "dIr" (bit));					\
 	old__;								\
 })
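
To illustrate the effect of the percpu.h hunks above: the accessor
macros now take the bare per-cpu symbol instead of pasting a per_cpu__
prefix onto it, so the macro argument and the linker-visible symbol
are one and the same. A usage sketch with an invented counter (the
variable and function names are hypothetical, not from this patch):

#include <linux/percpu.h>

/* Hypothetical per-cpu counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, my_hits);

static void my_count_hit(void)
{
	percpu_add(my_hits, 1);			/* one %fs/%gs-relative add */

	if (percpu_read(my_hits) > 1000)	/* segment-relative load */
		percpu_write(my_hits, 0);
}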
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index f08f97374892..de10c19d9558 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	"movl %P[task_canary](%[next]), %%ebx\n\t"			\
 	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
 #define __switch_canary_oparam						\
-	, [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
+	, [stack_canary] "=m" (stack_canary.canary)
 #define __switch_canary_iparam						\
 	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
 #else	/* CC_STACKPROTECTOR */
@@ -113,7 +113,7 @@ do {									\
 	"movq %P[task_canary](%%rsi),%%r8\n\t"				  \
 	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
 #define __switch_canary_oparam						  \
-	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+	, [gs_canary] "=m" (irq_stack_union.stack_canary)
 #define __switch_canary_iparam						  \
 	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
 #else	/* CC_STACKPROTECTOR */
@@ -134,7 +134,7 @@ do {									\
 	     __switch_canary						  \
 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
 	     "movq %%rax,%%rdi\n\t"					  \
-	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"	  \
+	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
 	     "jnz ret_from_fork\n\t"					  \
 	     RESTORE_CONTEXT						  \
 	     : "=a" (last)						  \
@@ -144,7 +144,7 @@ do {									\
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
 	       [_tif_fork] "i" (_TIF_FORK),				  \
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
-	       [current_task] "m" (per_cpu_var(current_task))		  \
+	       [current_task] "m" (current_task)			  \
 	       __switch_canary_iparam					  \
 	       : "memory", "cc" __EXTRA_CLOBBER)
 #endif
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index e631cc4416f7..45404379d173 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -437,8 +437,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		 * Ayiee, looks like this CPU is stuck ...
 		 * wait a few IRQs (5 seconds) before doing the oops ...
 		 */
-		__this_cpu_inc(per_cpu_var(alert_counter));
-		if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
+		__this_cpu_inc(alert_counter);
+		if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
 			/*
 			 * die_nmi will return ONLY if NOTIFY_STOP happens..
 			 */
@@ -446,7 +446,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		__this_cpu_write(per_cpu_var(alert_counter), 0);
+		__this_cpu_write(alert_counter, 0);
 	}
 
 	/* see if the nmi watchdog went off */
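
The nmi.c hunks above show the calling convention after the rename:
the __this_cpu_*() ops take the plain variable, with no per_cpu_var()
wrapper, while __get_cpu_var() continues to yield an ordinary lvalue.
A short sketch of the same pattern, using hypothetical names rather
than the kernel's:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, my_alert_counter);
static DEFINE_PER_CPU(unsigned int, my_last_sum);

static void my_watchdog_tick(unsigned int sum, int stuck)
{
	if (stuck) {
		/* single-instruction increment of this CPU's copy */
		__this_cpu_inc(my_alert_counter);
	} else {
		/* plain lvalue access through __get_cpu_var() */
		__get_cpu_var(my_last_sum) = sum;
		__this_cpu_write(my_alert_counter, 0);
	}
}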
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 050c278481b1..fd39eaf83b84 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -438,8 +438,8 @@ is386:	movl $2,%ecx		# set MP
 	 */
 	cmpb $0,ready
 	jne 1f
-	movl $per_cpu__gdt_page,%eax
-	movl $per_cpu__stack_canary,%ecx
+	movl $gdt_page,%eax
+	movl $stack_canary,%ecx
 	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
 	shrl $16, %ecx
 	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
@@ -702,7 +702,7 @@ idt_descr:
 	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
 	.word GDT_ENTRIES*8-1
-	.long per_cpu__gdt_page		/* Overwritten for secondary CPUs */
+	.long gdt_page			/* Overwritten for secondary CPUs */
 
 /*
  * The boot_gdt must mirror the equivalent in setup.S and is
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 92929fb3f9fa..ecb92717c412 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -312,7 +312,7 @@ SECTIONS
  * Per-cpu symbols which need to be offset from __per_cpu_load
  * for the boot processor.
  */
-#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
 INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(irq_stack_union);
 
@@ -323,7 +323,7 @@ INIT_PER_CPU(irq_stack_union);
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
 
 #ifdef CONFIG_SMP
-. = ASSERT((per_cpu__irq_stack_union == 0),
+. = ASSERT((irq_stack_union == 0),
            "irq_stack_union is not at start of per-cpu area");
 #endif
 
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 88e15deb8b82..22a2093b5862 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -90,9 +90,9 @@ ENTRY(xen_iret)
 	GET_THREAD_INFO(%eax)
 	movl TI_cpu(%eax), %eax
 	movl __per_cpu_offset(,%eax,4), %eax
-	mov per_cpu__xen_vcpu(%eax), %eax
+	mov xen_vcpu(%eax), %eax
 #else
-	movl per_cpu__xen_vcpu, %eax
+	movl xen_vcpu, %eax
 #endif
 
 	/* check IF state we're restoring */
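
For reference, the SMP branch of the xen_iret hunk above is the
assembly analogue of the generic per_cpu() accessor: a given CPU's
per-cpu base comes from __per_cpu_offset[cpu], and the now-unprefixed
xen_vcpu symbol is simply a link-time offset within that area. A rough
C rendering of what those instructions compute (a simplified sketch;
the real accessor goes through SHIFT_PERCPU_PTR()/RELOC_HIDE(), and
my_read_xen_vcpu is an invented name):

#include <linux/percpu.h>

struct vcpu_info;
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);

static struct vcpu_info *my_read_xen_vcpu(int cpu)
{
	/* this CPU's per-cpu base + the variable's offset, then load */
	return *(struct vcpu_info **)
		((char *)&xen_vcpu + __per_cpu_offset[cpu]);
}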