author		Ingo Molnar <mingo@elte.hu>	2009-03-06 10:44:14 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-06 10:45:01 -0500
commit		f0ef03985130287c6c84ebe69416cf790e6cc00e (patch)
tree		3ecb04cc4d82e5fc3ae5f1747e6da172ae8cbcb7 /arch/x86/include/asm/percpu.h
parent		16097439703bcd38e9fe5608c12add6dacb825ea (diff)
parent		31bbed527e7039203920c51c9fb48c27aed0820c (diff)
Merge branch 'x86/core' into tracing/textedit
Conflicts:
	arch/x86/Kconfig
	block/blktrace.c
	kernel/irq/handle.c

Semantic conflict:
	kernel/trace/blktrace.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include/asm/percpu.h')
-rw-r--r--	arch/x86/include/asm/percpu.h	177
1 file changed, 86 insertions, 91 deletions
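The hunks below drop the PDA-based x86-64 accessors and route every per-cpu access through a single segment-prefixed instruction, with percpu_to_op()/percpu_from_op() picking the b/w/l/q suffix from sizeof(). The following userspace sketch only illustrates that sizeof() dispatch; it is not kernel code: demo_to_op() is a hypothetical name, the memory operand carries no %gs:/%fs: prefix (which __percpu_arg() adds in the kernel), and it assumes gcc -O2 on x86-64 so the non-matching size cases are discarded before asm operand selection, just as the kernel relies on.

/*
 * Userspace illustration only -- not kernel code.  Mirrors the sizeof()
 * dispatch of percpu_to_op() in the diff below, minus the segment prefix.
 * Build with: gcc -O2 (dead size cases must be eliminated before
 * register allocation, as in the kernel build).
 */
#include <stdint.h>
#include <stdio.h>

#define demo_to_op(op, var, val)					\
do {									\
	typedef typeof(var) T__;					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm(op "b %1,%0" : "+m" (var) : "ri" ((T__)(val)));	\
		break;							\
	case 2:								\
		asm(op "w %1,%0" : "+m" (var) : "ri" ((T__)(val)));	\
		break;							\
	case 4:								\
		asm(op "l %1,%0" : "+m" (var) : "ri" ((T__)(val)));	\
		break;							\
	case 8:								\
		asm(op "q %1,%0" : "+m" (var) : "re" ((T__)(val)));	\
		break;							\
	}								\
} while (0)

int main(void)
{
	uint32_t c32 = 1;
	uint64_t c64 = 1;

	demo_to_op("add", c32, 41);	/* emits a single addl */
	demo_to_op("add", c64, 41);	/* emits a single addq (new case 8) */
	printf("%u %llu\n", c32, (unsigned long long)c64);
	return 0;
}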
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index ece72053ba63..8f1d2fbec1d4 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -2,53 +2,12 @@
 #define _ASM_X86_PERCPU_H
 
 #ifdef CONFIG_X86_64
-#include <linux/compiler.h>
-
-/* Same as asm-generic/percpu.h, except that we store the per cpu offset
-   in the PDA. Longer term the PDA and every per cpu variable
-   should be just put into a single section and referenced directly
-   from %gs */
-
-#ifdef CONFIG_SMP
-#include <asm/pda.h>
-
-#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
-#define __my_cpu_offset read_pda(data_offset)
-
-#define per_cpu_offset(x) (__per_cpu_offset(x))
-
+#define __percpu_seg gs
+#define __percpu_mov_op movq
+#else
+#define __percpu_seg fs
+#define __percpu_mov_op movl
 #endif
-#include <asm-generic/percpu.h>
-
-DECLARE_PER_CPU(struct x8664_pda, pda);
-
-/*
- * These are supposed to be implemented as a single instruction which
- * operates on the per-cpu data base segment. x86-64 doesn't have
- * that yet, so this is a fairly inefficient workaround for the
- * meantime. The single instruction is atomic with respect to
- * preemption and interrupts, so we need to explicitly disable
- * interrupts here to achieve the same effect. However, because it
- * can be used from within interrupt-disable/enable, we can't actually
- * disable interrupts; disabling preemption is enough.
- */
-#define x86_read_percpu(var) \
-	({ \
-		typeof(per_cpu_var(var)) __tmp; \
-		preempt_disable(); \
-		__tmp = __get_cpu_var(var); \
-		preempt_enable(); \
-		__tmp; \
-	})
-
-#define x86_write_percpu(var, val) \
-	do { \
-		preempt_disable(); \
-		__get_cpu_var(var) = (val); \
-		preempt_enable(); \
-	} while(0)
-
-#else /* CONFIG_X86_64 */
 
 #ifdef __ASSEMBLY__
 
@@ -65,47 +24,56 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
  * PER_CPU(cpu_gdt_descr, %ebx)
  */
 #ifdef CONFIG_SMP
 #define PER_CPU(var, reg) \
-	movl %fs:per_cpu__##this_cpu_off, reg; \
+	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
 	lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var) %fs:per_cpu__##var
+#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
 #else /* ! SMP */
 #define PER_CPU(var, reg) \
-	movl $per_cpu__##var, reg
+	__percpu_mov_op $per_cpu__##var, reg
 #define PER_CPU_VAR(var) per_cpu__##var
 #endif /* SMP */
 
-#else /* ...!ASSEMBLY */
-
-/*
- * PER_CPU finds an address of a per-cpu variable.
- *
- * Args:
- *    var - variable name
- *    cpu - 32bit register containing the current CPU number
- *
- * The resulting address is stored in the "cpu" argument.
- *
- * Example:
- *    PER_CPU(cpu_gdt_descr, %ebx)
- */
-#ifdef CONFIG_SMP
-
-#define __my_cpu_offset x86_read_percpu(this_cpu_off)
+#ifdef CONFIG_X86_64_SMP
+#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
+#else
+#define INIT_PER_CPU_VAR(var) per_cpu__##var
+#endif
 
-/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
-#define __percpu_seg "%%fs:"
+#else /* ...!ASSEMBLY */
 
-#else /* !SMP */
+#include <linux/stringify.h>
+#include <asm/sections.h>
 
-#define __percpu_seg ""
+#define __addr_to_pcpu_ptr(addr) \
+	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
+		 + (unsigned long)__per_cpu_start)
+#define __pcpu_ptr_to_addr(ptr) \
+	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
+		 - (unsigned long)__per_cpu_start)
 
-#endif /* SMP */
+#ifdef CONFIG_SMP
+#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
+#define __my_cpu_offset percpu_read(this_cpu_off)
+#else
+#define __percpu_arg(x) "%" #x
+#endif
 
-#include <asm-generic/percpu.h>
+/*
+ * Initialized pointers to per-cpu variables needed for the boot
+ * processor need to use these macros to get the proper address
+ * offset from __per_cpu_load on SMP.
+ *
+ * There also must be an entry in vmlinux_64.lds.S
+ */
+#define DECLARE_INIT_PER_CPU(var) \
+	extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
 
-/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
+#ifdef CONFIG_X86_64_SMP
+#define init_per_cpu_var(var) init_per_cpu__##var
+#else
+#define init_per_cpu_var(var) per_cpu_var(var)
+#endif
 
 /* For arch-specific code, we can use direct single-insn ops (they
  * don't give an lvalue though). */
@@ -120,20 +88,25 @@ do { \
 	} \
 	switch (sizeof(var)) { \
 	case 1: \
-		asm(op "b %1,"__percpu_seg"%0" \
+		asm(op "b %1,"__percpu_arg(0) \
 		    : "+m" (var) \
 		    : "ri" ((T__)val)); \
 		break; \
 	case 2: \
-		asm(op "w %1,"__percpu_seg"%0" \
+		asm(op "w %1,"__percpu_arg(0) \
 		    : "+m" (var) \
 		    : "ri" ((T__)val)); \
 		break; \
 	case 4: \
-		asm(op "l %1,"__percpu_seg"%0" \
+		asm(op "l %1,"__percpu_arg(0) \
 		    : "+m" (var) \
 		    : "ri" ((T__)val)); \
 		break; \
+	case 8: \
+		asm(op "q %1,"__percpu_arg(0) \
+		    : "+m" (var) \
+		    : "re" ((T__)val)); \
+		break; \
 	default: __bad_percpu_size(); \
 	} \
 } while (0)
@@ -143,17 +116,22 @@ do { \
 	typeof(var) ret__; \
 	switch (sizeof(var)) { \
 	case 1: \
-		asm(op "b "__percpu_seg"%1,%0" \
+		asm(op "b "__percpu_arg(1)",%0" \
 		    : "=r" (ret__) \
 		    : "m" (var)); \
 		break; \
 	case 2: \
-		asm(op "w "__percpu_seg"%1,%0" \
+		asm(op "w "__percpu_arg(1)",%0" \
 		    : "=r" (ret__) \
 		    : "m" (var)); \
 		break; \
 	case 4: \
-		asm(op "l "__percpu_seg"%1,%0" \
+		asm(op "l "__percpu_arg(1)",%0" \
+		    : "=r" (ret__) \
+		    : "m" (var)); \
+		break; \
+	case 8: \
+		asm(op "q "__percpu_arg(1)",%0" \
 		    : "=r" (ret__) \
 		    : "m" (var)); \
 		break; \
@@ -162,13 +140,30 @@ do { \
 	ret__; \
 })
 
-#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_read(var) percpu_from_op("mov", per_cpu__##var)
+#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
+#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
+#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
+#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
+#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var) \
+({ \
+	int old__; \
+	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
+		     : "=r" (old__), "+m" (per_cpu__##var) \
+		     : "dIr" (bit)); \
+	old__; \
+})
+
+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU(unsigned long, this_cpu_off);
+
 #endif /* !__ASSEMBLY__ */
-#endif /* !CONFIG_X86_64 */
 
 #ifdef CONFIG_SMP
 
@@ -195,9 +190,9 @@ do { \
 #define early_per_cpu_ptr(_name) (_name##_early_ptr)
 #define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
 #define early_per_cpu(_name, _cpu) \
-	(early_per_cpu_ptr(_name) ? \
-		early_per_cpu_ptr(_name)[_cpu] : \
-		per_cpu(_name, _cpu))
+	*(early_per_cpu_ptr(_name) ? \
+		&early_per_cpu_ptr(_name)[_cpu] : \
+		&per_cpu(_name, _cpu))
 
 #else /* !CONFIG_SMP */
 #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
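The last hunk rewrites early_per_cpu() from a plain conditional expression into "*(cond ? &a : &b)": in C the result of ?: is not an lvalue, but dereferencing a pointer chosen by ?: is, so the macro can now be assigned to as well as read. A minimal standalone sketch of that difference, using hypothetical demo_* names and plain arrays standing in for the early map and the real per-cpu area:

/*
 * Standalone illustration only -- not kernel code.  Shows why the new
 * early_per_cpu() shape "*(cond ? &a : &b)" is usable as an lvalue,
 * while the old "(cond ? a : b)" shape was read-only.
 */
#include <stdio.h>

static int demo_early_map[2] = { 10, 11 };	/* stands in for _name##_early_map */
static int demo_percpu[2]    = { 20, 21 };	/* stands in for the per-cpu copy */
static int *demo_early_ptr   = demo_early_map;	/* NULL once the early map is retired */

/* old shape: value expression, cannot appear on the left of '=' */
#define demo_early_per_cpu_rvalue(_cpu) \
	(demo_early_ptr ? demo_early_ptr[_cpu] : demo_percpu[_cpu])

/* new shape: dereference of a selected pointer, a proper lvalue */
#define demo_early_per_cpu(_cpu) \
	(*(demo_early_ptr ? &demo_early_ptr[_cpu] : &demo_percpu[_cpu]))

int main(void)
{
	demo_early_per_cpu(1) = 99;	/* would not compile with the rvalue form */
	printf("%d %d\n", demo_early_per_cpu(0), demo_early_per_cpu_rvalue(1));
	return 0;
}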