diff options
author | Alex Shi <alex.shi@intel.com> | 2012-05-14 17:15:32 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2012-05-14 17:15:32 -0400 |
commit | 641b695c2f11397bd307ea689d4d3f128360ce49 (patch) | |
tree | e7e4e1895295628c8a4dd2e662644bcfbfe67b14 | |
parent | c6ae41e7d469f00d9c92a2b2887c7235d121c009 (diff) |
percpu: remove percpu_xxx() functions
Remove the percpu_xxx() family of functions; all of them have been
replaced by the this_cpu_xxx() or __this_cpu_xxx() functions.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Acked-by: Christoph Lameter <cl@gentwo.org>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r-- | arch/x86/include/asm/percpu.h | 16 | ||||
-rw-r--r-- | include/linux/percpu.h | 54 |
2 files changed, 6 insertions, 64 deletions
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 967ee3be5c0a..d9b8e3f7f42a 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -351,7 +351,7 @@ do { \ | |||
351 | }) | 351 | }) |
352 | 352 | ||
353 | /* | 353 | /* |
354 | * percpu_read() makes gcc load the percpu variable every time it is | 354 | * this_cpu_read() makes gcc load the percpu variable every time it is |
355 | * accessed while this_cpu_read_stable() allows the value to be cached. | 355 | * accessed while this_cpu_read_stable() allows the value to be cached. |
356 | * this_cpu_read_stable() is more efficient and can be used if its value | 356 | * this_cpu_read_stable() is more efficient and can be used if its value |
357 | * is guaranteed to be valid across cpus. The current users include | 357 | * is guaranteed to be valid across cpus. The current users include |
@@ -359,15 +359,7 @@ do { \ | |||
359 | * per-thread variables implemented as per-cpu variables and thus | 359 | * per-thread variables implemented as per-cpu variables and thus |
360 | * stable for the duration of the respective task. | 360 | * stable for the duration of the respective task. |
361 | */ | 361 | */ |
362 | #define percpu_read(var) percpu_from_op("mov", var, "m" (var)) | ||
363 | #define this_cpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var))) | 362 | #define this_cpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var))) |
364 | #define percpu_write(var, val) percpu_to_op("mov", var, val) | ||
365 | #define percpu_add(var, val) percpu_add_op(var, val) | ||
366 | #define percpu_sub(var, val) percpu_add_op(var, -(val)) | ||
367 | #define percpu_and(var, val) percpu_to_op("and", var, val) | ||
368 | #define percpu_or(var, val) percpu_to_op("or", var, val) | ||
369 | #define percpu_xor(var, val) percpu_to_op("xor", var, val) | ||
370 | #define percpu_inc(var) percpu_unary_op("inc", var) | ||
371 | 363 | ||
372 | #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 364 | #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
373 | #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 365 | #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
@@ -512,7 +504,11 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr, | |||
512 | { | 504 | { |
513 | unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG; | 505 | unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG; |
514 | 506 | ||
515 | return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0; | 507 | #ifdef CONFIG_X86_64 |
508 | return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_8(*a)) != 0; | ||
509 | #else | ||
510 | return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_4(*a)) != 0; | ||
511 | #endif | ||
516 | } | 512 | } |
517 | 513 | ||
518 | static inline int x86_this_cpu_variable_test_bit(int nr, | 514 | static inline int x86_this_cpu_variable_test_bit(int nr, |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 21638ae14e07..2b9f82c037c9 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -166,60 +166,6 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); | |||
166 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) | 166 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) |
167 | 167 | ||
168 | /* | 168 | /* |
169 | * Optional methods for optimized non-lvalue per-cpu variable access. | ||
170 | * | ||
171 | * @var can be a percpu variable or a field of it and its size should | ||
172 | * equal char, int or long. percpu_read() evaluates to a lvalue and | ||
173 | * all others to void. | ||
174 | * | ||
175 | * These operations are guaranteed to be atomic. | ||
176 | * The generic versions disable interrupts. Archs are | ||
177 | * encouraged to implement single-instruction alternatives which don't | ||
178 | * require protection. | ||
179 | */ | ||
180 | #ifndef percpu_read | ||
181 | # define percpu_read(var) \ | ||
182 | ({ \ | ||
183 | typeof(var) *pr_ptr__ = &(var); \ | ||
184 | typeof(var) pr_ret__; \ | ||
185 | pr_ret__ = get_cpu_var(*pr_ptr__); \ | ||
186 | put_cpu_var(*pr_ptr__); \ | ||
187 | pr_ret__; \ | ||
188 | }) | ||
189 | #endif | ||
190 | |||
191 | #define __percpu_generic_to_op(var, val, op) \ | ||
192 | do { \ | ||
193 | typeof(var) *pgto_ptr__ = &(var); \ | ||
194 | get_cpu_var(*pgto_ptr__) op val; \ | ||
195 | put_cpu_var(*pgto_ptr__); \ | ||
196 | } while (0) | ||
197 | |||
198 | #ifndef percpu_write | ||
199 | # define percpu_write(var, val) __percpu_generic_to_op(var, (val), =) | ||
200 | #endif | ||
201 | |||
202 | #ifndef percpu_add | ||
203 | # define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=) | ||
204 | #endif | ||
205 | |||
206 | #ifndef percpu_sub | ||
207 | # define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=) | ||
208 | #endif | ||
209 | |||
210 | #ifndef percpu_and | ||
211 | # define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=) | ||
212 | #endif | ||
213 | |||
214 | #ifndef percpu_or | ||
215 | # define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=) | ||
216 | #endif | ||
217 | |||
218 | #ifndef percpu_xor | ||
219 | # define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) | ||
220 | #endif | ||
221 | |||
222 | /* | ||
223 | * Branching function to split up a function into a set of functions that | 169 | * Branching function to split up a function into a set of functions that |
224 | * are called for different scalar sizes of the objects handled. | 170 | * are called for different scalar sizes of the objects handled. |
225 | */ | 171 | */ |