author	Christoph Lameter <cl@linux.com>	2011-12-22 12:58:51 -0500
committer	Tejun Heo <tj@kernel.org>	2011-12-22 13:40:20 -0500
commit	933393f58fef9963eac61db8093689544e29a600 (patch)
tree	719f8b231499aa4ea023bc1a06db4582df5f0965 /arch/x86/include/asm/percpu.h
parent	ecefc36b41ac0fe92d76273a23faf27b2da13411 (diff)
percpu: Remove irqsafe_cpu_xxx variants
We simply say that regular this_cpu use must be safe regardless of
preemption and interrupt state.  That has no material change for x86
and s390 implementations of this_cpu operations.  However, arches that
do not provide their own implementation for this_cpu operations will
now get code generated that disables interrupts instead of preemption.

-tj: This is part of on-going percpu API cleanup.  For detailed
     discussion of the subject, please refer to the following thread.

     http://thread.gmane.org/gmane.linux.kernel/1222078

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference: <alpine.DEB.2.00.1112221154380.11787@router.home>
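Editor's note: the behavioral change described above lands in the generic
fallbacks, not in this x86 header. A minimal sketch of the resulting pattern,
modeled on the generic read-modify-write helper in include/linux/percpu.h
(treat the exact macro spelling as illustrative, not as the literal hunk from
this series):

	/*
	 * Generic this_cpu fallback: after this change it must be safe
	 * against interrupts as well as preemption, so it saves and
	 * disables IRQs around the access instead of calling
	 * preempt_disable()/preempt_enable().
	 */
	#define _this_cpu_generic_to_op(pcp, val, op)			\
	do {								\
		unsigned long flags;					\
		local_irq_save(flags);					\
		*__this_cpu_ptr(&(pcp)) op val;				\
		local_irq_restore(flags);				\
	} while (0)
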
Diffstat (limited to 'arch/x86/include/asm/percpu.h')
-rw-r--r--	arch/x86/include/asm/percpu.h	28
1 file changed, 0 insertions(+), 28 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 3470c9d0ebba..562ccb5323de 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -414,22 +414,6 @@ do { \
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
 
-#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
@@ -445,9 +429,6 @@ do { \
 #define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
 #ifdef CONFIG_X86_CMPXCHG64
@@ -467,7 +448,6 @@ do { \
 
 #define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
 #define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
 #endif /* CONFIG_X86_CMPXCHG64 */
 
 /*
@@ -495,13 +475,6 @@ do { \
 #define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
-#define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-
 /*
  * Pretty complex macro to generate cmpxchg16 instruction. The instruction
  * is not supported on early AMD64 processors so we must be able to emulate
@@ -532,7 +505,6 @@ do { \
 
 #define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
 #define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
 
 #endif
 
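Editor's note: on x86 the removal is purely a deduplication. As the diff
shows, each irqsafe_cpu_* macro expanded to exactly the same percpu_add_op/
percpu_to_op/percpu_xchg_op/percpu_cmpxchg_op body as its this_cpu_*
counterpart: a single %gs-prefixed instruction that cannot be interrupted
mid-update, so the interrupt-safety guarantee was already there. Callers
migrate by a plain rename; a hypothetical per-CPU counter update (the
variable name is illustrative, not from this commit):

	DEFINE_PER_CPU(unsigned long, nr_events);	/* hypothetical counter */

	/* before this series: explicit irq-safe variant */
	irqsafe_cpu_add(nr_events, 1);

	/* after: this_cpu_add() now carries the same irq-safety guarantee */
	this_cpu_add(nr_events, 1);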