author		Christoph Lameter <cl@linux.com>	2011-12-22 12:58:51 -0500
committer	Tejun Heo <tj@kernel.org>	2011-12-22 13:40:20 -0500
commit		933393f58fef9963eac61db8093689544e29a600
tree		719f8b231499aa4ea023bc1a06db4582df5f0965 /arch/s390/include
parent		ecefc36b41ac0fe92d76273a23faf27b2da13411
percpu: Remove irqsafe_cpu_xxx variants
We now simply require that regular this_cpu operations be safe
regardless of preemption and interrupt state. This is no material
change for the x86 and s390 implementations of the this_cpu
operations. However, arches that do not provide their own
implementation of the this_cpu operations will now get generated code
that disables interrupts instead of preemption (see the sketch below).
-tj: This is part of the ongoing percpu API cleanup. For a detailed
discussion of the subject, please refer to the following thread:
http://thread.gmane.org/gmane.linux.kernel/1222078
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference: <alpine.DEB.2.00.1112221154380.11787@router.home>
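For context, the behavioral change lands in the generic fallback used
by arches that lack their own this_cpu implementation. Below is a
minimal sketch of that fallback after this change, reconstructed here
for illustration; the in-tree macro in include/linux/percpu.h may
differ in detail:

/* Sketch, not part of the s390 diff below: the generic per-cpu
 * read-modify-write now disables interrupts rather than only
 * preemption, so an IRQ arriving mid-update can no longer tear
 * the operation. */
#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);		/* was preempt_disable() */	\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);	/* was preempt_enable() */	\
} while (0)

The s390 code needs no such change: its arch_this_cpu_to_op is built
on a compare-and-swap retry loop, which is already safe against
interrupts without disabling them.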
Diffstat (limited to 'arch/s390/include')
-rw-r--r--	arch/s390/include/asm/percpu.h	44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 5325c89a5843..0fbd1899c7b0 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -19,7 +19,7 @@
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
-#define arch_irqsafe_cpu_to_op(pcp, val, op)			\
+#define arch_this_cpu_to_op(pcp, val, op)			\
 do {								\
 	typedef typeof(pcp) pcp_op_T__;				\
 	pcp_op_T__ old__, new__, prev__;			\
@@ -41,27 +41,27 @@ do {								\
 	preempt_enable();					\
 } while (0)
 
-#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
 
-#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
 
-#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
 
-#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
 
-#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
+#define arch_this_cpu_cmpxchg(pcp, oval, nval)			\
 ({								\
 	typedef typeof(pcp) pcp_op_T__;				\
 	pcp_op_T__ ret__;					\
@@ -79,10 +79,10 @@ do {								\
 	ret__;							\
 })
 
-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_1(pcp, oval, nval)	arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval)	arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval)	arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	arch_this_cpu_cmpxchg(pcp, oval, nval)
 
 #include <asm-generic/percpu.h>
 
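For callers, the rename means per-cpu updates that may race with
interrupts no longer need a dedicated irqsafe_ API: this_cpu_add()
and friends dispatch by operand size to the this_cpu_add_{1,2,4,8}
macros above and are now guaranteed interrupt-safe. A minimal
hypothetical usage sketch follows; the counter and function names are
illustrative, not from this patch:

#include <linux/percpu.h>

/* Hypothetical per-cpu counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, demo_count);

/* Callable from both process and interrupt context: after this
 * commit the update is interrupt-safe on every arch, so no
 * local_irq_save()/restore() bracketing is required around it. */
static void demo_account(void)
{
	this_cpu_add(demo_count, 1);
}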