author		Heiko Carstens <heiko.carstens@de.ibm.com>	2011-05-23 04:24:32 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-05-23 04:24:29 -0400
commit		4c2241fd42298007d7c3a92318806a4a9490a93c
tree		13e9af56e8f0badae7c8a238901e542163c115e2 /arch/s390/include/asm
parent		add7490c273650c3aa8012f3e082aa64af86f17c
[S390] percpu: implement arch specific irqsafe_cpu_ops
Implement arch specific irqsafe_cpu ops. The arch specific ops do not
disable and re-enable interrupts, since that is an expensive operation.
Instead we disable preemption and perform a compare-and-swap loop.
Since preemption is disabled on server distros (the ones we care about),
the preempt_disable()/preempt_enable() pair is a nop there.
In the end this code should be faster than the generic version.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
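
The core idea, retrying an atomic compare-and-swap instead of masking
interrupts, can be shown outside the kernel. Below is a standalone C
sketch that uses the GCC/Clang __atomic builtins as a stand-in for the
kernel's cmpxchg(); it illustrates the retry loop only and is not code
from this patch.

/* Standalone illustration of the retry loop applied by the patch:
 * repeat the compare-and-swap until no other update raced with ours.
 * __atomic_compare_exchange_n() writes the current value back into
 * 'old' on failure, so the loop needs no explicit reload. */
#include <stdio.h>

static unsigned long counter;

static void cas_add(unsigned long *ptr, unsigned long val)
{
	unsigned long old = __atomic_load_n(ptr, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(ptr, &old, old + val,
					    0, __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED))
		; /* 'old' was refreshed; retry with the new value */
}

int main(void)
{
	cas_add(&counter, 3);
	printf("%lu\n", counter);	/* prints 3 */
	return 0;
}

Because the loop only retries when another update slipped in between
the load and the store, an interrupt handler touching the same per-cpu
variable costs one extra iteration instead of being locked out.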
Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--	arch/s390/include/asm/cmpxchg.h	 1 -
-rw-r--r--	arch/s390/include/asm/percpu.h	68 +++++++++++++
2 files changed, 68 insertions(+), 1 deletion(-)
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 7488e52efa97..81d7908416cf 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -167,7 +167,6 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
 #ifdef CONFIG_64BIT
 #define cmpxchg64(ptr, o, n)						\
 ({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	cmpxchg((ptr), (o), (n));					\
 })
 #else /* CONFIG_64BIT */
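
The deleted BUILD_BUG_ON() is what makes the new per-cpu macros below
possible: they dispatch on switch (sizeof(*ptr__)), so the cmpxchg64()
expansion is compiled, and type-checked, for per-cpu variables of every
size, and a compile-time size assertion inside it would trip even in the
dead case-8 branch. A hypothetical reduction of the problem (this
BUILD_BUG_ON is a simplified negative-array-size variant, not the
kernel's exact definition):

/* Simplified BUILD_BUG_ON: a negative array size fails the build. */
#define BUILD_BUG_ON(cond)	((void)sizeof(char[1 - 2 * !!(cond)]))

#define checked_op64(p)		BUILD_BUG_ON(sizeof(*(p)) != 8)

void demo(void)
{
	int x;			/* 4 bytes */

	switch (sizeof(x)) {
	case 8:
		/* Dead at run time, but still compiled: with the size
		 * check in place this expansion breaks the build. */
		checked_op64(&x);
		break;
	default:
		break;
	}
}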
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index f7ad8719d02d..5325c89a5843 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -1,6 +1,9 @@
 #ifndef __ARCH_S390_PERCPU__
 #define __ARCH_S390_PERCPU__
 
+#include <linux/preempt.h>
+#include <asm/cmpxchg.h>
+
 /*
  * s390 uses its own implementation for per cpu data, the offset of
  * the cpu local data area is cached in the cpu's lowcore memory.
@@ -16,6 +19,71 @@
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
+#define arch_irqsafe_cpu_to_op(pcp, val, op)				\
+do {									\
+	typedef typeof(pcp) pcp_op_T__;					\
+	pcp_op_T__ old__, new__, prev__;				\
+	pcp_op_T__ *ptr__;						\
+	preempt_disable();						\
+	ptr__ = __this_cpu_ptr(&(pcp));					\
+	prev__ = *ptr__;						\
+	do {								\
+		old__ = prev__;						\
+		new__ = old__ op (val);					\
+		switch (sizeof(*ptr__)) {				\
+		case 8:							\
+			prev__ = cmpxchg64(ptr__, old__, new__);	\
+			break;						\
+		default:						\
+			prev__ = cmpxchg(ptr__, old__, new__);		\
+		}							\
+	} while (prev__ != old__);					\
+	preempt_enable();						\
+} while (0)
+
+#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+
+#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+
+#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+
+#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+
+#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)			\
+({									\
+	typedef typeof(pcp) pcp_op_T__;					\
+	pcp_op_T__ ret__;						\
+	pcp_op_T__ *ptr__;						\
+	preempt_disable();						\
+	ptr__ = __this_cpu_ptr(&(pcp));					\
+	switch (sizeof(*ptr__)) {					\
+	case 8:								\
+		ret__ = cmpxchg64(ptr__, oval, nval);			\
+		break;							\
+	default:							\
+		ret__ = cmpxchg(ptr__, oval, nval);			\
+	}								\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+
 #include <asm-generic/percpu.h>
 
 #endif /* __ARCH_S390_PERCPU__ */
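
For context, a minimal usage sketch of the ops this patch accelerates,
assuming the size-dispatching irqsafe_cpu_add() wrapper that
include/linux/percpu.h provided in this era; the per-cpu variable and
function below are hypothetical:

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, packet_count);	/* hypothetical counter */

/* Callable from both process and interrupt context.  On s390 this now
 * resolves to arch_irqsafe_cpu_to_op(): preemption is disabled and a
 * cmpxchg loop retries the add, so interrupts stay enabled throughout. */
static void note_packet(void)
{
	irqsafe_cpu_add(packet_count, 1);
}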