author    Heiko Carstens <heiko.carstens@de.ibm.com>  2013-10-21 01:44:08 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com> 2013-10-24 11:17:13 -0400
commit    0702fbf572ac5e513873628bf534da4a8a2025b4 (patch)
tree      29a64064d299f04f7fc22e65c5a5c1ecc828ea60 /arch/s390/include
parent    f26946d7ecad0afdd85e6ae56663d0fe26676b34 (diff)
s390/percpu: use generic percpu ops for CONFIG_32BIT
Remove the special cases for the this_cpu_* functions for 32 bit in order
to make it easier to add additional code for 64 bit. 32 bit will use the
generic implementation.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
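For reference, when an architecture does not supply its own this_cpu_* helpers,
the generic per-cpu code falls back to a plain read-modify-write on the local
CPU's copy with preemption disabled, so no atomic instruction is required. A
minimal sketch of that fallback pattern, assuming the usual preempt-protected
scheme (the sketch_ name below is made up for illustration and is not the
kernel's macro):

/*
 * Illustrative sketch only: the generic per-cpu cmpxchg fallback that
 * CONFIG_32BIT now relies on. Correctness comes from disabling preemption
 * around the access to this CPU's copy, not from an atomic instruction.
 */
#define sketch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
	typeof(pcp) ret__; \
	preempt_disable(); \
	ret__ = __this_cpu_read(pcp); \
	if (ret__ == (oval)) \
		__this_cpu_write(pcp, nval); \
	preempt_enable(); \
	ret__; \
})

With 32 bit handled that way, the arch-specific cmpxchg/cmpxchg64 variants
below only need to cover the 64 bit configuration.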
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/percpu.h | 26 +++++++-------------------
1 file changed, 7 insertions(+), 19 deletions(-)
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 86fe0ee2cee5..41baca870d0c 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,12 +10,14 @@
  */
 #define __my_cpu_offset S390_lowcore.percpu_offset
 
+#ifdef CONFIG_64BIT
+
 /*
  * For 64 bit module code, the module may be more than 4G above the
  * per cpu area, use weak definitions to force the compiler to
  * generate external references.
  */
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(MODULE)
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
@@ -30,13 +32,7 @@
 	do { \
 		old__ = prev__; \
 		new__ = old__ op (val); \
-		switch (sizeof(*ptr__)) { \
-		case 8: \
-			prev__ = cmpxchg64(ptr__, old__, new__); \
-			break; \
-		default: \
-			prev__ = cmpxchg(ptr__, old__, new__); \
-		} \
+		prev__ = cmpxchg(ptr__, old__, new__); \
 	} while (prev__ != old__); \
 	preempt_enable(); \
 	new__; \
@@ -74,13 +70,7 @@
 	pcp_op_T__ *ptr__; \
 	preempt_disable(); \
 	ptr__ = __this_cpu_ptr(&(pcp)); \
-	switch (sizeof(*ptr__)) { \
-	case 8: \
-		ret__ = cmpxchg64(ptr__, oval, nval); \
-		break; \
-	default: \
-		ret__ = cmpxchg(ptr__, oval, nval); \
-	} \
+	ret__ = cmpxchg(ptr__, oval, nval); \
 	preempt_enable(); \
 	ret__; \
 })
@@ -104,9 +94,7 @@
 #define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#ifdef CONFIG_64BIT
 #define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#endif
 
 #define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
 ({ \
@@ -124,9 +112,9 @@
 })
 
 #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
-#ifdef CONFIG_64BIT
 #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif
+
+#endif /* CONFIG_64BIT */
 
 #include <asm-generic/percpu.h>
 