author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-28 10:56:43 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-28 10:56:43 -0400
commit		94c12cc7d196bab34aaa98d38521549fa1e5ef76 (patch)
tree		8e0cec0ed44445d74a2cb5160303d6b4dfb1bc31 /include/asm-s390/atomic.h
parent		25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc (diff)
[S390] Inline assembly cleanup.
Major cleanup of all s390 inline assemblies. They now have a common
coding style. Quite a few have been shortened, mainly by using register
asm variables. Use of the EX_TABLE macro helps as well. The atomic ops,
bit ops and locking inlines now use the Q-constraint if a newer gcc
is used. That results in slightly better code.
Thanks to Christian Borntraeger for proofreading the changes.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
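
As background for the "register asm variables" mentioned above, here is a
minimal sketch of the gcc idiom (the function and values are hypothetical,
not taken from this patch): pinning a C variable to a fixed general
register lets the assembly template operate on it directly, without extra
address or copy operands.

/* Illustrative only -- gcc's "register asm" extension binds a C
 * variable to a named hardware register, so the asm can use it via a
 * plain "d" constraint with no setup moves. */
static inline int double_it(int x)
{
	register int val asm("2") = x;	/* pin val to general register 2 */

	asm volatile(
		"	ar	%0,%0"	/* val += val, sets condition code */
		: "+d" (val)
		:
		: "cc");
	return val;
}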
Diffstat (limited to 'include/asm-s390/atomic.h')
-rw-r--r--	include/asm-s390/atomic.h	120
1 files changed, 92 insertions, 28 deletions
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index 399bf02894dd..af20c7462485 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -30,20 +30,43 @@ typedef struct {
 
 #ifdef __KERNEL__
 
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+
 #define __CS_LOOP(ptr, op_val, op_string) ({				\
 	typeof(ptr->counter) old_val, new_val;				\
-	__asm__ __volatile__("   l     %0,0(%3)\n"			\
-			     "0: lr    %1,%0\n"				\
-			     op_string " %1,%4\n"			\
-			     "   cs    %0,%1,0(%3)\n"			\
-			     "   jl    0b"				\
-			     : "=&d" (old_val), "=&d" (new_val),	\
-			       "=m" (((atomic_t *)(ptr))->counter)	\
-			     : "a" (ptr), "d" (op_val),			\
-			       "m" (((atomic_t *)(ptr))->counter)	\
-			     : "cc", "memory" );			\
+	asm volatile(							\
+		"	l	%0,%2\n"				\
+		"0:	lr	%1,%0\n"				\
+		op_string "	%1,%3\n"				\
+		"	cs	%0,%1,%2\n"				\
+		"	jl	0b"					\
+		: "=&d" (old_val), "=&d" (new_val),			\
+		  "=Q" (((atomic_t *)(ptr))->counter)			\
+		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
+		: "cc", "memory");					\
 	new_val;							\
 })
+
+#else /* __GNUC__ */
+
+#define __CS_LOOP(ptr, op_val, op_string) ({				\
+	typeof(ptr->counter) old_val, new_val;				\
+	asm volatile(							\
+		"	l	%0,0(%3)\n"				\
+		"0:	lr	%1,%0\n"				\
+		op_string "	%1,%4\n"				\
+		"	cs	%0,%1,0(%3)\n"				\
+		"	jl	0b"					\
+		: "=&d" (old_val), "=&d" (new_val),			\
+		  "=m" (((atomic_t *)(ptr))->counter)			\
+		: "a" (ptr), "d" (op_val),				\
+		  "m" (((atomic_t *)(ptr))->counter)			\
+		: "cc", "memory");					\
+	new_val;							\
+})
+
+#endif /* __GNUC__ */
+
 #define atomic_read(v)          ((v)->counter)
 #define atomic_set(v,i)         (((v)->counter) = (i))
 
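For context, a sketch of how the header consumes this macro (these
wrappers are not part of the hunk above, and the exact op strings are an
assumption): each arithmetic helper passes the RR-format instruction to
run between the load and the compare-and-swap retry.

/* Sketch only: the unchanged parts of atomic.h instantiate the loop
 * roughly like this, with op_string naming a 32-bit RR instruction. */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");	/* ar: add register */
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");	/* sr: subtract register */
}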
@@ -81,10 +104,19 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 
 static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	__asm__ __volatile__("  cs   %0,%3,0(%2)\n"
-			     : "+d" (old), "=m" (v->counter)
-			     : "a" (v), "d" (new), "m" (v->counter)
-			     : "cc", "memory" );
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+	asm volatile(
+		"	cs	%0,%2,%1"
+		: "+d" (old), "=Q" (v->counter)
+		: "d" (new), "Q" (v->counter)
+		: "cc", "memory");
+#else /* __GNUC__ */
+	asm volatile(
+		"	cs	%0,%3,0(%2)"
+		: "+d" (old), "=m" (v->counter)
+		: "a" (v), "d" (new), "m" (v->counter)
+		: "cc", "memory");
+#endif /* __GNUC__ */
 	return old;
 }
 
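A typical caller of atomic_cmpxchg(), for context (a sketch modeled on
the generic atomic_add_unless() pattern; the helper name is illustrative):
CS stores the new value only if the counter still equals the expected old
value, and on a mismatch returns the current contents so the caller can
retry.

/* Illustrative retry loop built on atomic_cmpxchg(): add a to v
 * unless the counter currently holds u. */
static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;	/* lost the race: retry with fresh value */
	return c != u;
}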
@@ -113,20 +145,43 @@ typedef struct {
 } __attribute__ ((aligned (8))) atomic64_t;
 #define ATOMIC64_INIT(i) { (i) }
 
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+
 #define __CSG_LOOP(ptr, op_val, op_string) ({				\
 	typeof(ptr->counter) old_val, new_val;				\
-	__asm__ __volatile__("   lg    %0,0(%3)\n"			\
-			     "0: lgr   %1,%0\n"				\
-			     op_string " %1,%4\n"			\
-			     "   csg   %0,%1,0(%3)\n"			\
-			     "   jl    0b"				\
-			     : "=&d" (old_val), "=&d" (new_val),	\
-			       "=m" (((atomic_t *)(ptr))->counter)	\
-			     : "a" (ptr), "d" (op_val),			\
-			       "m" (((atomic_t *)(ptr))->counter)	\
-			     : "cc", "memory" );			\
+	asm volatile(							\
+		"	lg	%0,%2\n"				\
+		"0:	lgr	%1,%0\n"				\
+		op_string "	%1,%3\n"				\
+		"	csg	%0,%1,%2\n"				\
+		"	jl	0b"					\
+		: "=&d" (old_val), "=&d" (new_val),			\
+		  "=Q" (((atomic_t *)(ptr))->counter)			\
+		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
+		: "cc", "memory" );					\
 	new_val;							\
 })
+
+#else /* __GNUC__ */
+
+#define __CSG_LOOP(ptr, op_val, op_string) ({				\
+	typeof(ptr->counter) old_val, new_val;				\
+	asm volatile(							\
+		"	lg	%0,0(%3)\n"				\
+		"0:	lgr	%1,%0\n"				\
+		op_string "	%1,%4\n"				\
+		"	csg	%0,%1,0(%3)\n"				\
+		"	jl	0b"					\
+		: "=&d" (old_val), "=&d" (new_val),			\
+		  "=m" (((atomic_t *)(ptr))->counter)			\
+		: "a" (ptr), "d" (op_val),				\
+		  "m" (((atomic_t *)(ptr))->counter)			\
+		: "cc", "memory" );					\
+	new_val;							\
+})
+
+#endif /* __GNUC__ */
+
 #define atomic64_read(v)          ((v)->counter)
 #define atomic64_set(v,i)         (((v)->counter) = (i))
 
@@ -163,10 +218,19 @@ static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
 					     long long old, long long new)
 {
-	__asm__ __volatile__("  csg  %0,%3,0(%2)\n"
-			     : "+d" (old), "=m" (v->counter)
-			     : "a" (v), "d" (new), "m" (v->counter)
-			     : "cc", "memory" );
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+	asm volatile(
+		"	csg	%0,%2,%1"
+		: "+d" (old), "=Q" (v->counter)
+		: "d" (new), "Q" (v->counter)
+		: "cc", "memory");
+#else /* __GNUC__ */
+	asm volatile(
+		"	csg	%0,%3,0(%2)"
+		: "+d" (old), "=m" (v->counter)
+		: "a" (v), "d" (new), "m" (v->counter)
+		: "cc", "memory");
+#endif /* __GNUC__ */
 	return old;
 }
 
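All four hunks follow the same pattern, so one compact sketch of what the
Q-constraint buys (hypothetical helper; assumes a gcc new enough to accept
"Q" on s390): the compiler materializes a base-plus-displacement memory
operand itself, so the address no longer has to be passed in an address
register and spelled as 0(%n) in the template.

/* Minimal cs-based increment using the Q constraint; gcc substitutes
 * a short base+displacement operand for %2, no "a" operand needed. */
static inline void atomic_inc_sketch(atomic_t *v)
{
	int old, new;

	asm volatile(
		"	l	%0,%2\n"	/* old = v->counter */
		"0:	lr	%1,%0\n"	/* new = old */
		"	ahi	%1,1\n"		/* new++ */
		"	cs	%0,%1,%2\n"	/* swap if still old */
		"	jl	0b"		/* otherwise retry */
		: "=&d" (old), "=&d" (new), "+Q" (v->counter)
		:
		: "cc", "memory");
}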