author     Paul Mundt <lethal@linux-sh.org>   2006-09-27 03:05:56 -0400
committer  Paul Mundt <lethal@linux-sh.org>   2006-09-27 03:05:56 -0400
commit     00b3aa3fc9bd827caaa859de90d9eba831b77d40 (patch)
tree       303ec8e19c9289b1a54d2c67005ac44f68adcd9d
parent     bc8bff63bacea47561de34e04a17c79846ecfe91 (diff)
sh: xchg()/__xchg() always_inline fixes for gcc4.
Make __xchg() a macro, so that gcc 4.0 doesn't blow up thanks to
always_inline.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
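Why this fixes the build: the old __xchg() was an inline C function dispatching on a size argument, and (per the commit message) gcc 4.0 blows up on it once always_inline is in play. A GNU C statement-expression macro removes the inlining question entirely: the body is expanded textually at each call site, where sizeof(*(ptr)) is a compile-time constant and the switch collapses to one case. Below is a minimal sketch of that pattern, assuming nothing from the kernel tree; the demo_* names are invented for this example, and plain assignments stand in for the IRQ-disabled exchange the real xchg_u32()/xchg_u8() perform.

/*
 * A minimal sketch of the statement-expression pattern the patch adopts.
 * NOT the kernel code: demo_* names are invented, and plain assignments
 * stand in for the IRQ-disabled exchange. Build with gcc (GNU C extensions).
 */
#include <stdio.h>
#include <stdlib.h>

static inline unsigned long demo_xchg_u32(volatile unsigned int *m,
					  unsigned long val)
{
	unsigned long old = *m;		/* the kernel brackets this with */
	*m = (unsigned int)val;		/* local_irq_save()/restore()    */
	return old;
}

static inline unsigned long demo_xchg_u8(volatile unsigned char *m,
					 unsigned long val)
{
	unsigned long old = *m;
	*m = (unsigned char)val;
	return old;
}

/*
 * The kernel only declares __xchg_called_with_bad_pointer() and never
 * defines it, so an unsupported size survives as a link error once the
 * dead switch branches are optimized away. Defining it to abort() keeps
 * this sketch linkable and runnable at any optimization level.
 */
static void demo_xchg_called_with_bad_pointer(void)
{
	abort();
}

/*
 * GNU C statement expression: the body expands textually at the call
 * site, sizeof(*(ptr)) is a compile-time constant there, and the switch
 * collapses to a single case -- no function body exists for gcc's
 * always_inline machinery to trip over.
 */
#define demo_xchg(ptr, x)						\
({									\
	volatile void *__p = (ptr);					\
	unsigned long __res;						\
	switch (sizeof(*(ptr))) {					\
	case 4:								\
		__res = demo_xchg_u32((volatile unsigned int *)__p, (x)); \
		break;							\
	case 1:								\
		__res = demo_xchg_u8((volatile unsigned char *)__p, (x)); \
		break;							\
	default:							\
		demo_xchg_called_with_bad_pointer();			\
		__res = (unsigned long)(x);				\
		break;							\
	}								\
	(__typeof__(*(ptr)))__res;					\
})

int main(void)
{
	volatile unsigned int word = 5;
	volatile unsigned char byte = 7;

	/* each call yields the old value, typed like the pointee */
	printf("%u %u\n", (unsigned)demo_xchg(&word, 9), (unsigned)word); /* 5 9 */
	printf("%u %u\n", (unsigned)demo_xchg(&byte, 3), (unsigned)byte); /* 7 3 */
	return 0;
}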
-rw-r--r--  include/asm-sh/system.h | 57
1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 198d17e3069..bd7dc0554b1 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -79,10 +79,8 @@ static inline void sched_cacheflush(void)
 }
 #endif
 
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
 static __inline__ unsigned long tas(volatile int *m)
-{ /* #define tas(ptr) (xchg((ptr),1)) */
+{
 	unsigned long retval;
 
 	__asm__ __volatile__ ("tas.b	@%1\n\t"
@@ -91,8 +89,6 @@ static __inline__ unsigned long tas(volatile int *m)
 	return retval;
 }
 
-extern void __xchg_called_with_bad_pointer(void);
-
 /*
  * A brief note on ctrl_barrier(), the control register write barrier.
  *
@@ -220,17 +216,17 @@ static __inline__ void local_irq_restore(unsigned long x)
 	}
 }
 #else
-#define local_irq_restore(x) do {	\
-	if ((x & 0x000000f0) != 0x000000f0)	\
-		local_irq_enable();	\
+#define local_irq_restore(x) do {			\
+	if ((x & 0x000000f0) != 0x000000f0)		\
+		local_irq_enable();			\
 } while (0)
 #endif
 
-#define really_restore_flags(x) do {	\
-	if ((x & 0x000000f0) != 0x000000f0)	\
-		local_irq_enable();	\
-	else	\
-		local_irq_disable();	\
+#define really_restore_flags(x) do {			\
+	if ((x & 0x000000f0) != 0x000000f0)		\
+		local_irq_enable();			\
+	else						\
+		local_irq_disable();			\
 } while (0)
 
 /*
@@ -272,7 +268,7 @@ do { \
 /* For spinlocks etc */
 #define local_irq_save(x)	x = local_irq_save()
 
-static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long flags, retval;
 
@@ -283,7 +279,7 @@ static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
 	return retval;
 }
 
-static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
 	unsigned long flags, retval;
 
@@ -294,19 +290,30 @@ static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned lon
 	return retval;
 }
 
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 4:
-		return xchg_u32(ptr, x);
-		break;
-	case 1:
-		return xchg_u8(ptr, x);
-		break;
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
+extern void __xchg_called_with_bad_pointer(void);
+
+#define __xchg(ptr, x, size)				\
+({							\
+	unsigned long __xchg__res;			\
+	volatile void *__xchg_ptr = (ptr);		\
+	switch (size) {					\
+	case 4:						\
+		__xchg__res = xchg_u32(__xchg_ptr, x);	\
+		break;					\
+	case 1:						\
+		__xchg__res = xchg_u8(__xchg_ptr, x);	\
+		break;					\
+	default:					\
+		__xchg_called_with_bad_pointer();	\
+		__xchg__res = x;			\
+		break;					\
+	}						\
+							\
+	__xchg__res;					\
+})
+
+#define xchg(ptr,x)	\
+	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
 
 static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 	unsigned long new)
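One observable change inside the header: __xchg()'s argument order moved from (x, ptr, size) to (ptr, x, size), but the public xchg(ptr, x) wrapper hides that and still casts the result back to the pointee's type via __typeof__. A hedged usage sketch, assuming a kernel context where this header and the u32 typedef are in scope; demo_lock_word, demo_lock and demo_unlock are made-up names for illustration.

static volatile u32 demo_lock_word;	/* 0 = free, 1 = held */

static inline void demo_lock(void)
{
	/* swap in 1; reading back the old value 0 means we took the lock */
	while (xchg(&demo_lock_word, 1) != 0)
		; /* spin */
}

static inline void demo_unlock(void)
{
	xchg(&demo_lock_word, 0);	/* old value intentionally discarded */
}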