Diffstat (limited to 'include/asm-sh/atomic.h')
 -rw-r--r--  include/asm-sh/atomic.h | 106
 1 file changed, 97 insertions(+), 9 deletions(-)
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index fb627de217f2..8bdc1ba56f73 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -14,6 +14,7 @@ typedef struct { volatile int counter; } atomic_t;
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		((v)->counter = (i))
 
+#include <linux/compiler.h>
 #include <asm/system.h>
 
 /*
@@ -21,49 +22,110 @@ typedef struct { volatile int counter; } atomic_t;
  * forward to code at the end of this object's .text section, then
  * branch back to restart the operation.
  */
-
-static __inline__ void atomic_add(int i, atomic_t * v)
+static inline void atomic_add(int i, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_add	\n"
+"	add	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;
 
 	local_irq_save(flags);
 	*(long *)v += i;
 	local_irq_restore(flags);
+#endif
 }
 
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_sub	\n"
+"	sub	%2, %0			\n"
+"	movco.l %0, @%3			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;
 
 	local_irq_save(flags);
 	*(long *)v -= i;
 	local_irq_restore(flags);
+#endif
 }
 
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
 {
-	unsigned long temp, flags;
+	unsigned long temp;
+
+#ifdef CONFIG_CPU_SH4A
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_add_return	\n"
+"	add	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+"	synco				\n"
+	: "=&z" (temp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
+	unsigned long flags;
 
 	local_irq_save(flags);
 	temp = *(long *)v;
 	temp += i;
 	*(long *)v = temp;
 	local_irq_restore(flags);
+#endif
 
 	return temp;
 }
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	unsigned long temp, flags;
+	unsigned long temp;
+
+#ifdef CONFIG_CPU_SH4A
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_sub_return	\n"
+"	sub	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+"	synco				\n"
+	: "=&z" (temp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
+	unsigned long flags;
 
 	local_irq_save(flags);
 	temp = *(long *)v;
 	temp -= i;
 	*(long *)v = temp;
 	local_irq_restore(flags);
+#endif
 
 	return temp;
 }
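
[Reviewer's note] A word on the movli.l/movco.l pairs above: movli.l loads
v->counter into r0 and sets a hardware reservation, and movco.l writes r0
back only if that reservation is still intact, setting the T bit on success;
"bf 1b" branches back and retries until the store goes through. The trailing
"synco" in the *_return variants is SH-4A's memory barrier instruction. As a
rough C-level sketch of what this retry loop achieves -- an illustration
using GCC's __atomic builtins, not the kernel's actual code:

/*
 * Hedged sketch of an LL/SC-style retry loop in portable C.
 * Illustration only; the kernel uses the inline asm shown above.
 */
static inline int sketch_atomic_add_return(int i, int *counter)
{
	int old, new;

	do {
		old = __atomic_load_n(counter, __ATOMIC_RELAXED);	/* ~ movli.l */
		new = old + i;						/* ~ add     */
		/* Retry if *counter changed in between, ~ movco.l + bf 1b.
		 * (Real LL/SC can also fail spuriously; CAS is value-based.) */
	} while (!__atomic_compare_exchange_n(counter, &old, new, 0,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));

	__atomic_thread_fence(__ATOMIC_SEQ_CST);			/* ~ synco */
	return new;
}
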
@@ -118,22 +180,48 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_clear_mask	\n"
+"	and	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (~mask), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;
 
 	local_irq_save(flags);
 	*(long *)v &= ~mask;
 	local_irq_restore(flags);
+#endif
 }
 
-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_set_mask	\n"
+"	or	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (mask), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;
 
 	local_irq_save(flags);
 	*(long *)v |= mask;
 	local_irq_restore(flags);
+#endif
 }
 
 /* Atomic operations are already serializing on SH */
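
[Reviewer's note] The mask helpers in the last hunk reuse the same LL/SC loop
with the bitwise op folded in; note that atomic_clear_mask passes the
complemented mask (~mask) so the loop can use a plain "and". A short usage
sketch of the interfaces this patch touches; the MY_FLAG_* bits and the
function are made up for illustration and are not part of this header:

#include <asm/atomic.h>

#define MY_FLAG_BUSY	(1 << 0)	/* hypothetical flag bits, */
#define MY_FLAG_ERROR	(1 << 1)	/* illustration only       */

static atomic_t my_state = ATOMIC_INIT(0);

static void my_example(void)
{
	atomic_set_mask(MY_FLAG_BUSY, &my_state);	/* atomic OR of a bit */

	/* atomic_add_return() hands back the post-add value, which the
	 * SH-4A comment above notes comes "for free" from r0 */
	if (atomic_add_return(1, &my_state) > 1)
		atomic_set_mask(MY_FLAG_ERROR, &my_state);

	atomic_clear_mask(MY_FLAG_BUSY, &my_state);	/* atomic AND with ~mask */
}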
