path: root/include/asm-sh/atomic.h
author	Paul Mundt <lethal@linux-sh.org>	2006-09-27 04:52:19 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2006-09-27 04:52:19 -0400
commit	781125ca58dfbd47635cfc0e408f1f9d7e10b227 (patch)
tree	92d44b262478d5cece046ff661694b1109ab1e9d /include/asm-sh/atomic.h
parent	15f57a29a19ad0dbb468363cb617b06f71f6de92 (diff)
sh: New atomic ops for SH-4A movli.l/movco.l
SH-4A implements LL/SC instructions, so we implement a simple set of
atomic operations using these.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
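For readers unfamiliar with the LL/SC pattern the patch relies on, here is a minimal C sketch of the same retry shape. This is not the patch's code: LL/SC has no portable C spelling, so the reservation check done by movli.l/movco.l is emulated with GCC's __atomic builtins, and atomic_add_sketch() is an illustrative name only.

/*
 * Sketch of the retry loop the patch writes in SH-4A assembly.
 * The reservation check performed by movco.l is emulated with a
 * compare-and-swap: load the counter, compute the new value, and
 * retry if another CPU modified the counter in the meantime.
 */
#include <stdio.h>

static void atomic_add_sketch(int i, int *counter)
{
	int old, new;

	do {
		old = __atomic_load_n(counter, __ATOMIC_RELAXED); /* movli.l @counter, r0 */
		new = old + i;                                    /* add     i, r0        */
	} while (!__atomic_compare_exchange_n(counter, &old, new, 1,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED)); /* movco.l r0, @counter; bf 1b */
}

int main(void)
{
	int v = 40;

	atomic_add_sketch(2, &v);
	printf("%d\n", v);	/* 42 */
	return 0;
}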
Diffstat (limited to 'include/asm-sh/atomic.h')
-rw-r--r--	include/asm-sh/atomic.h	105
1 file changed, 96 insertions, 9 deletions
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 049eb2dda6b6..8bdc1ba56f73 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -22,49 +22,110 @@ typedef struct { volatile int counter; } atomic_t;
  * forward to code at the end of this object's .text section, then
  * branch back to restart the operation.
  */
-
-static __inline__ void atomic_add(int i, atomic_t * v)
+static inline void atomic_add(int i, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_add	\n"
+"	add	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;

 	local_irq_save(flags);
 	*(long *)v += i;
 	local_irq_restore(flags);
+#endif
 }

-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_sub	\n"
+"	sub	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;

 	local_irq_save(flags);
 	*(long *)v -= i;
 	local_irq_restore(flags);
+#endif
 }

-static __inline__ int atomic_add_return(int i, atomic_t * v)
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
 {
-	unsigned long temp, flags;
+	unsigned long temp;
+
+#ifdef CONFIG_CPU_SH4A
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_add_return	\n"
+"	add	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+"	synco				\n"
+	: "=&z" (temp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
+	unsigned long flags;

 	local_irq_save(flags);
 	temp = *(long *)v;
 	temp += i;
 	*(long *)v = temp;
 	local_irq_restore(flags);
+#endif

 	return temp;
 }

 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	unsigned long temp, flags;
+	unsigned long temp;
+
+#ifdef CONFIG_CPU_SH4A
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_sub_return	\n"
+"	sub	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+"	synco				\n"
+	: "=&z" (temp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
+	unsigned long flags;

 	local_irq_save(flags);
 	temp = *(long *)v;
 	temp -= i;
 	*(long *)v = temp;
 	local_irq_restore(flags);
+#endif

 	return temp;
 }
@@ -119,22 +180,48 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_clear_mask	\n"
+"	and	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (~mask), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;

 	local_irq_save(flags);
 	*(long *)v &= ~mask;
 	local_irq_restore(flags);
+#endif
 }

-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%3, %0	! atomic_set_mask	\n"
+"	or	%2, %0			\n"
+"	movco.l	%0, @%3			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (mask), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;

 	local_irq_save(flags);
 	*(long *)v |= mask;
 	local_irq_restore(flags);
+#endif
 }

 /* Atomic operations are already serializing on SH */