path: root/include/asm-sh
author     Paul Mundt <lethal@linux-sh.org>    2006-12-01 00:32:54 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2006-12-05 20:45:40 -0500
commit     c03c69610bfa728805deceeb624ee4268c722a5a
tree       07085eab7196c1e7219473d5a164a0569bce9611 /include/asm-sh
parent     bd156147eb63ae525e0ac67868e41a808f03c532
sh: Fixup movli.l/movco.l atomic ops for gcc4.
gcc4 gets a bit pissy about the outputs:

  include/asm/atomic.h: In function 'atomic_add':
  include/asm/atomic.h:37: error: invalid lvalue in asm statement
  include/asm/atomic.h:30: error: invalid lvalue in asm output 1
  ...

this ended up being a thinko anyways, so just fix it up. Verified for proper
behaviour with the older toolchains, too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
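To make the fix concrete, here is a minimal sketch of the post-patch constraint
layout (the standalone form and the comment wording are illustrative; the real
code lives in include/asm-sh/atomic.h under CONFIG_CPU_SH4A). Passing
&v->counter only as an input removes the address-of expression that gcc4
rejects as an invalid lvalue output, and the operands renumber to %0 = tmp,
%1 = i, %2 = &v->counter:

/*
 * Sketch of the corrected atomic_add() pattern, assuming an SH4-A
 * cross-toolchain; atomic_t is reduced to its counter field here.
 */
typedef struct { volatile int counter; } atomic_t;

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! load-linked from v->counter	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2		! store-conditional		\n"
"	bf	1b		! retry if the store failed	\n"
	: "=&z" (tmp)			/* %0: scratch, tied to r0 */
	: "r" (i), "r" (&v->counter)	/* %1: addend, %2: address, input only */
	: "t");
}

The store back to v->counter happens through movco.l via the input address, so
no memory output operand is needed; the "t" clobber covers the T bit that
movco.l sets and bf tests.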
Diffstat (limited to 'include/asm-sh')
-rw-r--r--   include/asm-sh/atomic.h   48
1 file changed, 24 insertions, 24 deletions
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 8bdc1ba56f73..28305c3cbddf 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -28,11 +28,11 @@ static inline void atomic_add(int i, atomic_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_add	\n"
-"	add	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_add	\n"
+"	add	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -50,11 +50,11 @@ static inline void atomic_sub(int i, atomic_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_sub	\n"
-"	sub	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_sub	\n"
+"	sub	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -80,12 +80,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_add_return	\n"
-"	add	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_add_return	\n"
+"	add	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
 "	synco					\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -109,12 +109,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_sub_return	\n"
-"	sub	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_sub_return	\n"
+"	sub	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
 "	synco					\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -186,11 +186,11 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_clear_mask	\n"
-"	and	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
+"	and	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (~mask), "r" (&v->counter)
 	: "t");
 #else
@@ -208,11 +208,11 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_set_mask	\n"
-"	or	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_set_mask	\n"
+"	or	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (mask), "r" (&v->counter)
 	: "t");
 #else