author		Paul Mundt <lethal@linux-sh.org>	2010-01-08 03:02:17 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-01-08 03:02:17 -0500
commit		8c0b8139c87cfe8b95c6e763b4ca3190aa9b1ad0 (patch)
tree		79ee3c612843d11d5b3fbebb8d00ca956a044316 /arch/sh/include/asm/atomic-grb.h
parent		56d45b62ce622a003da972428fdbba2b42102efb (diff)
sh: consolidate atomic_cmpxchg()/atomic_add_unless() definitions.
The LL/SC and IRQ versions were using generic stubs while the GRB version
was just reimplementing what it already had for the standard cmpxchg()
code. As we have optimized cmpxchg() implementations that are decoupled
from the atomic code, simply falling back on the generic wrapper does the
right thing. With this in place the GRB case is unaffected while the
LL/SC case gets to use its optimized cmpxchg().
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
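For context, the generic wrappers that the LL/SC and IRQ variants now fall back on are built directly on cmpxchg(). In kernels of this vintage the pattern looks roughly like the sketch below; this is illustrative, not part of this diff, and atomic_read() and cmpxchg() are the kernel's own primitives:

	/* Sketch of the cmpxchg()-based fallbacks this patch consolidates on.
	 * Names match the kernel's, but this is not the exact arch/sh code. */
	#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))

	static inline int atomic_add_unless(atomic_t *v, int a, int u)
	{
		int c, old;

		c = atomic_read(v);
		/* Retry until v is either u (give up) or we swap in c + a. */
		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
			c = old;

		return c != u;	/* nonzero if the add was performed */
	}

Since cmpxchg() is itself the per-CPU-family optimized primitive, each atomic variant inherits the best available implementation for free, which is the point of the consolidation.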
Diffstat (limited to 'arch/sh/include/asm/atomic-grb.h')
-rw-r--r--	arch/sh/include/asm/atomic-grb.h	46
1 file changed, 0 insertions(+), 46 deletions(-)
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index 4c5b7dbfcedb..a273c88578fc 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -120,50 +120,4 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 		: "memory" , "r0", "r1");
 }
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int ret;
-
-	__asm__ __volatile__ (
-		"   .align 2		\n\t"
-		"   mova    1f,   r0	\n\t"
-		"   nop			\n\t"
-		"   mov    r15,   r1	\n\t"
-		"   mov    #-8,   r15	\n\t"
-		"   mov.l  @%1,   %0	\n\t"
-		"   cmp/eq  %2,   %0	\n\t"
-		"   bf      1f		\n\t"
-		"   mov.l   %3,  @%1	\n\t"
-		"1: mov     r1,   r15	\n\t"
-		: "=&r" (ret)
-		: "r" (v), "r" (old), "r" (new)
-		: "memory" , "r0", "r1" , "t");
-
-	return ret;
-}
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int ret;
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-		"   .align 2		\n\t"
-		"   mova    1f,   r0	\n\t"
-		"   nop			\n\t"
-		"   mov    r15,   r1	\n\t"
-		"   mov    #-12,  r15	\n\t"
-		"   mov.l  @%2,   %1	\n\t"
-		"   mov     %1,   %0	\n\t"
-		"   cmp/eq  %4,   %0	\n\t"
-		"   bt/s    1f		\n\t"
-		"   add     %3,   %1	\n\t"
-		"   mov.l   %1,  @%2	\n\t"
-		"1: mov     r1,   r15	\n\t"
-		: "=&r" (ret), "=&r" (tmp)
-		: "r" (v), "r" (a), "r" (u)
-		: "memory" , "r0", "r1" , "t");
-
-	return ret != u;
-}
 #endif /* __ASM_SH_ATOMIC_GRB_H */
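A note on the removed sequences: they follow the gUSA-style restartable-block convention (as one reading of the asm suggests): mova loads r0 with the address of the terminating label 1, and setting r15 to a small negative value marks the length of the critical block, so an interrupted sequence can be rolled back to its start and rerun instead of taking a lock. The same trick lives on in the standalone GRB cmpxchg() that these functions now reuse. The semantics being preserved by the consolidation can be modeled in plain userspace C; the sketch below stands in for the kernel's cmpxchg() with GCC's __sync_val_compare_and_swap() builtin, and the names and the test in main() are hypothetical:

	/* Userspace model of atomic_add_unless() semantics; for
	 * illustration only -- not kernel code. Build with gcc. */
	#include <stdio.h>

	typedef struct { volatile int counter; } atomic_t;

	static int atomic_cmpxchg(atomic_t *v, int old, int new)
	{
		/* Returns the value observed at v->counter; the store of
		 * 'new' happens only if that value equalled 'old'. */
		return __sync_val_compare_and_swap(&v->counter, old, new);
	}

	static int atomic_add_unless(atomic_t *v, int a, int u)
	{
		int c, old;

		c = v->counter;
		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
			c = old;

		return c != u;	/* nonzero if the add was performed */
	}

	int main(void)
	{
		atomic_t v = { .counter = 1 };

		/* v != 0, so the add happens: prints "1 2" */
		printf("%d %d\n", atomic_add_unless(&v, 1, 0), v.counter);

		v.counter = 0;
		/* v == u, so nothing happens: prints "0 0" */
		printf("%d %d\n", atomic_add_unless(&v, 1, 0), v.counter);
		return 0;
	}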