path: root/include/asm-mips/system.h
author	Ralf Baechle <ralf@linux-mips.org>	2006-09-27 20:45:21 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2007-02-13 17:40:50 -0500
commit	f65e4fa8e0c6022ad58dc88d1b11b12589ed7f9f (patch)
tree	2405e012e079693e0fcfde9ff981c549d6c68a21 /include/asm-mips/system.h
parent	509cb37e173d4e39cec47238397e91b718730794 (diff)
[MIPS] Improve branch prediction in ll/sc atomic operations.
Now that finally all supported versions of binutils have functioning support for .subsection, use .subsection to tweak the branch prediction.

I did not modify the R10000 errata variants because it seems unclear if this will invalidate the workaround, which actually relies on the cheesy prediction of branch likely to cause a mispredict if the sc was successful.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
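For illustration, here is a minimal sketch of the pattern this change applies, written in the style of __xchg_u32 below; the helper name xchg_u32_sketch is made up, but the operands and constraints mirror the kernel code in the diff. The sc failure branch now jumps forward into .subsection 2, so the rare retry path is emitted out of line and the common success path falls straight through on a forward branch that most cores statically predict as not taken.

/*
 * Minimal sketch (illustrative helper, not a kernel API): a 32-bit
 * atomic exchange whose sc failure branch is moved out of line with
 * .subsection, as this patch does for the real __xchg_u32.
 */
static inline unsigned int xchg_u32_sketch(volatile int *m, unsigned int val)
{
	unsigned int retval;
	unsigned long dummy;

	__asm__ __volatile__(
	"	.set	mips3					\n"
	"1:	ll	%0, %3	# load linked			\n"
	"	move	%2, %z4					\n"
	"	sc	%2, %1	# store conditional		\n"
	"	beqz	%2, 2f	# failed: branch out of line	\n"
	"	.subsection 2					\n"
	"2:	b	1b	# retry, off the hot path	\n"
	"	.previous					\n"
	"	.set	mips0					\n"
	: "=&r" (retval), "=m" (*m), "=&r" (dummy)
	: "R" (*m), "Jr" (val)
	: "memory");

	return retval;
}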
Diffstat (limited to 'include/asm-mips/system.h')
-rw-r--r--	include/asm-mips/system.h | 20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 5e1289c85ed9..597a3743f6a1 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -110,7 +110,10 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
110 " move %2, %z4 \n" 110 " move %2, %z4 \n"
111 " .set mips3 \n" 111 " .set mips3 \n"
112 " sc %2, %1 \n" 112 " sc %2, %1 \n"
113 " beqz %2, 1b \n" 113 " beqz %2, 2f \n"
114 " .subsection 2 \n"
115 "2: b 1b \n"
116 " .previous \n"
114 " .set mips0 \n" 117 " .set mips0 \n"
115 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 118 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
116 : "R" (*m), "Jr" (val) 119 : "R" (*m), "Jr" (val)
@@ -155,7 +158,10 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
155 "1: lld %0, %3 # xchg_u64 \n" 158 "1: lld %0, %3 # xchg_u64 \n"
156 " move %2, %z4 \n" 159 " move %2, %z4 \n"
157 " scd %2, %1 \n" 160 " scd %2, %1 \n"
158 " beqz %2, 1b \n" 161 " beqz %2, 2f \n"
162 " .subsection 2 \n"
163 "2: b 1b \n"
164 " .previous \n"
159 " .set mips0 \n" 165 " .set mips0 \n"
160 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 166 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
161 : "R" (*m), "Jr" (val) 167 : "R" (*m), "Jr" (val)
@@ -232,8 +238,11 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
232 " move $1, %z4 \n" 238 " move $1, %z4 \n"
233 " .set mips3 \n" 239 " .set mips3 \n"
234 " sc $1, %1 \n" 240 " sc $1, %1 \n"
235 " beqz $1, 1b \n" 241 " beqz $1, 3f \n"
236 "2: \n" 242 "2: \n"
243 " .subsection 2 \n"
244 "3: b 1b \n"
245 " .previous \n"
237 " .set pop \n" 246 " .set pop \n"
238 : "=&r" (retval), "=R" (*m) 247 : "=&r" (retval), "=R" (*m)
239 : "R" (*m), "Jr" (old), "Jr" (new) 248 : "R" (*m), "Jr" (old), "Jr" (new)
@@ -283,8 +292,11 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
283 " bne %0, %z3, 2f \n" 292 " bne %0, %z3, 2f \n"
284 " move $1, %z4 \n" 293 " move $1, %z4 \n"
285 " scd $1, %1 \n" 294 " scd $1, %1 \n"
286 " beqz $1, 1b \n" 295 " beqz $1, 3f \n"
287 "2: \n" 296 "2: \n"
297 " .subsection 2 \n"
298 "3: b 1b \n"
299 " .previous \n"
288 " .set pop \n" 300 " .set pop \n"
289 : "=&r" (retval), "=R" (*m) 301 : "=&r" (retval), "=R" (*m)
290 : "R" (*m), "Jr" (old), "Jr" (new) 302 : "R" (*m), "Jr" (old), "Jr" (new)