author		Joshua Kinard <kumba@gentoo.org>	2012-06-24 21:01:34 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2012-10-11 05:02:36 -0400
commit		b4f2a17ba96a79f1069a2c0f1c648cf6d497f2f3
tree		75f44d8efb4748d9639869dc7089533b0964b037
parent		12250d843e8489ee00b5b7726da855e51694e792
Improve atomic.h robustness
I've maintained this patch, originally from Thiemo Seufer in 2004, for a
long time, and I think it is time for it to be considered for inclusion.
I have had no problems with it across various SGI systems over the years.

To quote the original post:
http://www.linux-mips.org/archives/linux-mips/2004-12/msg00000.html
"the atomic functions use so far memory references for the inline
assembler to access the semaphore. This can lead to additional
instructions in the ll/sc loop, because newer compilers don't
expand the memory reference any more but leave it to the assembler.
The appended patch uses registers instead, and makes the ll/sc
arguments more explicit. In some cases it will lead also to better
register scheduling because the register isn't bound to an output
any more."
Signed-off-by: Joshua Kinard <kumba@gentoo.org>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/4029/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
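
[Editor's note] To make the quoted point concrete, here is a minimal
standalone sketch of the before/after inline-assembler shape. The
demo_add_* functions and the file name are hypothetical, not kernel code;
it assumes a MIPS-targeting GCC (e.g. mips-linux-gnu-gcc -O2 -c
constraint-demo.c):

	/* Before: a separate "=m" output and "m" input for the same location.
	 * The compiler leaves the memory operand to the assembler, which can
	 * add instructions inside the ll/sc loop. */
	void demo_add_before(int i, int *counter)
	{
		int temp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		# load-linked		\n"
		"	addu	%0, %2		# temp += i		\n"
		"	sc	%0, %1		# store-conditional	\n"
		"	beqz	%0, 1b		# retry if sc failed	\n"
		: "=&r" (temp), "=m" (*counter)
		: "Ir" (i), "m" (*counter));
	}

	/* After: a single "+m" read-write operand; the duplicate "m" input
	 * disappears and the ll/sc arguments are explicit. */
	void demo_add_after(int i, int *counter)
	{
		int temp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		# load-linked		\n"
		"	addu	%0, %2		# temp += i		\n"
		"	sc	%0, %1		# store-conditional	\n"
		"	beqz	%0, 1b		# retry if sc failed	\n"
		: "=&r" (temp), "+m" (*counter)
		: "Ir" (i));
	}

With "+m" the compiler knows the operand is both read and written, so no
extra input operand has to shadow the output; disassembling the two
objects (objdump -d) is the easiest way to see whether the loop shrinks.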
-rw-r--r--	arch/mips/include/asm/atomic.h | 64
1 file changed, 29 insertions(+), 35 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 3f4c5cb6433e..01cc6ba64831 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -59,8 +59,8 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		"	sc	%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		int temp;
 
@@ -71,8 +71,8 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		"	addu	%0, %2				\n"
 		"	sc	%0, %1				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
@@ -102,8 +102,8 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		"	sc	%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		int temp;
 
@@ -114,8 +114,8 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		"	subu	%0, %2				\n"
 		"	sc	%0, %1				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
@@ -146,9 +146,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	beqzl	%0, 1b				\n"
 		"	addu	%0, %1, %3			\n"
 		"	.set	mips0				\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		int temp;
 
@@ -159,9 +158,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	addu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
 		"	.set	mips0				\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 		} while (unlikely(!result));
 
 		result = temp + i;
@@ -212,9 +210,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		"	subu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
 		"	.set	mips0				\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 		} while (unlikely(!result));
 
 		result = temp - i;
@@ -262,7 +259,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	.set	reorder				\n"
 		"1:						\n"
 		"	.set	mips0				\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else if (kernel_uses_llsc) {
@@ -280,9 +277,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	.set	reorder				\n"
 		"1:						\n"
 		"	.set	mips0				\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else {
 		unsigned long flags;
 
@@ -430,8 +426,8 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		"	scd	%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		long temp;
 
@@ -442,8 +438,8 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		"	daddu	%0, %2				\n"
 		"	scd	%0, %1				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
@@ -473,8 +469,8 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		"	scd	%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		long temp;
 
@@ -485,8 +481,8 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		"	dsubu	%0, %2				\n"
 		"	scd	%0, %1				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
@@ -517,9 +513,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	beqzl	%0, 1b				\n"
 		"	daddu	%0, %1, %3			\n"
 		"	.set	mips0				\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else if (kernel_uses_llsc) {
 		long temp;
 
@@ -649,9 +644,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	.set	reorder				\n"
 		"1:						\n"
 		"	.set	mips0				\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
 	} else {
 		unsigned long flags;
 
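
[Editor's note] The primitives touched above are the regular kernel atomic
ops, and nothing changes for callers; a minimal (hypothetical) usage
sketch for orientation:

	#include <linux/atomic.h>
	#include <linux/printk.h>

	static atomic_t demo_count = ATOMIC_INIT(1);	/* hypothetical counter */

	static void demo_usage(void)
	{
		atomic_add(2, &demo_count);		/* 1 -> 3 */

		/* atomic_sub_return() hands back the post-subtraction value,
		 * so a zero crossing is detected without a separate read. */
		if (atomic_sub_return(3, &demo_count) == 0)
			pr_debug("demo_count dropped to zero\n");
	}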