author     Ralf Baechle <ralf@linux-mips.org>    2010-10-29 14:08:24 -0400
committer  Ralf Baechle <ralf@linux-mips.org>    2010-10-29 14:08:24 -0400
commit     7837314d141c661c70bc13c5050694413ecfe14a (patch)
tree       de137b1d2945d2490bc1dcdf6d76eac6006f7ab0 /arch/mips/include/asm/atomic.h
parent     18cb657ca1bafe635f368346a1676fb04c512edf (diff)
MIPS: Get rid of branches to .subsections.
It was a nice optimization - on paper at least. In practice it results in
branches that may exceed the maximum legal branch range. That can be worked
around with -ffunction-sections, but -ffunction-sections in turn is
incompatible with the -pg flag used by the function tracer.
By rewriting the loop around all simple LL/SC blocks in C we reduce the
amount of inline assembler and at the same time allow GCC to fill the
branch delay slots with something sensible, or to apply whatever other
clever optimization it may have up its sleeve.
With this optimization gone we also no longer need -ffunction-sections,
so drop it.
This optimization was originally introduced in 2.6.21, as commit
5999eca25c1fd4b9b9aca7833b04d10fe4bc877d (linux-mips.org) and
f65e4fa8e0c6022ad58dc88d1b11b12589ed7f9f (kernel.org), respectively.
The original fix for the issues that caused me to pull this optimization
was by Paul Gortmaker <paul.gortmaker@windriver.com>.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
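
The shape of the change, sketched from the atomic_add hunk below (whitespace condensed, not a verbatim copy of the kernel source):

/*
 * Old pattern: on sc failure, beqz branches forward into .subsection 2
 * and an out-of-line stub branches back to the ll.  A MIPS branch has a
 * 16-bit word offset (roughly +/-128 KiB), which a large enough function
 * can exceed.
 */
__asm__ __volatile__(
"	.set	mips3				\n"
"1:	ll	%0, %1	# atomic_add		\n"
"	addu	%0, %2				\n"
"	sc	%0, %1				\n"
"	beqz	%0, 2f				\n"
"	.subsection 2				\n"
"2:	b	1b				\n"
"	.previous				\n"
"	.set	mips0				\n"
: "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter));

/*
 * New pattern: sc leaves 0 in temp on failure, so the retry becomes an
 * ordinary C loop and GCC is free to schedule the branch and to fill
 * its delay slot.
 */
do {
	__asm__ __volatile__(
	"	.set	mips3			\n"
	"	ll	%0, %1	# atomic_add	\n"
	"	addu	%0, %2			\n"
	"	sc	%0, %1			\n"
	"	.set	mips0			\n"
	: "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter));
} while (unlikely(!temp));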
Diffstat (limited to 'arch/mips/include/asm/atomic.h')
-rw-r--r--   arch/mips/include/asm/atomic.h   208
1 file changed, 96 insertions(+), 112 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 47d87da379f9..4a02fe891ab6 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -64,18 +64,16 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 	} else if (kernel_uses_llsc) {
 		int temp;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%0, %1		# atomic_add		\n"
-		"	addu	%0, %2					\n"
-		"	sc	%0, %1					\n"
-		"	beqz	%0, 2f					\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	ll	%0, %1		# atomic_add	\n"
+			"	addu	%0, %2				\n"
+			"	sc	%0, %1				\n"
+			"	.set	mips0				\n"
+			: "=&r" (temp), "=m" (v->counter)
+			: "Ir" (i), "m" (v->counter));
+		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
 
@@ -109,18 +107,16 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 	} else if (kernel_uses_llsc) {
 		int temp;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%0, %1		# atomic_sub		\n"
-		"	subu	%0, %2					\n"
-		"	sc	%0, %1					\n"
-		"	beqz	%0, 2f					\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	ll	%0, %1		# atomic_sub	\n"
+			"	subu	%0, %2				\n"
+			"	sc	%0, %1				\n"
+			"	.set	mips0				\n"
+			: "=&r" (temp), "=m" (v->counter)
+			: "Ir" (i), "m" (v->counter));
+		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
 
@@ -156,20 +152,19 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 	} else if (kernel_uses_llsc) {
 		int temp;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%1, %2		# atomic_add_return	\n"
-		"	addu	%0, %1, %3				\n"
-		"	sc	%0, %2					\n"
-		"	beqz	%0, 2f					\n"
-		"	addu	%0, %1, %3				\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	ll	%1, %2	# atomic_add_return	\n"
+			"	addu	%0, %1, %3			\n"
+			"	sc	%0, %2				\n"
+			"	.set	mips0				\n"
+			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+			: "Ir" (i), "m" (v->counter)
+			: "memory");
+		} while (unlikely(!result));
+
+		result = temp + i;
 	} else {
 		unsigned long flags;
 
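In the value-returning variants, temp receives the value loaded by ll while result carries only the sc success flag, so the return value is recomputed in C once the loop exits. The same sequence with the data flow annotated (an annotated copy of the hunk above, not additional kernel code):

do {
	__asm__ __volatile__(
	"	.set	mips3				\n"
	"	ll	%1, %2	# atomic_add_return	\n"	/* temp = old value */
	"	addu	%0, %1, %3			\n"	/* result = temp + i */
	"	sc	%0, %2				\n"	/* result = 1 on success, 0 on failure */
	"	.set	mips0				\n"
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter)
	: "memory");
} while (unlikely(!result));

/* sc succeeded, so the value now in memory is exactly temp + i;
 * recompute it here rather than in a branch delay slot. */
result = temp + i;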
@@ -205,23 +200,24 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
+
+		result = temp - i;
 	} else if (kernel_uses_llsc) {
 		int temp;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%1, %2		# atomic_sub_return	\n"
-		"	subu	%0, %1, %3				\n"
-		"	sc	%0, %2					\n"
-		"	beqz	%0, 2f					\n"
-		"	subu	%0, %1, %3				\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	ll	%1, %2	# atomic_sub_return	\n"
+			"	subu	%0, %1, %3			\n"
+			"	sc	%0, %2				\n"
+			"	.set	mips0				\n"
+			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+			: "Ir" (i), "m" (v->counter)
+			: "memory");
+		} while (unlikely(!result));
+
+		result = temp - i;
 	} else {
 		unsigned long flags;
 
@@ -279,12 +275,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	bltz	%0, 1f					\n"
 		"	sc	%0, %2					\n"
 		"	.set	noreorder				\n"
-		"	beqz	%0, 2f					\n"
+		"	beqz	%0, 1b					\n"
 		"	 subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
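atomic_sub_if_positive is the one case that keeps its loop in assembler: the bltz exit must bypass the sc entirely, so the block cannot be reduced to a do/while on the sc result. Only the failure branch changes, from a detour through .subsection 2 to a direct backward branch. A sketch of the patched sequence; the opening ll line is not part of the hunk and is reproduced here from the surrounding source, so treat it as an approximation:

__asm__ __volatile__(
"	.set	mips3					\n"
"1:	ll	%1, %2		# atomic_sub_if_positive \n"
"	subu	%0, %1, %3				\n"
"	bltz	%0, 1f					\n"	/* result would go negative: skip the store */
"	sc	%0, %2					\n"
"	.set	noreorder				\n"
"	beqz	%0, 1b					\n"	/* sc failed: retry from the ll */
"	 subu	%0, %1, %3				\n"	/* delay slot: recompute the new value */
"	.set	reorder					\n"
"1:							\n"
"	.set	mips0					\n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");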
@@ -443,18 +436,16 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 	} else if (kernel_uses_llsc) {
 		long temp;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%0, %1		# atomic64_add		\n"
-		"	daddu	%0, %2					\n"
-		"	scd	%0, %1					\n"
-		"	beqz	%0, 2f					\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	lld	%0, %1		# atomic64_add	\n"
+			"	daddu	%0, %2				\n"
+			"	scd	%0, %1				\n"
+			"	.set	mips0				\n"
+			: "=&r" (temp), "=m" (v->counter)
+			: "Ir" (i), "m" (v->counter));
+		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
 
@@ -488,18 +479,16 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 	} else if (kernel_uses_llsc) {
 		long temp;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%0, %1		# atomic64_sub		\n"
-		"	dsubu	%0, %2					\n"
-		"	scd	%0, %1					\n"
-		"	beqz	%0, 2f					\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter));
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	lld	%0, %1		# atomic64_sub	\n"
+			"	dsubu	%0, %2				\n"
+			"	scd	%0, %1				\n"
+			"	.set	mips0				\n"
+			: "=&r" (temp), "=m" (v->counter)
+			: "Ir" (i), "m" (v->counter));
+		} while (unlikely(!temp));
 	} else {
 		unsigned long flags;
 
@@ -535,20 +524,19 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 	} else if (kernel_uses_llsc) {
 		long temp;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%1, %2		# atomic64_add_return	\n"
-		"	daddu	%0, %1, %3				\n"
-		"	scd	%0, %2					\n"
-		"	beqz	%0, 2f					\n"
-		"	daddu	%0, %1, %3				\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	lld	%1, %2	# atomic64_add_return	\n"
+			"	daddu	%0, %1, %3			\n"
+			"	scd	%0, %2				\n"
+			"	.set	mips0				\n"
+			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+			: "Ir" (i), "m" (v->counter)
+			: "memory");
+		} while (unlikely(!result));
+
+		result = temp + i;
 	} else {
 		unsigned long flags;
 
@@ -587,20 +575,19 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 	} else if (kernel_uses_llsc) {
 		long temp;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%1, %2		# atomic64_sub_return	\n"
-		"	dsubu	%0, %1, %3				\n"
-		"	scd	%0, %2					\n"
-		"	beqz	%0, 2f					\n"
-		"	dsubu	%0, %1, %3				\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"	.set	mips3				\n"
+			"	lld	%1, %2	# atomic64_sub_return	\n"
+			"	dsubu	%0, %1, %3			\n"
+			"	scd	%0, %2				\n"
+			"	.set	mips0				\n"
+			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+			: "Ir" (i), "m" (v->counter)
+			: "memory");
+		} while (unlikely(!result));
+
+		result = temp - i;
 	} else {
 		unsigned long flags;
 
@@ -658,12 +645,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	bltz	%0, 1f					\n"
 		"	scd	%0, %2					\n"
 		"	.set	noreorder				\n"
-		"	beqz	%0, 2f					\n"
+		"	beqz	%0, 1b					\n"
 		"	 dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	.previous					\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)