author     Linus Torvalds <torvalds@woody.osdl.org>   2006-12-04 22:23:34 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-12-04 22:23:34 -0500
commit     91f433cacc9d1ae95ae46ce26d7bcf3a724c72d0 (patch)
tree       058a5961548d489fdaffdbcfb7b34893946d37cf /include
parent     15a4cb9c25df05a5d4844e80a1aea83d66165868 (diff)
parent     0004a9dfeaa709a7f853487aba19932c9b1a87c8 (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
[MIPS] Cleanup memory barriers for weakly ordered systems.
[MIPS] Alchemy: Automatically enable CONFIG_RESOURCES_64BIT for PCI configs.
[MIPS] Unify csum_partial.S
[MIPS] SWARM: Fix a typo in #error directives
[MIPS] Fix atomic.h build errors.
[MIPS] Use SYSVIPC_COMPAT to fix various problems on N32
[MIPS] klconfig add missing bracket
Diffstat (limited to 'include')
 -rw-r--r--  include/asm-mips/atomic.h        39
 -rw-r--r--  include/asm-mips/barrier.h      132
 -rw-r--r--  include/asm-mips/bitops.h        27
 -rw-r--r--  include/asm-mips/compat.h        68
 -rw-r--r--  include/asm-mips/futex.h         22
 -rw-r--r--  include/asm-mips/sn/klconfig.h    2
 -rw-r--r--  include/asm-mips/spinlock.h      53
 -rw-r--r--  include/asm-mips/system.h       156
8 files changed, 288 insertions, 211 deletions
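The bulk of this merge is the first changelog entry above: the MIPS read-modify-write primitives used to hard-code a `sync` inside their LL/SC loops whenever CONFIG_SMP was set. They now call smp_mb() around the loop instead, and smp_mb() (defined in the new <asm/barrier.h>) only emits `sync` when the kernel is built for a weakly ordered SMP system. A minimal sketch of the resulting shape follows; it is illustrative only, and ll_sc_add() is a hypothetical stand-in for the inline MIPS assembly retry loop shown in the per-file diffs.

```c
/*
 * Illustrative shape of the new barrier placement; not the literal kernel
 * code, which performs the update with ll/sc (or lld/scd) inline assembly
 * and picks an R10000 workaround variant of the loop.
 */
static inline int atomic_add_return_sketch(int i, atomic_t *v)
{
	int result;

	smp_mb();                   /* order earlier accesses before the RMW */
	result = ll_sc_add(v, i);   /* hypothetical stand-in for the asm loop */
	smp_mb();                   /* order the RMW before later accesses */

	return result;
}
```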
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 7978d8e11647..c1a2409bb52a 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
| @@ -15,6 +15,7 @@ | |||
| 15 | #define _ASM_ATOMIC_H | 15 | #define _ASM_ATOMIC_H |
| 16 | 16 | ||
| 17 | #include <linux/irqflags.h> | 17 | #include <linux/irqflags.h> |
| 18 | #include <asm/barrier.h> | ||
| 18 | #include <asm/cpu-features.h> | 19 | #include <asm/cpu-features.h> |
| 19 | #include <asm/war.h> | 20 | #include <asm/war.h> |
| 20 | 21 | ||
| @@ -130,6 +131,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) | |||
| 130 | { | 131 | { |
| 131 | unsigned long result; | 132 | unsigned long result; |
| 132 | 133 | ||
| 134 | smp_mb(); | ||
| 135 | |||
| 133 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 136 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
| 134 | unsigned long temp; | 137 | unsigned long temp; |
| 135 | 138 | ||
| @@ -140,7 +143,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) | |||
| 140 | " sc %0, %2 \n" | 143 | " sc %0, %2 \n" |
| 141 | " beqzl %0, 1b \n" | 144 | " beqzl %0, 1b \n" |
| 142 | " addu %0, %1, %3 \n" | 145 | " addu %0, %1, %3 \n" |
| 143 | " sync \n" | ||
| 144 | " .set mips0 \n" | 146 | " .set mips0 \n" |
| 145 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 147 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| 146 | : "Ir" (i), "m" (v->counter) | 148 | : "Ir" (i), "m" (v->counter) |
| @@ -155,7 +157,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) | |||
| 155 | " sc %0, %2 \n" | 157 | " sc %0, %2 \n" |
| 156 | " beqz %0, 1b \n" | 158 | " beqz %0, 1b \n" |
| 157 | " addu %0, %1, %3 \n" | 159 | " addu %0, %1, %3 \n" |
| 158 | " sync \n" | ||
| 159 | " .set mips0 \n" | 160 | " .set mips0 \n" |
| 160 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 161 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| 161 | : "Ir" (i), "m" (v->counter) | 162 | : "Ir" (i), "m" (v->counter) |
| @@ -170,6 +171,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) | |||
| 170 | local_irq_restore(flags); | 171 | local_irq_restore(flags); |
| 171 | } | 172 | } |
| 172 | 173 | ||
| 174 | smp_mb(); | ||
| 175 | |||
| 173 | return result; | 176 | return result; |
| 174 | } | 177 | } |
| 175 | 178 | ||
| @@ -177,6 +180,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) | |||
| 177 | { | 180 | { |
| 178 | unsigned long result; | 181 | unsigned long result; |
| 179 | 182 | ||
| 183 | smp_mb(); | ||
| 184 | |||
| 180 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 185 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
| 181 | unsigned long temp; | 186 | unsigned long temp; |
| 182 | 187 | ||
| @@ -187,7 +192,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) | |||
| 187 | " sc %0, %2 \n" | 192 | " sc %0, %2 \n" |
| 188 | " beqzl %0, 1b \n" | 193 | " beqzl %0, 1b \n" |
| 189 | " subu %0, %1, %3 \n" | 194 | " subu %0, %1, %3 \n" |
| 190 | " sync \n" | ||
| 191 | " .set mips0 \n" | 195 | " .set mips0 \n" |
| 192 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 196 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| 193 | : "Ir" (i), "m" (v->counter) | 197 | : "Ir" (i), "m" (v->counter) |
| @@ -202,7 +206,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) | |||
| 202 | " sc %0, %2 \n" | 206 | " sc %0, %2 \n" |
| 203 | " beqz %0, 1b \n" | 207 | " beqz %0, 1b \n" |
| 204 | " subu %0, %1, %3 \n" | 208 | " subu %0, %1, %3 \n" |
| 205 | " sync \n" | ||
| 206 | " .set mips0 \n" | 209 | " .set mips0 \n" |
| 207 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 210 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| 208 | : "Ir" (i), "m" (v->counter) | 211 | : "Ir" (i), "m" (v->counter) |
| @@ -217,6 +220,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) | |||
| 217 | local_irq_restore(flags); | 220 | local_irq_restore(flags); |
| 218 | } | 221 | } |
| 219 | 222 | ||
| 223 | smp_mb(); | ||
| 224 | |||
| 220 | return result; | 225 | return result; |
| 221 | } | 226 | } |
| 222 | 227 | ||
| @@ -232,6 +237,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) | |||
| 232 | { | 237 | { |
| 233 | unsigned long result; | 238 | unsigned long result; |
| 234 | 239 | ||
| 240 | smp_mb(); | ||
| 241 | |||
| 235 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 242 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
| 236 | unsigned long temp; | 243 | unsigned long temp; |
| 237 | 244 | ||
| @@ -245,7 +252,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) | |||
| 245 | " beqzl %0, 1b \n" | 252 | " beqzl %0, 1b \n" |
| 246 | " subu %0, %1, %3 \n" | 253 | " subu %0, %1, %3 \n" |
| 247 | " .set reorder \n" | 254 | " .set reorder \n" |
| 248 | " sync \n" | ||
| 249 | "1: \n" | 255 | "1: \n" |
| 250 | " .set mips0 \n" | 256 | " .set mips0 \n" |
| 251 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 257 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| @@ -264,7 +270,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) | |||
| 264 | " beqz %0, 1b \n" | 270 | " beqz %0, 1b \n" |
| 265 | " subu %0, %1, %3 \n" | 271 | " subu %0, %1, %3 \n" |
| 266 | " .set reorder \n" | 272 | " .set reorder \n" |
| 267 | " sync \n" | ||
| 268 | "1: \n" | 273 | "1: \n" |
| 269 | " .set mips0 \n" | 274 | " .set mips0 \n" |
| 270 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 275 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| @@ -281,6 +286,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) | |||
| 281 | local_irq_restore(flags); | 286 | local_irq_restore(flags); |
| 282 | } | 287 | } |
| 283 | 288 | ||
| 289 | smp_mb(); | ||
| 290 | |||
| 284 | return result; | 291 | return result; |
| 285 | } | 292 | } |
| 286 | 293 | ||
| @@ -375,7 +382,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) | |||
| 375 | 382 | ||
| 376 | #ifdef CONFIG_64BIT | 383 | #ifdef CONFIG_64BIT |
| 377 | 384 | ||
| 378 | typedef struct { volatile __s64 counter; } atomic64_t; | 385 | typedef struct { volatile long counter; } atomic64_t; |
| 379 | 386 | ||
| 380 | #define ATOMIC64_INIT(i) { (i) } | 387 | #define ATOMIC64_INIT(i) { (i) } |
| 381 | 388 | ||
| @@ -484,6 +491,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) | |||
| 484 | { | 491 | { |
| 485 | unsigned long result; | 492 | unsigned long result; |
| 486 | 493 | ||
| 494 | smp_mb(); | ||
| 495 | |||
| 487 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 496 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
| 488 | unsigned long temp; | 497 | unsigned long temp; |
| 489 | 498 | ||
| @@ -494,7 +503,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) | |||
| 494 | " scd %0, %2 \n" | 503 | " scd %0, %2 \n" |
| 495 | " beqzl %0, 1b \n" | 504 | " beqzl %0, 1b \n" |
| 496 | " addu %0, %1, %3 \n" | 505 | " addu %0, %1, %3 \n" |
| 497 | " sync \n" | ||
| 498 | " .set mips0 \n" | 506 | " .set mips0 \n" |
| 499 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 507 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| 500 | : "Ir" (i), "m" (v->counter) | 508 | : "Ir" (i), "m" (v->counter) |
| @@ -509,7 +517,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) | |||
| 509 | " scd %0, %2 \n" | 517 | " scd %0, %2 \n" |
| 510 | " beqz %0, 1b \n" | 518 | " beqz %0, 1b \n" |
| 511 | " addu %0, %1, %3 \n" | 519 | " addu %0, %1, %3 \n" |
| 512 | " sync \n" | ||
| 513 | " .set mips0 \n" | 520 | " .set mips0 \n" |
| 514 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 521 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| 515 | : "Ir" (i), "m" (v->counter) | 522 | : "Ir" (i), "m" (v->counter) |
| @@ -524,6 +531,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) | |||
| 524 | local_irq_restore(flags); | 531 | local_irq_restore(flags); |
| 525 | } | 532 | } |
| 526 | 533 | ||
| 534 | smp_mb(); | ||
| 535 | |||
| 527 | return result; | 536 | return result; |
| 528 | } | 537 | } |
| 529 | 538 | ||
| @@ -531,6 +540,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) | |||
| 531 | { | 540 | { |
| 532 | unsigned long result; | 541 | unsigned long result; |
| 533 | 542 | ||
| 543 | smp_mb(); | ||
| 544 | |||
| 534 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 545 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
| 535 | unsigned long temp; | 546 | unsigned long temp; |
| 536 | 547 | ||
| @@ -541,7 +552,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) | |||
| 541 | " scd %0, %2 \n" | 552 | " scd %0, %2 \n" |
| 542 | " beqzl %0, 1b \n" | 553 | " beqzl %0, 1b \n" |
| 543 | " subu %0, %1, %3 \n" | 554 | " subu %0, %1, %3 \n" |
| 544 | " sync \n" | ||
| 545 | " .set mips0 \n" | 555 | " .set mips0 \n" |
| 546 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 556 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| 547 | : "Ir" (i), "m" (v->counter) | 557 | : "Ir" (i), "m" (v->counter) |
| @@ -556,7 +566,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) | |||
| 556 | " scd %0, %2 \n" | 566 | " scd %0, %2 \n" |
| 557 | " beqz %0, 1b \n" | 567 | " beqz %0, 1b \n" |
| 558 | " subu %0, %1, %3 \n" | 568 | " subu %0, %1, %3 \n" |
| 559 | " sync \n" | ||
| 560 | " .set mips0 \n" | 569 | " .set mips0 \n" |
| 561 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 570 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| 562 | : "Ir" (i), "m" (v->counter) | 571 | : "Ir" (i), "m" (v->counter) |
| @@ -571,6 +580,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) | |||
| 571 | local_irq_restore(flags); | 580 | local_irq_restore(flags); |
| 572 | } | 581 | } |
| 573 | 582 | ||
| 583 | smp_mb(); | ||
| 584 | |||
| 574 | return result; | 585 | return result; |
| 575 | } | 586 | } |
| 576 | 587 | ||
| @@ -586,6 +597,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) | |||
| 586 | { | 597 | { |
| 587 | unsigned long result; | 598 | unsigned long result; |
| 588 | 599 | ||
| 600 | smp_mb(); | ||
| 601 | |||
| 589 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 602 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
| 590 | unsigned long temp; | 603 | unsigned long temp; |
| 591 | 604 | ||
| @@ -599,7 +612,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) | |||
| 599 | " beqzl %0, 1b \n" | 612 | " beqzl %0, 1b \n" |
| 600 | " dsubu %0, %1, %3 \n" | 613 | " dsubu %0, %1, %3 \n" |
| 601 | " .set reorder \n" | 614 | " .set reorder \n" |
| 602 | " sync \n" | ||
| 603 | "1: \n" | 615 | "1: \n" |
| 604 | " .set mips0 \n" | 616 | " .set mips0 \n" |
| 605 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 617 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| @@ -618,7 +630,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) | |||
| 618 | " beqz %0, 1b \n" | 630 | " beqz %0, 1b \n" |
| 619 | " dsubu %0, %1, %3 \n" | 631 | " dsubu %0, %1, %3 \n" |
| 620 | " .set reorder \n" | 632 | " .set reorder \n" |
| 621 | " sync \n" | ||
| 622 | "1: \n" | 633 | "1: \n" |
| 623 | " .set mips0 \n" | 634 | " .set mips0 \n" |
| 624 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) | 635 | : "=&r" (result), "=&r" (temp), "=m" (v->counter) |
| @@ -635,6 +646,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) | |||
| 635 | local_irq_restore(flags); | 646 | local_irq_restore(flags); |
| 636 | } | 647 | } |
| 637 | 648 | ||
| 649 | smp_mb(); | ||
| 650 | |||
| 638 | return result; | 651 | return result; |
| 639 | } | 652 | } |
| 640 | 653 | ||
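Why the smp_mb() pairs matter: the value-returning atomics are specified to behave as full memory barriers. A typical consumer is a reference-count put path, sketched below under the assumption that obj->refcount is an atomic_t; struct myobj and myobj_release() are hypothetical names, not kernel APIs.

```c
/*
 * Hedged sketch.  The smp_mb() now built into atomic_dec_return() orders
 * this CPU's earlier stores to *obj before the counter can be observed as
 * zero, so whichever CPU frees the object never sees a stale partial
 * update.  'struct myobj' and myobj_release() are hypothetical.
 */
static void myobj_put(struct myobj *obj)
{
	if (atomic_dec_return(&obj->refcount) == 0)
		myobj_release(obj);
}
```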
diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h
new file mode 100644
index 000000000000..ed82631b0017
--- /dev/null
+++ b/include/asm-mips/barrier.h
| @@ -0,0 +1,132 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) | ||
| 7 | */ | ||
| 8 | #ifndef __ASM_BARRIER_H | ||
| 9 | #define __ASM_BARRIER_H | ||
| 10 | |||
| 11 | /* | ||
| 12 | * read_barrier_depends - Flush all pending reads that subsequents reads | ||
| 13 | * depend on. | ||
| 14 | * | ||
| 15 | * No data-dependent reads from memory-like regions are ever reordered | ||
| 16 | * over this barrier. All reads preceding this primitive are guaranteed | ||
| 17 | * to access memory (but not necessarily other CPUs' caches) before any | ||
| 18 | * reads following this primitive that depend on the data return by | ||
| 19 | * any of the preceding reads. This primitive is much lighter weight than | ||
| 20 | * rmb() on most CPUs, and is never heavier weight than is | ||
| 21 | * rmb(). | ||
| 22 | * | ||
| 23 | * These ordering constraints are respected by both the local CPU | ||
| 24 | * and the compiler. | ||
| 25 | * | ||
| 26 | * Ordering is not guaranteed by anything other than these primitives, | ||
| 27 | * not even by data dependencies. See the documentation for | ||
| 28 | * memory_barrier() for examples and URLs to more information. | ||
| 29 | * | ||
| 30 | * For example, the following code would force ordering (the initial | ||
| 31 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
| 32 | * | ||
| 33 | * <programlisting> | ||
| 34 | * CPU 0 CPU 1 | ||
| 35 | * | ||
| 36 | * b = 2; | ||
| 37 | * memory_barrier(); | ||
| 38 | * p = &b; q = p; | ||
| 39 | * read_barrier_depends(); | ||
| 40 | * d = *q; | ||
| 41 | * </programlisting> | ||
| 42 | * | ||
| 43 | * because the read of "*q" depends on the read of "p" and these | ||
| 44 | * two reads are separated by a read_barrier_depends(). However, | ||
| 45 | * the following code, with the same initial values for "a" and "b": | ||
| 46 | * | ||
| 47 | * <programlisting> | ||
| 48 | * CPU 0 CPU 1 | ||
| 49 | * | ||
| 50 | * a = 2; | ||
| 51 | * memory_barrier(); | ||
| 52 | * b = 3; y = b; | ||
| 53 | * read_barrier_depends(); | ||
| 54 | * x = a; | ||
| 55 | * </programlisting> | ||
| 56 | * | ||
| 57 | * does not enforce ordering, since there is no data dependency between | ||
| 58 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
| 59 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
| 60 | * in cases like this where there are no data dependencies. | ||
| 61 | */ | ||
| 62 | |||
| 63 | #define read_barrier_depends() do { } while(0) | ||
| 64 | #define smp_read_barrier_depends() do { } while(0) | ||
| 65 | |||
| 66 | #ifdef CONFIG_CPU_HAS_SYNC | ||
| 67 | #define __sync() \ | ||
| 68 | __asm__ __volatile__( \ | ||
| 69 | ".set push\n\t" \ | ||
| 70 | ".set noreorder\n\t" \ | ||
| 71 | ".set mips2\n\t" \ | ||
| 72 | "sync\n\t" \ | ||
| 73 | ".set pop" \ | ||
| 74 | : /* no output */ \ | ||
| 75 | : /* no input */ \ | ||
| 76 | : "memory") | ||
| 77 | #else | ||
| 78 | #define __sync() do { } while(0) | ||
| 79 | #endif | ||
| 80 | |||
| 81 | #define __fast_iob() \ | ||
| 82 | __asm__ __volatile__( \ | ||
| 83 | ".set push\n\t" \ | ||
| 84 | ".set noreorder\n\t" \ | ||
| 85 | "lw $0,%0\n\t" \ | ||
| 86 | "nop\n\t" \ | ||
| 87 | ".set pop" \ | ||
| 88 | : /* no output */ \ | ||
| 89 | : "m" (*(int *)CKSEG1) \ | ||
| 90 | : "memory") | ||
| 91 | |||
| 92 | #define fast_wmb() __sync() | ||
| 93 | #define fast_rmb() __sync() | ||
| 94 | #define fast_mb() __sync() | ||
| 95 | #define fast_iob() \ | ||
| 96 | do { \ | ||
| 97 | __sync(); \ | ||
| 98 | __fast_iob(); \ | ||
| 99 | } while (0) | ||
| 100 | |||
| 101 | #ifdef CONFIG_CPU_HAS_WB | ||
| 102 | |||
| 103 | #include <asm/wbflush.h> | ||
| 104 | |||
| 105 | #define wmb() fast_wmb() | ||
| 106 | #define rmb() fast_rmb() | ||
| 107 | #define mb() wbflush() | ||
| 108 | #define iob() wbflush() | ||
| 109 | |||
| 110 | #else /* !CONFIG_CPU_HAS_WB */ | ||
| 111 | |||
| 112 | #define wmb() fast_wmb() | ||
| 113 | #define rmb() fast_rmb() | ||
| 114 | #define mb() fast_mb() | ||
| 115 | #define iob() fast_iob() | ||
| 116 | |||
| 117 | #endif /* !CONFIG_CPU_HAS_WB */ | ||
| 118 | |||
| 119 | #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP) | ||
| 120 | #define __WEAK_ORDERING_MB " sync \n" | ||
| 121 | #else | ||
| 122 | #define __WEAK_ORDERING_MB " \n" | ||
| 123 | #endif | ||
| 124 | |||
| 125 | #define smp_mb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory") | ||
| 126 | #define smp_rmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory") | ||
| 127 | #define smp_wmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory") | ||
| 128 | |||
| 129 | #define set_mb(var, value) \ | ||
| 130 | do { var = value; smp_mb(); } while (0) | ||
| 131 | |||
| 132 | #endif /* __ASM_BARRIER_H */ | ||
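With these definitions, smp_mb(), smp_rmb() and smp_wmb() emit a `sync` only when both CONFIG_WEAK_ORDERING and CONFIG_SMP are set, so strongly ordered MIPS SMP systems stop paying for barriers they never needed. The classic producer/consumer pairing these primitives exist for looks roughly like this (kernel-style sketch, not part of this patch; hypothetical variables, compiler-visibility annotations omitted for brevity):

```c
static int payload;	/* hypothetical shared data */
static int ready;	/* hypothetical flag */

static void producer(void)
{
	payload = 42;
	smp_wmb();		/* publish the data before the flag */
	ready = 1;
}

static int consumer(void)
{
	if (!ready)
		return -1;	/* nothing published yet */
	smp_rmb();		/* see the flag before reading the data */
	return payload;
}
```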
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index b9007411b60f..06445de1324b 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
| @@ -3,7 +3,7 @@ | |||
| 3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
| 4 | * for more details. | 4 | * for more details. |
| 5 | * | 5 | * |
| 6 | * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org) | 6 | * Copyright (c) 1994 - 1997, 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org) |
| 7 | * Copyright (c) 1999, 2000 Silicon Graphics, Inc. | 7 | * Copyright (c) 1999, 2000 Silicon Graphics, Inc. |
| 8 | */ | 8 | */ |
| 9 | #ifndef _ASM_BITOPS_H | 9 | #ifndef _ASM_BITOPS_H |
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
| 13 | #include <linux/irqflags.h> | 13 | #include <linux/irqflags.h> |
| 14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
| 15 | #include <asm/barrier.h> | ||
| 15 | #include <asm/bug.h> | 16 | #include <asm/bug.h> |
| 16 | #include <asm/byteorder.h> /* sigh ... */ | 17 | #include <asm/byteorder.h> /* sigh ... */ |
| 17 | #include <asm/cpu-features.h> | 18 | #include <asm/cpu-features.h> |
| @@ -204,9 +205,6 @@ static inline int test_and_set_bit(unsigned long nr, | |||
| 204 | " " __SC "%2, %1 \n" | 205 | " " __SC "%2, %1 \n" |
| 205 | " beqzl %2, 1b \n" | 206 | " beqzl %2, 1b \n" |
| 206 | " and %2, %0, %3 \n" | 207 | " and %2, %0, %3 \n" |
| 207 | #ifdef CONFIG_SMP | ||
| 208 | " sync \n" | ||
| 209 | #endif | ||
| 210 | " .set mips0 \n" | 208 | " .set mips0 \n" |
| 211 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 209 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
| 212 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 210 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) |
| @@ -226,9 +224,6 @@ static inline int test_and_set_bit(unsigned long nr, | |||
| 226 | " " __SC "%2, %1 \n" | 224 | " " __SC "%2, %1 \n" |
| 227 | " beqz %2, 1b \n" | 225 | " beqz %2, 1b \n" |
| 228 | " and %2, %0, %3 \n" | 226 | " and %2, %0, %3 \n" |
| 229 | #ifdef CONFIG_SMP | ||
| 230 | " sync \n" | ||
| 231 | #endif | ||
| 232 | " .set pop \n" | 227 | " .set pop \n" |
| 233 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 228 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
| 234 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 229 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) |
| @@ -250,6 +245,8 @@ static inline int test_and_set_bit(unsigned long nr, | |||
| 250 | 245 | ||
| 251 | return retval; | 246 | return retval; |
| 252 | } | 247 | } |
| 248 | |||
| 249 | smp_mb(); | ||
| 253 | } | 250 | } |
| 254 | 251 | ||
| 255 | /* | 252 | /* |
| @@ -275,9 +272,6 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
| 275 | " " __SC "%2, %1 \n" | 272 | " " __SC "%2, %1 \n" |
| 276 | " beqzl %2, 1b \n" | 273 | " beqzl %2, 1b \n" |
| 277 | " and %2, %0, %3 \n" | 274 | " and %2, %0, %3 \n" |
| 278 | #ifdef CONFIG_SMP | ||
| 279 | " sync \n" | ||
| 280 | #endif | ||
| 281 | " .set mips0 \n" | 275 | " .set mips0 \n" |
| 282 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 276 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
| 283 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 277 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) |
| @@ -298,9 +292,6 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
| 298 | " " __SC "%2, %1 \n" | 292 | " " __SC "%2, %1 \n" |
| 299 | " beqz %2, 1b \n" | 293 | " beqz %2, 1b \n" |
| 300 | " and %2, %0, %3 \n" | 294 | " and %2, %0, %3 \n" |
| 301 | #ifdef CONFIG_SMP | ||
| 302 | " sync \n" | ||
| 303 | #endif | ||
| 304 | " .set pop \n" | 295 | " .set pop \n" |
| 305 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 296 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
| 306 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 297 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) |
| @@ -322,6 +313,8 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
| 322 | 313 | ||
| 323 | return retval; | 314 | return retval; |
| 324 | } | 315 | } |
| 316 | |||
| 317 | smp_mb(); | ||
| 325 | } | 318 | } |
| 326 | 319 | ||
| 327 | /* | 320 | /* |
| @@ -346,9 +339,6 @@ static inline int test_and_change_bit(unsigned long nr, | |||
| 346 | " " __SC "%2, %1 \n" | 339 | " " __SC "%2, %1 \n" |
| 347 | " beqzl %2, 1b \n" | 340 | " beqzl %2, 1b \n" |
| 348 | " and %2, %0, %3 \n" | 341 | " and %2, %0, %3 \n" |
| 349 | #ifdef CONFIG_SMP | ||
| 350 | " sync \n" | ||
| 351 | #endif | ||
| 352 | " .set mips0 \n" | 342 | " .set mips0 \n" |
| 353 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 343 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
| 354 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 344 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) |
| @@ -368,9 +358,6 @@ static inline int test_and_change_bit(unsigned long nr, | |||
| 368 | " " __SC "\t%2, %1 \n" | 358 | " " __SC "\t%2, %1 \n" |
| 369 | " beqz %2, 1b \n" | 359 | " beqz %2, 1b \n" |
| 370 | " and %2, %0, %3 \n" | 360 | " and %2, %0, %3 \n" |
| 371 | #ifdef CONFIG_SMP | ||
| 372 | " sync \n" | ||
| 373 | #endif | ||
| 374 | " .set pop \n" | 361 | " .set pop \n" |
| 375 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 362 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
| 376 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 363 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) |
| @@ -391,6 +378,8 @@ static inline int test_and_change_bit(unsigned long nr, | |||
| 391 | 378 | ||
| 392 | return retval; | 379 | return retval; |
| 393 | } | 380 | } |
| 381 | |||
| 382 | smp_mb(); | ||
| 394 | } | 383 | } |
| 395 | 384 | ||
| 396 | #include <asm-generic/bitops/non-atomic.h> | 385 | #include <asm-generic/bitops/non-atomic.h> |
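The atomic bitops keep their documented full-barrier behaviour, but the barrier is now the trailing smp_mb() rather than a `sync` conditionally assembled into the loop. A caller that depends on it, sketched with a hypothetical flag word and helper:

```c
static unsigned long pending_flags;	/* hypothetical flag word */
#define RESET_PENDING	0		/* hypothetical bit number */

static void trigger_reset(void)
{
	/*
	 * Only the first caller that sets the bit performs the reset; the
	 * barrier implied by test_and_set_bit() keeps the winner's later
	 * accesses from being reordered before the bit is actually set.
	 */
	if (test_and_set_bit(RESET_PENDING, &pending_flags))
		return;
	do_controller_reset();		/* hypothetical helper */
}
```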
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index 900f472fdd2b..55a0152feb08 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
| @@ -32,6 +32,7 @@ typedef struct { | |||
| 32 | s32 val[2]; | 32 | s32 val[2]; |
| 33 | } compat_fsid_t; | 33 | } compat_fsid_t; |
| 34 | typedef s32 compat_timer_t; | 34 | typedef s32 compat_timer_t; |
| 35 | typedef s32 compat_key_t; | ||
| 35 | 36 | ||
| 36 | typedef s32 compat_int_t; | 37 | typedef s32 compat_int_t; |
| 37 | typedef s32 compat_long_t; | 38 | typedef s32 compat_long_t; |
| @@ -146,4 +147,71 @@ static inline void __user *compat_alloc_user_space(long len) | |||
| 146 | return (void __user *) (regs->regs[29] - len); | 147 | return (void __user *) (regs->regs[29] - len); |
| 147 | } | 148 | } |
| 148 | 149 | ||
| 150 | struct compat_ipc64_perm { | ||
| 151 | compat_key_t key; | ||
| 152 | __compat_uid32_t uid; | ||
| 153 | __compat_gid32_t gid; | ||
| 154 | __compat_uid32_t cuid; | ||
| 155 | __compat_gid32_t cgid; | ||
| 156 | compat_mode_t mode; | ||
| 157 | unsigned short seq; | ||
| 158 | unsigned short __pad2; | ||
| 159 | compat_ulong_t __unused1; | ||
| 160 | compat_ulong_t __unused2; | ||
| 161 | }; | ||
| 162 | |||
| 163 | struct compat_semid64_ds { | ||
| 164 | struct compat_ipc64_perm sem_perm; | ||
| 165 | compat_time_t sem_otime; | ||
| 166 | compat_time_t sem_ctime; | ||
| 167 | compat_ulong_t sem_nsems; | ||
| 168 | compat_ulong_t __unused1; | ||
| 169 | compat_ulong_t __unused2; | ||
| 170 | }; | ||
| 171 | |||
| 172 | struct compat_msqid64_ds { | ||
| 173 | struct compat_ipc64_perm msg_perm; | ||
| 174 | #ifndef CONFIG_CPU_LITTLE_ENDIAN | ||
| 175 | compat_ulong_t __unused1; | ||
| 176 | #endif | ||
| 177 | compat_time_t msg_stime; | ||
| 178 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
| 179 | compat_ulong_t __unused1; | ||
| 180 | #endif | ||
| 181 | #ifndef CONFIG_CPU_LITTLE_ENDIAN | ||
| 182 | compat_ulong_t __unused2; | ||
| 183 | #endif | ||
| 184 | compat_time_t msg_rtime; | ||
| 185 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
| 186 | compat_ulong_t __unused2; | ||
| 187 | #endif | ||
| 188 | #ifndef CONFIG_CPU_LITTLE_ENDIAN | ||
| 189 | compat_ulong_t __unused3; | ||
| 190 | #endif | ||
| 191 | compat_time_t msg_ctime; | ||
| 192 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
| 193 | compat_ulong_t __unused3; | ||
| 194 | #endif | ||
| 195 | compat_ulong_t msg_cbytes; | ||
| 196 | compat_ulong_t msg_qnum; | ||
| 197 | compat_ulong_t msg_qbytes; | ||
| 198 | compat_pid_t msg_lspid; | ||
| 199 | compat_pid_t msg_lrpid; | ||
| 200 | compat_ulong_t __unused4; | ||
| 201 | compat_ulong_t __unused5; | ||
| 202 | }; | ||
| 203 | |||
| 204 | struct compat_shmid64_ds { | ||
| 205 | struct compat_ipc64_perm shm_perm; | ||
| 206 | compat_size_t shm_segsz; | ||
| 207 | compat_time_t shm_atime; | ||
| 208 | compat_time_t shm_dtime; | ||
| 209 | compat_time_t shm_ctime; | ||
| 210 | compat_pid_t shm_cpid; | ||
| 211 | compat_pid_t shm_lpid; | ||
| 212 | compat_ulong_t shm_nattch; | ||
| 213 | compat_ulong_t __unused1; | ||
| 214 | compat_ulong_t __unused2; | ||
| 215 | }; | ||
| 216 | |||
| 149 | #endif /* _ASM_COMPAT_H */ | 217 | #endif /* _ASM_COMPAT_H */ |
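These compat_*_ds structures mirror the 32-bit ABI layouts (note the endian-dependent placement of the padding words around the 32-bit time fields) so that a 64-bit kernel with SYSVIPC_COMPAT can translate IPC calls from o32/N32 user space. Roughly what such a translation looks like; the helper below is a hedged sketch with a hypothetical name and a reduced field selection, not the code the generic compat IPC layer actually uses:

```c
/* Hedged sketch; helper name and field selection are illustrative only. */
static int sketch_get_compat_semid64_ds(struct semid64_ds *out,
					const struct compat_semid64_ds __user *up)
{
	struct compat_semid64_ds c;

	if (copy_from_user(&c, up, sizeof(c)))
		return -EFAULT;

	out->sem_perm.uid  = c.sem_perm.uid;
	out->sem_perm.gid  = c.sem_perm.gid;
	out->sem_perm.mode = c.sem_perm.mode;
	out->sem_otime     = c.sem_otime;
	out->sem_ctime     = c.sem_ctime;
	out->sem_nsems     = c.sem_nsems;
	return 0;
}
```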
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index ed023eae0674..927a216bd530 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
| @@ -1,19 +1,21 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org) | ||
| 7 | */ | ||
| 1 | #ifndef _ASM_FUTEX_H | 8 | #ifndef _ASM_FUTEX_H |
| 2 | #define _ASM_FUTEX_H | 9 | #define _ASM_FUTEX_H |
| 3 | 10 | ||
| 4 | #ifdef __KERNEL__ | 11 | #ifdef __KERNEL__ |
| 5 | 12 | ||
| 6 | #include <linux/futex.h> | 13 | #include <linux/futex.h> |
| 14 | #include <asm/barrier.h> | ||
| 7 | #include <asm/errno.h> | 15 | #include <asm/errno.h> |
| 8 | #include <asm/uaccess.h> | 16 | #include <asm/uaccess.h> |
| 9 | #include <asm/war.h> | 17 | #include <asm/war.h> |
| 10 | 18 | ||
| 11 | #ifdef CONFIG_SMP | ||
| 12 | #define __FUTEX_SMP_SYNC " sync \n" | ||
| 13 | #else | ||
| 14 | #define __FUTEX_SMP_SYNC | ||
| 15 | #endif | ||
| 16 | |||
| 17 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ | 19 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ |
| 18 | { \ | 20 | { \ |
| 19 | if (cpu_has_llsc && R10000_LLSC_WAR) { \ | 21 | if (cpu_has_llsc && R10000_LLSC_WAR) { \ |
| @@ -27,7 +29,7 @@ | |||
| 27 | " .set mips3 \n" \ | 29 | " .set mips3 \n" \ |
| 28 | "2: sc $1, %2 \n" \ | 30 | "2: sc $1, %2 \n" \ |
| 29 | " beqzl $1, 1b \n" \ | 31 | " beqzl $1, 1b \n" \ |
| 30 | __FUTEX_SMP_SYNC \ | 32 | __WEAK_ORDERING_MB \ |
| 31 | "3: \n" \ | 33 | "3: \n" \ |
| 32 | " .set pop \n" \ | 34 | " .set pop \n" \ |
| 33 | " .set mips0 \n" \ | 35 | " .set mips0 \n" \ |
| @@ -53,7 +55,7 @@ | |||
| 53 | " .set mips3 \n" \ | 55 | " .set mips3 \n" \ |
| 54 | "2: sc $1, %2 \n" \ | 56 | "2: sc $1, %2 \n" \ |
| 55 | " beqz $1, 1b \n" \ | 57 | " beqz $1, 1b \n" \ |
| 56 | __FUTEX_SMP_SYNC \ | 58 | __WEAK_ORDERING_MB \ |
| 57 | "3: \n" \ | 59 | "3: \n" \ |
| 58 | " .set pop \n" \ | 60 | " .set pop \n" \ |
| 59 | " .set mips0 \n" \ | 61 | " .set mips0 \n" \ |
| @@ -150,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | |||
| 150 | " .set mips3 \n" | 152 | " .set mips3 \n" |
| 151 | "2: sc $1, %1 \n" | 153 | "2: sc $1, %1 \n" |
| 152 | " beqzl $1, 1b \n" | 154 | " beqzl $1, 1b \n" |
| 153 | __FUTEX_SMP_SYNC | 155 | __WEAK_ORDERING_MB |
| 154 | "3: \n" | 156 | "3: \n" |
| 155 | " .set pop \n" | 157 | " .set pop \n" |
| 156 | " .section .fixup,\"ax\" \n" | 158 | " .section .fixup,\"ax\" \n" |
| @@ -177,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | |||
| 177 | " .set mips3 \n" | 179 | " .set mips3 \n" |
| 178 | "2: sc $1, %1 \n" | 180 | "2: sc $1, %1 \n" |
| 179 | " beqz $1, 1b \n" | 181 | " beqz $1, 1b \n" |
| 180 | __FUTEX_SMP_SYNC | 182 | __WEAK_ORDERING_MB |
| 181 | "3: \n" | 183 | "3: \n" |
| 182 | " .set pop \n" | 184 | " .set pop \n" |
| 183 | " .section .fixup,\"ax\" \n" | 185 | " .section .fixup,\"ax\" \n" |
diff --git a/include/asm-mips/sn/klconfig.h b/include/asm-mips/sn/klconfig.h
index b63cd0655b3d..15d70ca56187 100644
--- a/include/asm-mips/sn/klconfig.h
+++ b/include/asm-mips/sn/klconfig.h
| @@ -176,7 +176,7 @@ typedef struct kl_config_hdr { | |||
| 176 | /* --- New Macros for the changed kl_config_hdr_t structure --- */ | 176 | /* --- New Macros for the changed kl_config_hdr_t structure --- */ |
| 177 | 177 | ||
| 178 | #define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\ | 178 | #define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\ |
| 179 | (unsigned long)_k + (_k->ch_malloc_hdr_off))) | 179 | ((unsigned long)_k + (_k->ch_malloc_hdr_off))) |
| 180 | 180 | ||
| 181 | #define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n)) | 181 | #define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n)) |
| 182 | 182 | ||
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index c8d5587467bb..fc3217fc1118 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
| @@ -3,12 +3,13 @@ | |||
| 3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
| 4 | * for more details. | 4 | * for more details. |
| 5 | * | 5 | * |
| 6 | * Copyright (C) 1999, 2000 by Ralf Baechle | 6 | * Copyright (C) 1999, 2000, 06 by Ralf Baechle |
| 7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
| 8 | */ | 8 | */ |
| 9 | #ifndef _ASM_SPINLOCK_H | 9 | #ifndef _ASM_SPINLOCK_H |
| 10 | #define _ASM_SPINLOCK_H | 10 | #define _ASM_SPINLOCK_H |
| 11 | 11 | ||
| 12 | #include <asm/barrier.h> | ||
| 12 | #include <asm/war.h> | 13 | #include <asm/war.h> |
| 13 | 14 | ||
| 14 | /* | 15 | /* |
| @@ -40,7 +41,6 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
| 40 | " sc %1, %0 \n" | 41 | " sc %1, %0 \n" |
| 41 | " beqzl %1, 1b \n" | 42 | " beqzl %1, 1b \n" |
| 42 | " nop \n" | 43 | " nop \n" |
| 43 | " sync \n" | ||
| 44 | " .set reorder \n" | 44 | " .set reorder \n" |
| 45 | : "=m" (lock->lock), "=&r" (tmp) | 45 | : "=m" (lock->lock), "=&r" (tmp) |
| 46 | : "m" (lock->lock) | 46 | : "m" (lock->lock) |
| @@ -53,19 +53,22 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
| 53 | " li %1, 1 \n" | 53 | " li %1, 1 \n" |
| 54 | " sc %1, %0 \n" | 54 | " sc %1, %0 \n" |
| 55 | " beqz %1, 1b \n" | 55 | " beqz %1, 1b \n" |
| 56 | " sync \n" | 56 | " nop \n" |
| 57 | " .set reorder \n" | 57 | " .set reorder \n" |
| 58 | : "=m" (lock->lock), "=&r" (tmp) | 58 | : "=m" (lock->lock), "=&r" (tmp) |
| 59 | : "m" (lock->lock) | 59 | : "m" (lock->lock) |
| 60 | : "memory"); | 60 | : "memory"); |
| 61 | } | 61 | } |
| 62 | |||
| 63 | smp_mb(); | ||
| 62 | } | 64 | } |
| 63 | 65 | ||
| 64 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 66 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 65 | { | 67 | { |
| 68 | smp_mb(); | ||
| 69 | |||
| 66 | __asm__ __volatile__( | 70 | __asm__ __volatile__( |
| 67 | " .set noreorder # __raw_spin_unlock \n" | 71 | " .set noreorder # __raw_spin_unlock \n" |
| 68 | " sync \n" | ||
| 69 | " sw $0, %0 \n" | 72 | " sw $0, %0 \n" |
| 70 | " .set\treorder \n" | 73 | " .set\treorder \n" |
| 71 | : "=m" (lock->lock) | 74 | : "=m" (lock->lock) |
| @@ -86,7 +89,6 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) | |||
| 86 | " beqzl %2, 1b \n" | 89 | " beqzl %2, 1b \n" |
| 87 | " nop \n" | 90 | " nop \n" |
| 88 | " andi %2, %0, 1 \n" | 91 | " andi %2, %0, 1 \n" |
| 89 | " sync \n" | ||
| 90 | " .set reorder" | 92 | " .set reorder" |
| 91 | : "=&r" (temp), "=m" (lock->lock), "=&r" (res) | 93 | : "=&r" (temp), "=m" (lock->lock), "=&r" (res) |
| 92 | : "m" (lock->lock) | 94 | : "m" (lock->lock) |
| @@ -99,13 +101,14 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) | |||
| 99 | " sc %2, %1 \n" | 101 | " sc %2, %1 \n" |
| 100 | " beqz %2, 1b \n" | 102 | " beqz %2, 1b \n" |
| 101 | " andi %2, %0, 1 \n" | 103 | " andi %2, %0, 1 \n" |
| 102 | " sync \n" | ||
| 103 | " .set reorder" | 104 | " .set reorder" |
| 104 | : "=&r" (temp), "=m" (lock->lock), "=&r" (res) | 105 | : "=&r" (temp), "=m" (lock->lock), "=&r" (res) |
| 105 | : "m" (lock->lock) | 106 | : "m" (lock->lock) |
| 106 | : "memory"); | 107 | : "memory"); |
| 107 | } | 108 | } |
| 108 | 109 | ||
| 110 | smp_mb(); | ||
| 111 | |||
| 109 | return res == 0; | 112 | return res == 0; |
| 110 | } | 113 | } |
| 111 | 114 | ||
| @@ -143,7 +146,6 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
| 143 | " sc %1, %0 \n" | 146 | " sc %1, %0 \n" |
| 144 | " beqzl %1, 1b \n" | 147 | " beqzl %1, 1b \n" |
| 145 | " nop \n" | 148 | " nop \n" |
| 146 | " sync \n" | ||
| 147 | " .set reorder \n" | 149 | " .set reorder \n" |
| 148 | : "=m" (rw->lock), "=&r" (tmp) | 150 | : "=m" (rw->lock), "=&r" (tmp) |
| 149 | : "m" (rw->lock) | 151 | : "m" (rw->lock) |
| @@ -156,12 +158,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
| 156 | " addu %1, 1 \n" | 158 | " addu %1, 1 \n" |
| 157 | " sc %1, %0 \n" | 159 | " sc %1, %0 \n" |
| 158 | " beqz %1, 1b \n" | 160 | " beqz %1, 1b \n" |
| 159 | " sync \n" | 161 | " nop \n" |
| 160 | " .set reorder \n" | 162 | " .set reorder \n" |
| 161 | : "=m" (rw->lock), "=&r" (tmp) | 163 | : "=m" (rw->lock), "=&r" (tmp) |
| 162 | : "m" (rw->lock) | 164 | : "m" (rw->lock) |
| 163 | : "memory"); | 165 | : "memory"); |
| 164 | } | 166 | } |
| 167 | |||
| 168 | smp_mb(); | ||
| 165 | } | 169 | } |
| 166 | 170 | ||
| 167 | /* Note the use of sub, not subu which will make the kernel die with an | 171 | /* Note the use of sub, not subu which will make the kernel die with an |
| @@ -171,13 +175,14 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
| 171 | { | 175 | { |
| 172 | unsigned int tmp; | 176 | unsigned int tmp; |
| 173 | 177 | ||
| 178 | smp_mb(); | ||
| 179 | |||
| 174 | if (R10000_LLSC_WAR) { | 180 | if (R10000_LLSC_WAR) { |
| 175 | __asm__ __volatile__( | 181 | __asm__ __volatile__( |
| 176 | "1: ll %1, %2 # __raw_read_unlock \n" | 182 | "1: ll %1, %2 # __raw_read_unlock \n" |
| 177 | " sub %1, 1 \n" | 183 | " sub %1, 1 \n" |
| 178 | " sc %1, %0 \n" | 184 | " sc %1, %0 \n" |
| 179 | " beqzl %1, 1b \n" | 185 | " beqzl %1, 1b \n" |
| 180 | " sync \n" | ||
| 181 | : "=m" (rw->lock), "=&r" (tmp) | 186 | : "=m" (rw->lock), "=&r" (tmp) |
| 182 | : "m" (rw->lock) | 187 | : "m" (rw->lock) |
| 183 | : "memory"); | 188 | : "memory"); |
| @@ -188,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
| 188 | " sub %1, 1 \n" | 193 | " sub %1, 1 \n" |
| 189 | " sc %1, %0 \n" | 194 | " sc %1, %0 \n" |
| 190 | " beqz %1, 1b \n" | 195 | " beqz %1, 1b \n" |
| 191 | " sync \n" | 196 | " nop \n" |
| 192 | " .set reorder \n" | 197 | " .set reorder \n" |
| 193 | : "=m" (rw->lock), "=&r" (tmp) | 198 | : "=m" (rw->lock), "=&r" (tmp) |
| 194 | : "m" (rw->lock) | 199 | : "m" (rw->lock) |
| @@ -208,7 +213,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
| 208 | " lui %1, 0x8000 \n" | 213 | " lui %1, 0x8000 \n" |
| 209 | " sc %1, %0 \n" | 214 | " sc %1, %0 \n" |
| 210 | " beqzl %1, 1b \n" | 215 | " beqzl %1, 1b \n" |
| 211 | " sync \n" | 216 | " nop \n" |
| 212 | " .set reorder \n" | 217 | " .set reorder \n" |
| 213 | : "=m" (rw->lock), "=&r" (tmp) | 218 | : "=m" (rw->lock), "=&r" (tmp) |
| 214 | : "m" (rw->lock) | 219 | : "m" (rw->lock) |
| @@ -221,18 +226,22 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
| 221 | " lui %1, 0x8000 \n" | 226 | " lui %1, 0x8000 \n" |
| 222 | " sc %1, %0 \n" | 227 | " sc %1, %0 \n" |
| 223 | " beqz %1, 1b \n" | 228 | " beqz %1, 1b \n" |
| 224 | " sync \n" | 229 | " nop \n" |
| 225 | " .set reorder \n" | 230 | " .set reorder \n" |
| 226 | : "=m" (rw->lock), "=&r" (tmp) | 231 | : "=m" (rw->lock), "=&r" (tmp) |
| 227 | : "m" (rw->lock) | 232 | : "m" (rw->lock) |
| 228 | : "memory"); | 233 | : "memory"); |
| 229 | } | 234 | } |
| 235 | |||
| 236 | smp_mb(); | ||
| 230 | } | 237 | } |
| 231 | 238 | ||
| 232 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 239 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
| 233 | { | 240 | { |
| 241 | smp_mb(); | ||
| 242 | |||
| 234 | __asm__ __volatile__( | 243 | __asm__ __volatile__( |
| 235 | " sync # __raw_write_unlock \n" | 244 | " # __raw_write_unlock \n" |
| 236 | " sw $0, %0 \n" | 245 | " sw $0, %0 \n" |
| 237 | : "=m" (rw->lock) | 246 | : "=m" (rw->lock) |
| 238 | : "m" (rw->lock) | 247 | : "m" (rw->lock) |
| @@ -252,11 +261,10 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
| 252 | " bnez %1, 2f \n" | 261 | " bnez %1, 2f \n" |
| 253 | " addu %1, 1 \n" | 262 | " addu %1, 1 \n" |
| 254 | " sc %1, %0 \n" | 263 | " sc %1, %0 \n" |
| 255 | " beqzl %1, 1b \n" | ||
| 256 | " .set reorder \n" | 264 | " .set reorder \n" |
| 257 | #ifdef CONFIG_SMP | 265 | " beqzl %1, 1b \n" |
| 258 | " sync \n" | 266 | " nop \n" |
| 259 | #endif | 267 | __WEAK_ORDERING_MB |
| 260 | " li %2, 1 \n" | 268 | " li %2, 1 \n" |
| 261 | "2: \n" | 269 | "2: \n" |
| 262 | : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) | 270 | : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) |
| @@ -271,10 +279,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
| 271 | " addu %1, 1 \n" | 279 | " addu %1, 1 \n" |
| 272 | " sc %1, %0 \n" | 280 | " sc %1, %0 \n" |
| 273 | " beqz %1, 1b \n" | 281 | " beqz %1, 1b \n" |
| 282 | " nop \n" | ||
| 274 | " .set reorder \n" | 283 | " .set reorder \n" |
| 275 | #ifdef CONFIG_SMP | 284 | __WEAK_ORDERING_MB |
| 276 | " sync \n" | ||
| 277 | #endif | ||
| 278 | " li %2, 1 \n" | 285 | " li %2, 1 \n" |
| 279 | "2: \n" | 286 | "2: \n" |
| 280 | : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) | 287 | : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) |
| @@ -299,7 +306,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
| 299 | " lui %1, 0x8000 \n" | 306 | " lui %1, 0x8000 \n" |
| 300 | " sc %1, %0 \n" | 307 | " sc %1, %0 \n" |
| 301 | " beqzl %1, 1b \n" | 308 | " beqzl %1, 1b \n" |
| 302 | " sync \n" | 309 | " nop \n" |
| 310 | __WEAK_ORDERING_MB | ||
| 303 | " li %2, 1 \n" | 311 | " li %2, 1 \n" |
| 304 | " .set reorder \n" | 312 | " .set reorder \n" |
| 305 | "2: \n" | 313 | "2: \n" |
| @@ -315,7 +323,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
| 315 | " lui %1, 0x8000 \n" | 323 | " lui %1, 0x8000 \n" |
| 316 | " sc %1, %0 \n" | 324 | " sc %1, %0 \n" |
| 317 | " beqz %1, 1b \n" | 325 | " beqz %1, 1b \n" |
| 318 | " sync \n" | 326 | " nop \n" |
| 327 | __WEAK_ORDERING_MB | ||
| 319 | " li %2, 1 \n" | 328 | " li %2, 1 \n" |
| 320 | " .set reorder \n" | 329 | " .set reorder \n" |
| 321 | "2: \n" | 330 | "2: \n" |
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 3056feed5a36..9428057a50cf 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
| @@ -3,7 +3,7 @@ | |||
| 3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
| 4 | * for more details. | 4 | * for more details. |
| 5 | * | 5 | * |
| 6 | * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle | 6 | * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle |
| 7 | * Copyright (C) 1996 by Paul M. Antoine | 7 | * Copyright (C) 1996 by Paul M. Antoine |
| 8 | * Copyright (C) 1999 Silicon Graphics | 8 | * Copyright (C) 1999 Silicon Graphics |
| 9 | * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com | 9 | * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com |
| @@ -16,132 +16,12 @@ | |||
| 16 | #include <linux/irqflags.h> | 16 | #include <linux/irqflags.h> |
| 17 | 17 | ||
| 18 | #include <asm/addrspace.h> | 18 | #include <asm/addrspace.h> |
| 19 | #include <asm/barrier.h> | ||
| 19 | #include <asm/cpu-features.h> | 20 | #include <asm/cpu-features.h> |
| 20 | #include <asm/dsp.h> | 21 | #include <asm/dsp.h> |
| 21 | #include <asm/ptrace.h> | 22 | #include <asm/ptrace.h> |
| 22 | #include <asm/war.h> | 23 | #include <asm/war.h> |
| 23 | 24 | ||
| 24 | /* | ||
| 25 | * read_barrier_depends - Flush all pending reads that subsequents reads | ||
| 26 | * depend on. | ||
| 27 | * | ||
| 28 | * No data-dependent reads from memory-like regions are ever reordered | ||
| 29 | * over this barrier. All reads preceding this primitive are guaranteed | ||
| 30 | * to access memory (but not necessarily other CPUs' caches) before any | ||
| 31 | * reads following this primitive that depend on the data return by | ||
| 32 | * any of the preceding reads. This primitive is much lighter weight than | ||
| 33 | * rmb() on most CPUs, and is never heavier weight than is | ||
| 34 | * rmb(). | ||
| 35 | * | ||
| 36 | * These ordering constraints are respected by both the local CPU | ||
| 37 | * and the compiler. | ||
| 38 | * | ||
| 39 | * Ordering is not guaranteed by anything other than these primitives, | ||
| 40 | * not even by data dependencies. See the documentation for | ||
| 41 | * memory_barrier() for examples and URLs to more information. | ||
| 42 | * | ||
| 43 | * For example, the following code would force ordering (the initial | ||
| 44 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
| 45 | * | ||
| 46 | * <programlisting> | ||
| 47 | * CPU 0 CPU 1 | ||
| 48 | * | ||
| 49 | * b = 2; | ||
| 50 | * memory_barrier(); | ||
| 51 | * p = &b; q = p; | ||
| 52 | * read_barrier_depends(); | ||
| 53 | * d = *q; | ||
| 54 | * </programlisting> | ||
| 55 | * | ||
| 56 | * because the read of "*q" depends on the read of "p" and these | ||
| 57 | * two reads are separated by a read_barrier_depends(). However, | ||
| 58 | * the following code, with the same initial values for "a" and "b": | ||
| 59 | * | ||
| 60 | * <programlisting> | ||
| 61 | * CPU 0 CPU 1 | ||
| 62 | * | ||
| 63 | * a = 2; | ||
| 64 | * memory_barrier(); | ||
| 65 | * b = 3; y = b; | ||
| 66 | * read_barrier_depends(); | ||
| 67 | * x = a; | ||
| 68 | * </programlisting> | ||
| 69 | * | ||
| 70 | * does not enforce ordering, since there is no data dependency between | ||
| 71 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
| 72 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
| 73 | * in cases like this where there are no data dependencies. | ||
| 74 | */ | ||
| 75 | |||
| 76 | #define read_barrier_depends() do { } while(0) | ||
| 77 | |||
| 78 | #ifdef CONFIG_CPU_HAS_SYNC | ||
| 79 | #define __sync() \ | ||
| 80 | __asm__ __volatile__( \ | ||
| 81 | ".set push\n\t" \ | ||
| 82 | ".set noreorder\n\t" \ | ||
| 83 | ".set mips2\n\t" \ | ||
| 84 | "sync\n\t" \ | ||
| 85 | ".set pop" \ | ||
| 86 | : /* no output */ \ | ||
| 87 | : /* no input */ \ | ||
| 88 | : "memory") | ||
| 89 | #else | ||
| 90 | #define __sync() do { } while(0) | ||
| 91 | #endif | ||
| 92 | |||
| 93 | #define __fast_iob() \ | ||
| 94 | __asm__ __volatile__( \ | ||
| 95 | ".set push\n\t" \ | ||
| 96 | ".set noreorder\n\t" \ | ||
| 97 | "lw $0,%0\n\t" \ | ||
| 98 | "nop\n\t" \ | ||
| 99 | ".set pop" \ | ||
| 100 | : /* no output */ \ | ||
| 101 | : "m" (*(int *)CKSEG1) \ | ||
| 102 | : "memory") | ||
| 103 | |||
| 104 | #define fast_wmb() __sync() | ||
| 105 | #define fast_rmb() __sync() | ||
| 106 | #define fast_mb() __sync() | ||
| 107 | #define fast_iob() \ | ||
| 108 | do { \ | ||
| 109 | __sync(); \ | ||
| 110 | __fast_iob(); \ | ||
| 111 | } while (0) | ||
| 112 | |||
| 113 | #ifdef CONFIG_CPU_HAS_WB | ||
| 114 | |||
| 115 | #include <asm/wbflush.h> | ||
| 116 | |||
| 117 | #define wmb() fast_wmb() | ||
| 118 | #define rmb() fast_rmb() | ||
| 119 | #define mb() wbflush() | ||
| 120 | #define iob() wbflush() | ||
| 121 | |||
| 122 | #else /* !CONFIG_CPU_HAS_WB */ | ||
| 123 | |||
| 124 | #define wmb() fast_wmb() | ||
| 125 | #define rmb() fast_rmb() | ||
| 126 | #define mb() fast_mb() | ||
| 127 | #define iob() fast_iob() | ||
| 128 | |||
| 129 | #endif /* !CONFIG_CPU_HAS_WB */ | ||
| 130 | |||
| 131 | #ifdef CONFIG_SMP | ||
| 132 | #define smp_mb() mb() | ||
| 133 | #define smp_rmb() rmb() | ||
| 134 | #define smp_wmb() wmb() | ||
| 135 | #define smp_read_barrier_depends() read_barrier_depends() | ||
| 136 | #else | ||
| 137 | #define smp_mb() barrier() | ||
| 138 | #define smp_rmb() barrier() | ||
| 139 | #define smp_wmb() barrier() | ||
| 140 | #define smp_read_barrier_depends() do { } while(0) | ||
| 141 | #endif | ||
| 142 | |||
| 143 | #define set_mb(var, value) \ | ||
| 144 | do { var = value; mb(); } while (0) | ||
| 145 | 25 | ||
| 146 | /* | 26 | /* |
| 147 | * switch_to(n) should switch tasks to task nr n, first | 27 | * switch_to(n) should switch tasks to task nr n, first |
| @@ -217,9 +97,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) | |||
| 217 | " .set mips3 \n" | 97 | " .set mips3 \n" |
| 218 | " sc %2, %1 \n" | 98 | " sc %2, %1 \n" |
| 219 | " beqzl %2, 1b \n" | 99 | " beqzl %2, 1b \n" |
| 220 | #ifdef CONFIG_SMP | ||
| 221 | " sync \n" | ||
| 222 | #endif | ||
| 223 | " .set mips0 \n" | 100 | " .set mips0 \n" |
| 224 | : "=&r" (retval), "=m" (*m), "=&r" (dummy) | 101 | : "=&r" (retval), "=m" (*m), "=&r" (dummy) |
| 225 | : "R" (*m), "Jr" (val) | 102 | : "R" (*m), "Jr" (val) |
| @@ -235,9 +112,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) | |||
| 235 | " .set mips3 \n" | 112 | " .set mips3 \n" |
| 236 | " sc %2, %1 \n" | 113 | " sc %2, %1 \n" |
| 237 | " beqz %2, 1b \n" | 114 | " beqz %2, 1b \n" |
| 238 | #ifdef CONFIG_SMP | ||
| 239 | " sync \n" | ||
| 240 | #endif | ||
| 241 | " .set mips0 \n" | 115 | " .set mips0 \n" |
| 242 | : "=&r" (retval), "=m" (*m), "=&r" (dummy) | 116 | : "=&r" (retval), "=m" (*m), "=&r" (dummy) |
| 243 | : "R" (*m), "Jr" (val) | 117 | : "R" (*m), "Jr" (val) |
| @@ -251,6 +125,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) | |||
| 251 | local_irq_restore(flags); /* implies memory barrier */ | 125 | local_irq_restore(flags); /* implies memory barrier */ |
| 252 | } | 126 | } |
| 253 | 127 | ||
| 128 | smp_mb(); | ||
| 129 | |||
| 254 | return retval; | 130 | return retval; |
| 255 | } | 131 | } |
| 256 | 132 | ||
| @@ -268,9 +144,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) | |||
| 268 | " move %2, %z4 \n" | 144 | " move %2, %z4 \n" |
| 269 | " scd %2, %1 \n" | 145 | " scd %2, %1 \n" |
| 270 | " beqzl %2, 1b \n" | 146 | " beqzl %2, 1b \n" |
| 271 | #ifdef CONFIG_SMP | ||
| 272 | " sync \n" | ||
| 273 | #endif | ||
| 274 | " .set mips0 \n" | 147 | " .set mips0 \n" |
| 275 | : "=&r" (retval), "=m" (*m), "=&r" (dummy) | 148 | : "=&r" (retval), "=m" (*m), "=&r" (dummy) |
| 276 | : "R" (*m), "Jr" (val) | 149 | : "R" (*m), "Jr" (val) |
| @@ -284,9 +157,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) | |||
| 284 | " move %2, %z4 \n" | 157 | " move %2, %z4 \n" |
| 285 | " scd %2, %1 \n" | 158 | " scd %2, %1 \n" |
| 286 | " beqz %2, 1b \n" | 159 | " beqz %2, 1b \n" |
| 287 | #ifdef CONFIG_SMP | ||
| 288 | " sync \n" | ||
| 289 | #endif | ||
| 290 | " .set mips0 \n" | 160 | " .set mips0 \n" |
| 291 | : "=&r" (retval), "=m" (*m), "=&r" (dummy) | 161 | : "=&r" (retval), "=m" (*m), "=&r" (dummy) |
| 292 | : "R" (*m), "Jr" (val) | 162 | : "R" (*m), "Jr" (val) |
| @@ -300,6 +170,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) | |||
| 300 | local_irq_restore(flags); /* implies memory barrier */ | 170 | local_irq_restore(flags); /* implies memory barrier */ |
| 301 | } | 171 | } |
| 302 | 172 | ||
| 173 | smp_mb(); | ||
| 174 | |||
| 303 | return retval; | 175 | return retval; |
| 304 | } | 176 | } |
| 305 | #else | 177 | #else |
| @@ -345,9 +217,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, | |||
| 345 | " .set mips3 \n" | 217 | " .set mips3 \n" |
| 346 | " sc $1, %1 \n" | 218 | " sc $1, %1 \n" |
| 347 | " beqzl $1, 1b \n" | 219 | " beqzl $1, 1b \n" |
| 348 | #ifdef CONFIG_SMP | ||
| 349 | " sync \n" | ||
| 350 | #endif | ||
| 351 | "2: \n" | 220 | "2: \n" |
| 352 | " .set pop \n" | 221 | " .set pop \n" |
| 353 | : "=&r" (retval), "=R" (*m) | 222 | : "=&r" (retval), "=R" (*m) |
| @@ -365,9 +234,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, | |||
| 365 | " .set mips3 \n" | 234 | " .set mips3 \n" |
| 366 | " sc $1, %1 \n" | 235 | " sc $1, %1 \n" |
| 367 | " beqz $1, 1b \n" | 236 | " beqz $1, 1b \n" |
| 368 | #ifdef CONFIG_SMP | ||
| 369 | " sync \n" | ||
| 370 | #endif | ||
| 371 | "2: \n" | 237 | "2: \n" |
| 372 | " .set pop \n" | 238 | " .set pop \n" |
| 373 | : "=&r" (retval), "=R" (*m) | 239 | : "=&r" (retval), "=R" (*m) |
| @@ -383,6 +249,8 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, | |||
| 383 | local_irq_restore(flags); /* implies memory barrier */ | 249 | local_irq_restore(flags); /* implies memory barrier */ |
| 384 | } | 250 | } |
| 385 | 251 | ||
| 252 | smp_mb(); | ||
| 253 | |||
| 386 | return retval; | 254 | return retval; |
| 387 | } | 255 | } |
| 388 | 256 | ||
| @@ -402,9 +270,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, | |||
| 402 | " move $1, %z4 \n" | 270 | " move $1, %z4 \n" |
| 403 | " scd $1, %1 \n" | 271 | " scd $1, %1 \n" |
| 404 | " beqzl $1, 1b \n" | 272 | " beqzl $1, 1b \n" |
| 405 | #ifdef CONFIG_SMP | ||
| 406 | " sync \n" | ||
| 407 | #endif | ||
| 408 | "2: \n" | 273 | "2: \n" |
| 409 | " .set pop \n" | 274 | " .set pop \n" |
| 410 | : "=&r" (retval), "=R" (*m) | 275 | : "=&r" (retval), "=R" (*m) |
| @@ -420,9 +285,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, | |||
| 420 | " move $1, %z4 \n" | 285 | " move $1, %z4 \n" |
| 421 | " scd $1, %1 \n" | 286 | " scd $1, %1 \n" |
| 422 | " beqz $1, 1b \n" | 287 | " beqz $1, 1b \n" |
| 423 | #ifdef CONFIG_SMP | ||
| 424 | " sync \n" | ||
| 425 | #endif | ||
| 426 | "2: \n" | 288 | "2: \n" |
| 427 | " .set pop \n" | 289 | " .set pop \n" |
| 428 | : "=&r" (retval), "=R" (*m) | 290 | : "=&r" (retval), "=R" (*m) |
| @@ -438,6 +300,8 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, | |||
| 438 | local_irq_restore(flags); /* implies memory barrier */ | 300 | local_irq_restore(flags); /* implies memory barrier */ |
| 439 | } | 301 | } |
| 440 | 302 | ||
| 303 | smp_mb(); | ||
| 304 | |||
| 441 | return retval; | 305 | return retval; |
| 442 | } | 306 | } |
| 443 | #else | 307 | #else |
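xchg() and cmpxchg() follow the same scheme: the conditional `sync` inside the LL/SC loops disappears and a trailing smp_mb() provides the documented full-barrier behaviour (the interrupt-disabling fallback already implies a barrier via local_irq_restore(), as the comments in the diff note). A typical lock-free consumer, sketched with a hypothetical ownership word:

```c
static unsigned long owner;	/* 0 = unclaimed; hypothetical */

/*
 * Returns true only for the first claimant.  The smp_mb() that now follows
 * the xchg() ensures the winner's subsequent accesses to the claimed
 * resource are not reordered before the claim itself.
 */
static int try_claim(unsigned long me)
{
	return xchg(&owner, me) == 0;
}
```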
