Diffstat (limited to 'include/asm-mips/system.h')
-rw-r--r--	include/asm-mips/system.h	156
1 file changed, 10 insertions(+), 146 deletions(-)
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 3056feed5a36..9428057a50cf 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
  * Copyright (C) 1996 by Paul M. Antoine
  * Copyright (C) 1999 Silicon Graphics
  * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
@@ -16,132 +16,12 @@
 #include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/dsp.h>
 #include <asm/ptrace.h>
 #include <asm/war.h>
 
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- */
-
-#define read_barrier_depends()	do { } while(0)
-
-#ifdef CONFIG_CPU_HAS_SYNC
-#define __sync()				\
-	__asm__ __volatile__(			\
-		".set	push\n\t"		\
-		".set	noreorder\n\t"		\
-		".set	mips2\n\t"		\
-		"sync\n\t"			\
-		".set	pop"			\
-		: /* no output */		\
-		: /* no input */		\
-		: "memory")
-#else
-#define __sync()	do { } while(0)
-#endif
-
-#define __fast_iob()				\
-	__asm__ __volatile__(			\
-		".set	push\n\t"		\
-		".set	noreorder\n\t"		\
-		"lw	$0,%0\n\t"		\
-		"nop\n\t"			\
-		".set	pop"			\
-		: /* no output */		\
-		: "m" (*(int *)CKSEG1)		\
-		: "memory")
-
-#define fast_wmb()	__sync()
-#define fast_rmb()	__sync()
-#define fast_mb()	__sync()
-#define fast_iob()				\
-	do {					\
-		__sync();			\
-		__fast_iob();			\
-	} while (0)
-
-#ifdef CONFIG_CPU_HAS_WB
-
-#include <asm/wbflush.h>
-
-#define wmb()		fast_wmb()
-#define rmb()		fast_rmb()
-#define mb()		wbflush()
-#define iob()		wbflush()
-
-#else /* !CONFIG_CPU_HAS_WB */
-
-#define wmb()		fast_wmb()
-#define rmb()		fast_rmb()
-#define mb()		fast_mb()
-#define iob()		fast_iob()
-
-#endif /* !CONFIG_CPU_HAS_WB */
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
 
 /*
  * switch_to(n) should switch tasks to task nr n, first
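None of the removed definitions go away: the #include <asm/barrier.h> added above indicates that read_barrier_depends(), __sync(), the fast_*()/mb()/rmb()/wmb()/iob() macros, the smp_*() wrappers and set_mb() simply move into that new header. The new file itself is not part of this diff, so the following is only a sketch of its assumed contents, shown because the smp_mb() calls added in the hunks below resolve through it:

	/* include/asm-mips/barrier.h -- assumed contents, not shown in this diff;
	 * the __sync()/__fast_iob()/mb()/rmb()/wmb()/iob() definitions removed
	 * above are presumed to carry over unchanged. */
	#ifdef CONFIG_SMP
	#define smp_mb()	mb()
	#define smp_rmb()	rmb()
	#define smp_wmb()	wmb()
	#define smp_read_barrier_depends()	read_barrier_depends()
	#else
	#define smp_mb()	barrier()
	#define smp_rmb()	barrier()
	#define smp_wmb()	barrier()
	#define smp_read_barrier_depends()	do { } while (0)
	#endif

	#define set_mb(var, value) \
		do { var = value; mb(); } while (0)

On non-SMP builds smp_mb() collapses to barrier(), a pure compiler barrier, so the smp_mb() calls added below cost nothing there; that mirrors the #ifdef CONFIG_SMP guards around the sync instructions being removed from the asm.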
@@ -217,9 +97,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		"	.set	mips3					\n"
 		"	sc	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
@@ -235,9 +112,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		"	.set	mips3					\n"
 		"	sc	%2, %1					\n"
 		"	beqz	%2, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
@@ -251,6 +125,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		local_irq_restore(flags);	/* implies memory barrier */
 	}
 
+	smp_mb();
+
 	return retval;
 }
 
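Taken together, the three __xchg_u32 hunks drop the CONFIG_SMP-only sync from inside each LL/SC loop and issue a single smp_mb() after the operation instead, so every variant, including the interrupt-disable fallback, now ends with the same barrier on SMP. A rough sketch of the resulting function follows; it is not the verbatim file: the .set mips3/mips0 directives, the R10000_LLSC_WAR beqzl variant, and the parts of the asm and the fallback body not visible in the hunks are elided or reconstructed here.

	static inline unsigned long __xchg_u32(volatile int *m, unsigned int val)
	{
		__u32 retval;

		if (cpu_has_llsc) {
			unsigned long dummy;

			__asm__ __volatile__(
			/* retry loop as before, minus the trailing conditional sync */
			"1:	ll	%0, %3			# xchg_u32	\n"
			"	move	%2, %z4					\n"
			"	sc	%2, %1					\n"
			"	beqz	%2, 1b					\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
			: "R" (*m), "Jr" (val)
			: "memory");
		} else {
			unsigned long flags;

			local_irq_save(flags);
			retval = *m;
			*m = val;
			local_irq_restore(flags);	/* implies memory barrier */
		}

		smp_mb();	/* one full barrier on the common exit path */

		return retval;
	}

The __xchg_u64, __cmpxchg_u32 and __cmpxchg_u64 hunks below apply the identical transformation.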
@@ -268,9 +144,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		"	move	%2, %z4					\n"
 		"	scd	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
@@ -284,9 +157,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		"	move	%2, %z4					\n"
 		"	scd	%2, %1					\n"
 		"	beqz	%2, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
@@ -300,6 +170,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		local_irq_restore(flags);	/* implies memory barrier */
 	}
 
+	smp_mb();
+
 	return retval;
 }
 #else
@@ -345,9 +217,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 		"	.set	mips3					\n"
 		"	sc	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"2:							\n"
 		"	.set	pop					\n"
 		: "=&r" (retval), "=R" (*m)
@@ -365,9 +234,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 		"	.set	mips3					\n"
 		"	sc	$1, %1					\n"
 		"	beqz	$1, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"2:							\n"
 		"	.set	pop					\n"
 		: "=&r" (retval), "=R" (*m)
@@ -383,6 +249,8 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 		local_irq_restore(flags);	/* implies memory barrier */
 	}
 
+	smp_mb();
+
 	return retval;
 }
 
@@ -402,9 +270,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		"	move	$1, %z4					\n"
 		"	scd	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"2:							\n"
 		"	.set	pop					\n"
 		: "=&r" (retval), "=R" (*m)
@@ -420,9 +285,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		"	move	$1, %z4					\n"
 		"	scd	$1, %1					\n"
 		"	beqz	$1, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"2:							\n"
 		"	.set	pop					\n"
 		: "=&r" (retval), "=R" (*m)
@@ -438,6 +300,8 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		local_irq_restore(flags);	/* implies memory barrier */
 	}
 
+	smp_mb();
+
 	return retval;
 }
 #else