Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/atomic.h                          |  6
-rw-r--r--  arch/mips/include/asm/barrier.h                         | 36
-rw-r--r--  arch/mips/include/asm/bitops.h                          |  5
-rw-r--r--  arch/mips/include/asm/futex.h                           |  3
-rw-r--r--  arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h   |  2
-rw-r--r--  arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h     |  2
-rw-r--r--  arch/mips/include/asm/pgtable.h                         |  2
7 files changed, 52 insertions(+), 4 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 43fcd35e2957..94096299fc56 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 	if (kernel_uses_llsc) {					\
 		int temp;					\
 								\
+		loongson_llsc_mb();				\
 		__asm__ __volatile__(				\
 		"	.set	push			\n"	\
 		"	.set	"MIPS_ISA_LEVEL"	\n"	\
@@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
 	if (kernel_uses_llsc) {					\
 		int temp;					\
 								\
+		loongson_llsc_mb();				\
 		__asm__ __volatile__(				\
 		"	.set	push			\n"	\
 		"	.set	"MIPS_ISA_LEVEL"	\n"	\
@@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
 	if (kernel_uses_llsc) {					\
 		int temp;					\
 								\
+		loongson_llsc_mb();				\
 		__asm__ __volatile__(				\
 		"	.set	push			\n"	\
 		"	.set	"MIPS_ISA_LEVEL"	\n"	\
@@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 	if (kernel_uses_llsc) {					\
 		long temp;					\
 								\
+		loongson_llsc_mb();				\
 		__asm__ __volatile__(				\
 		"	.set	push			\n"	\
 		"	.set	"MIPS_ISA_LEVEL"	\n"	\
@@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 	if (kernel_uses_llsc) {					\
 		long temp;					\
 								\
+		loongson_llsc_mb();				\
 		__asm__ __volatile__(				\
 		"	.set	push			\n"	\
 		"	.set	"MIPS_ISA_LEVEL"	\n"	\
@@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
 	if (kernel_uses_llsc) {					\
 		long temp;					\
 								\
+		loongson_llsc_mb();				\
 		__asm__ __volatile__(				\
 		"	.set	push			\n"	\
 		"	.set	"MIPS_ISA_LEVEL"	\n"	\
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index a5eb1bb199a7..b7f6ac5e513c 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -222,6 +222,42 @@
 #define __smp_mb__before_atomic()	__smp_mb__before_llsc()
 #define __smp_mb__after_atomic()	smp_llsc_mb()
 
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or pref) in between an ll & sc can cause the sc instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the ll in program order may actually
+ *    be executed after the ll - this is the reordering case.
+ *
+ *    In order to avoid this we need to place a memory barrier (ie. a sync
+ *    instruction) prior to every ll instruction, in between it & any earlier
+ *    memory access instructions. Many of these cases are already covered by
+ *    smp_mb__before_llsc() but for the remaining cases, typically ones in
+ *    which multiple CPUs may operate on a memory location but ordering is not
+ *    usually guaranteed, we use loongson_llsc_mb() below.
+ *
+ *    This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an ll & sc with a target outside
+ *    of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
+ *    or similar, then misprediction of the branch may allow speculative
+ *    execution of memory accesses from outside of the ll-sc loop.
+ *
+ *    In order to avoid this we need a memory barrier (ie. a sync instruction)
+ *    at each affected branch target, for which we also use loongson_llsc_mb()
+ *    defined below.
+ *
+ *    This case affects all current Loongson 3 CPUs.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+#define loongson_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#else
+#define loongson_llsc_mb()	do { } while (0)
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
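To make the first rule above concrete, here is a minimal sketch of case 1 - a sync emitted ahead of a plain ll/sc read-modify-write loop. It is illustrative only: the function name is made up and the operand constraints are simplified (the kernel proper uses GCC_OFF_SMALL_ASM() for the memory operand and wraps the loop in .set directives):

/*
 * Illustrative sketch of case 1. The loongson_llsc_mb() separates any
 * earlier memory accesses from the ll, so a reordered access cannot
 * land between the ll and the sc and let the sc erroneously succeed.
 */
static inline void example_atomic_inc(int *p)
{
	int temp;

	loongson_llsc_mb();	/* case 1: order prior accesses before the ll */
	__asm__ __volatile__(
	"1:	ll	%0, %1		\n"	/* temp = *p (load-linked) */
	"	addiu	%0, %0, 1	\n"	/* temp += 1 */
	"	sc	%0, %1		\n"	/* *p = temp (store-conditional) */
	"	beqz	%0, 1b		\n"	/* retry if the sc failed */
	: "=&r" (temp), "+m" (*p));
}

The atomic_*(), bitops and futex hunks in this patch all add the barrier in exactly this position, immediately ahead of the asm containing the ll.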
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index c4675957b21b..830c93a010c3 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1	# set_bit	\n"
@@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		} while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	.set	push			\n"
@@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		: "ir" (~(1UL << bit)));
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1	# clear_bit	\n"
@@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		} while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	.set	push			\n"
@@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp;
 
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	.set	push			\n"
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index c14d798f3888..b83b0397462d 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -50,6 +50,7 @@
 		  "i" (-EFAULT)					\
 		: "memory");					\
 	} else if (cpu_has_llsc) {				\
+		loongson_llsc_mb();				\
 		__asm__ __volatile__(				\
 		"	.set	push			\n"	\
 		"	.set	noat			\n"	\
@@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		  "i" (-EFAULT)
 		: "memory");
 	} else if (cpu_has_llsc) {
+		loongson_llsc_mb();
 		__asm__ __volatile__(
 		"# futex_atomic_cmpxchg_inatomic	\n"
 		"	.set	push			\n"
@@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
 		  "i" (-EFAULT)
 		: "memory");
+		loongson_llsc_mb();
 	} else
 		return -ENOSYS;
 
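The barrier added after the cmpxchg asm above is an instance of case 2 from the barrier.h comment: the value-mismatch branch jumps out of the ll/sc loop, and if it is mispredicted the CPU may speculatively execute memory accesses from beyond the loop. A hypothetical sketch of that shape (again with made-up names and simplified constraints):

/*
 * Illustrative sketch of case 2. The "bne ... 2f" exit can be
 * mispredicted, so the branch target (here, straight after the asm)
 * needs a sync of its own, in addition to the one ahead of the ll.
 */
static inline int example_cmpxchg(int *p, int old, int new)
{
	int prev, tmp;

	loongson_llsc_mb();		/* case 1: sync ahead of the ll */
	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"	/* prev = *p */
	"	bne	%0, %3, 2f	\n"	/* mismatch: leave the loop */
	"	move	%1, %4		\n"
	"	sc	%1, %2		\n"	/* try *p = new */
	"	beqz	%1, 1b		\n"	/* retry if the sc failed */
	"2:				\n"
	: "=&r" (prev), "=&r" (tmp), "+m" (*p)
	: "r" (old), "r" (new)
	: "memory");
	loongson_llsc_mb();		/* case 2: sync at the exit branch target */
	return prev;
}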
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
index c6b63a409641..6dd8ad2409dc 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -18,8 +18,6 @@
 #define INT_NUM_EXTRA_START	(INT_NUM_IM4_IRL0 + 32)
 #define INT_NUM_IM_OFFSET	(INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
 
-#define MIPS_CPU_TIMER_IRQ	7
-
 #define MAX_IM			5
 
 #endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 141076325307..0b424214a5e9 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -19,8 +19,6 @@
 
 #define LTQ_DMA_CH0_INT		(INT_NUM_IM2_IRL0)
 
-#define MIPS_CPU_TIMER_IRQ	7
-
 #define MAX_IM			5
 
 #endif
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 57933fc8fd98..910851c62db3 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -228,6 +228,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
 		: [global] "r" (page_global));
 	} else if (kernel_uses_llsc) {
+		loongson_llsc_mb();
 		__asm__ __volatile__ (
 		"	.set	push				\n"
 		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
@@ -242,6 +243,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		"	.set	pop				\n"
 		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
 		: [global] "r" (page_global));
+		loongson_llsc_mb();
 	}
 #else /* !CONFIG_SMP */
 	if (pte_none(*buddy))
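The set_pte() change follows the same pattern as the futex one: per the barrier.h comment, the leading loongson_llsc_mb() provides the sync ahead of the ll required by case 1, while the trailing one provides the sync at the branch target required by case 2, the buddy-update loop containing a branch whose target lies just past the sc.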