Diffstat (limited to 'arch/mips/include/asm/barrier.h')
-rw-r--r--  arch/mips/include/asm/barrier.h | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index a5eb1bb199a7..b7f6ac5e513c 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -222,6 +222,42 @@
 #define __smp_mb__before_atomic()	__smp_mb__before_llsc()
 #define __smp_mb__after_atomic()	smp_llsc_mb()
 
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or pref) in between an ll & sc can cause the sc instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the ll in program order may actually
+ *    be executed after the ll - this is the reordering case.
+ *
+ *    In order to avoid this we need to place a memory barrier (ie. a sync
+ *    instruction) prior to every ll instruction, in between it & any earlier
+ *    memory access instructions. Many of these cases are already covered by
+ *    smp_mb__before_llsc() but for the remaining cases, typically ones in
+ *    which multiple CPUs may operate on a memory location but ordering is not
+ *    usually guaranteed, we use loongson_llsc_mb() below.
+ *
+ *    This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an ll & sc with a target outside
+ *    of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
+ *    or similar, then misprediction of the branch may allow speculative
+ *    execution of memory accesses from outside of the ll-sc loop.
+ *
+ *    In order to avoid this we need a memory barrier (ie. a sync instruction)
+ *    at each affected branch target, for which we also use loongson_llsc_mb()
+ *    defined below.
+ *
+ *    This case affects all current Loongson 3 CPUs.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+#define loongson_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#else
+#define loongson_llsc_mb()	do { } while (0)
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
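
For reference, a minimal sketch of the case 1 placement described in the comment
above: loongson_llsc_mb() goes between any earlier memory accesses and the ll,
so the affected CPUs cannot reorder those accesses into the ll/sc sequence. The
function name atomic_add_sketch and the bare int pointer are hypothetical
illustrations, not the kernel's actual atomic_add(); the real code in
arch/mips/include/asm/atomic.h also uses GCC_OFF_SMALL_ASM() rather than the
plain "R" constraint used here.

	#include <asm/barrier.h>	/* loongson_llsc_mb() as added above */

	/* Hypothetical example, not a real kernel function. */
	static inline void atomic_add_sketch(int i, int *p)
	{
		int temp;

		loongson_llsc_mb();	/* case 1: sync before the ll */
		__asm__ __volatile__(
		"1:	ll	%0, %1		# temp = *p (load-linked)	\n"
		"	addu	%0, %0, %2	# temp += i			\n"
		"	sc	%0, %1		# store-conditional to *p	\n"
		"	beqz	%0, 1b		# retry if the sc failed	\n"
		: "=&r" (temp), "+R" (*p)
		: "Ir" (i));
	}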
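And a sketch of the case 2 placement: the bne may leave the ll/sc loop on a
value mismatch, so a barrier is also needed at the branch target, which here
falls immediately after the asm block (the "2:" label). Again, cmpxchg_sketch
is a hypothetical stand-in, not the kernel's __cmpxchg_asm().

	/* Hypothetical example, not a real kernel function. */
	static inline int cmpxchg_sketch(int *p, int old, int new)
	{
		int ret, tmp;

		loongson_llsc_mb();	/* case 1: sync before the ll */
		__asm__ __volatile__(
		"1:	ll	%0, %2		# ret = *p (load-linked)	\n"
		"	bne	%0, %3, 2f	# mismatch: exit the loop	\n"
		"	move	%1, %4		# tmp = new			\n"
		"	sc	%1, %2		# store-conditional to *p	\n"
		"	beqz	%1, 1b		# retry if the sc failed	\n"
		"2:								\n"
		: "=&r" (ret), "=&r" (tmp), "+R" (*p)
		: "r" (old), "r" (new)
		: "memory");
		loongson_llsc_mb();	/* case 2: sync at the 2f branch target */

		return ret;
	}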
