-rw-r--r--	arch/mips/include/asm/hazards.h		371
-rw-r--r--	arch/mips/include/asm/irqflags.h	153
-rw-r--r--	arch/mips/lib/mips-atomic.c		149
3 files changed, 403 insertions, 270 deletions
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 44d6a5bde4a1..e3ee92d4dbe7 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -10,34 +10,13 @@
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
-
-#include <asm/cpu-features.h>
-
-#define ASMMACRO(name, code...) \
-__asm__(".macro " #name "; " #code "; .endm"); \
-\
-static inline void name(void) \
-{ \
-	__asm__ __volatile__ (#name); \
-}
-
-/*
- * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
+#include <linux/stringify.h>
 
-ASMMACRO(_ssnop,
+#define ___ssnop \
 	sll	$0, $0, 1
-	)
 
-ASMMACRO(_ehb,
+#define ___ehb \
 	sll	$0, $0, 3
-	)
 
 /*
  * TLB hazards
@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
  * MIPSR2 defines ehb for hazard avoidance
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	_ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	_ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	_ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	_ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	_ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	_ehb
-	)
+#define __mtc0_tlbw_hazard \
+	___ehb
+
+#define __tlbw_use_hazard \
+	___ehb
+
+#define __tlb_probe_hazard \
+	___ehb
+
+#define __irq_enable_hazard \
+	___ehb
+
+#define __irq_disable_hazard \
+	___ehb
+
+#define __back_to_back_c0_hazard \
+	___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -94,24 +73,42 @@ do { \
  * These are slightly complicated by the fact that we guarantee R1 kernels to
  * run fine on R2 processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	_ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
+
+#define __mtc0_tlbw_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __tlbw_use_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __tlb_probe_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __irq_enable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __irq_disable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __back_to_back_c0_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -147,18 +144,18 @@ do { \
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
 /*
  * Mostly like R4000 for historic reasons
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	_ssnop; _ssnop; _ssnop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #else
@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
  * hazard so this is nice trick to have an optimal code for a range of
  * processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	nop; nop
-	)
-ASMMACRO(tlbw_use_hazard,
-	nop; nop; nop
-	)
-ASMMACRO(tlb_probe_hazard,
-	nop; nop; nop
-	)
-ASMMACRO(irq_enable_hazard,
-	_ssnop; _ssnop; _ssnop;
-	)
-ASMMACRO(irq_disable_hazard,
-	nop; nop; nop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	_ssnop; _ssnop; _ssnop;
-	)
+#define __mtc0_tlbw_hazard \
+	nop; \
+	nop
+
+#define __tlbw_use_hazard \
+	nop; \
+	nop; \
+	nop
+
+#define __tlb_probe_hazard \
+	nop; \
+	nop; \
+	nop
+
+#define __irq_enable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop
+
+#define __irq_disable_hazard \
+	nop; \
+	nop; \
+	nop
+
+#define __back_to_back_c0_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop
+
 #define instruction_hazard() do { } while (0)
 
 #endif
@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
 /* FPU hazards */
 
 #if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
-	.set push;
-	.set mips64;
-	.set noreorder;
-	_ssnop;
-	bnezl $0, .+4;
-	_ssnop;
-	.set pop
-	)
-ASMMACRO(disable_fpu_hazard,
-	)
+
+#define __enable_fpu_hazard \
+	.set push; \
+	.set mips64; \
+	.set noreorder; \
+	___ssnop; \
+	bnezl $0, .+4; \
+	___ssnop; \
+	.set pop
+
+#define __disable_fpu_hazard
 
 #elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
-	_ehb
-	)
-ASMMACRO(disable_fpu_hazard,
-	_ehb
-	)
+
+#define __enable_fpu_hazard \
+	___ehb
+
+#define __disable_fpu_hazard \
+	___ehb
+
 #else
-ASMMACRO(enable_fpu_hazard,
-	nop; nop; nop; nop
-	)
-ASMMACRO(disable_fpu_hazard,
-	_ehb
-	)
+
+#define __enable_fpu_hazard \
+	nop; \
+	nop; \
+	nop; \
+	nop
+
+#define __disable_fpu_hazard \
+	___ehb
+
 #endif
 
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define _ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(___ssnop) \
+	); \
+} while (0)
+
+#define _ehb() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(___ehb) \
+	); \
+} while (0)
+
+
+#define mtc0_tlbw_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__mtc0_tlbw_hazard) \
+	); \
+} while (0)
+
+
+#define tlbw_use_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__tlbw_use_hazard) \
+	); \
+} while (0)
+
+
+#define tlb_probe_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__tlb_probe_hazard) \
+	); \
+} while (0)
+
+
+#define irq_enable_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__irq_enable_hazard) \
+	); \
+} while (0)
+
+
+#define irq_disable_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__irq_disable_hazard) \
+	); \
+} while (0)
+
+
+#define back_to_back_c0_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__back_to_back_c0_hazard) \
+	); \
+} while (0)
+
+
+#define enable_fpu_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__enable_fpu_hazard) \
+	); \
+} while (0)
+
+
+#define disable_fpu_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__disable_fpu_hazard) \
+	); \
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_HAZARDS_H */
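The hazards.h conversion above hinges on one preprocessor trick: each hazard sequence is now a plain macro whose body is assembler text, so .S files can use it directly while C code pastes it into an inline-asm string via __stringify(). A standalone, host-runnable sketch of that trick (illustrative only, not part of the patch; the demo_* names are made up):

	#include <stdio.h>

	/* Two-level expand-then-stringify, the same idea <linux/stringify.h>
	 * implements; these helper names are invented for the demo. */
	#define __demo_stringify_1(...)	#__VA_ARGS__
	#define demo_stringify(...)	__demo_stringify_1(__VA_ARGS__)

	/* A hazard "barrier" spelled exactly like the new ___ssnop above. */
	#define ___demo_ssnop \
		sll	$0, $0, 1

	int main(void)
	{
		/* Prints: sll $0, $0, 1 -- the text a MIPS build would paste
		 * into an __asm__ __volatile__() string, while assembly files
		 * keep using the unstringified macro body directly. */
		printf("%s\n", demo_stringify(___demo_ssnop));
		return 0;
	}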
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9f3384c789d7..45c00951888b 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -14,53 +14,48 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-	" .macro arch_local_irq_disable\n"
+static inline void arch_local_irq_disable(void)
+{
+	__asm__ __volatile__(
 	" .set push \n"
 	" .set noat \n"
 	" di \n"
-	" irq_disable_hazard \n"
+	" " __stringify(__irq_disable_hazard) " \n"
 	" .set pop \n"
-	" .endm \n");
-
-static inline void arch_local_irq_disable(void)
-{
-	__asm__ __volatile__(
-	"arch_local_irq_disable"
-	: /* no outputs */
-	: /* no inputs */
-	: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
 
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
 
-__asm__(
-	" .macro arch_local_irq_save result \n"
+	asm __volatile__(
 	" .set push \n"
 	" .set reorder \n"
 	" .set noat \n"
-	" di \\result \n"
-	" andi \\result, 1 \n"
-	" irq_disable_hazard \n"
+	" di %[flags] \n"
+	" andi %[flags], 1 \n"
+	" " __stringify(__irq_disable_hazard) " \n"
 	" .set pop \n"
-	" .endm \n");
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
 
-static inline unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_irq_save\t%0"
-	: "=r" (flags)
-	: /* no inputs */
-	: "memory");
 	return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
 
-__asm__(
-	" .macro arch_local_irq_restore flags \n"
+	__asm__ __volatile__(
 	" .set push \n"
 	" .set noreorder \n"
 	" .set noat \n"
@@ -69,7 +64,7 @@ __asm__(
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
-	" beqz \\flags, 1f \n"
+	" beqz %[flags], 1f \n"
 	" di \n"
 	" ei \n"
 	"1: \n"
@@ -78,33 +73,44 @@ __asm__(
 	 * Fast, dangerous. Life is fun, life is good.
 	 */
 	" mfc0 $1, $12 \n"
-	" ins $1, \\flags, 0, 1 \n"
+	" ins $1, %[flags], 0, 1 \n"
 	" mtc0 $1, $12 \n"
 #endif
-	" irq_disable_hazard \n"
+	" " __stringify(__irq_disable_hazard) " \n"
 	" .set pop \n"
-	" .endm \n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-	unsigned long __tmp1;
-
-	__asm__ __volatile__(
-	"arch_local_irq_restore\t%0"
-	: "=r" (__tmp1)
-	: "0" (flags)
-	: "memory");
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-	unsigned long __tmp1;
-
 	__asm__ __volatile__(
-	"arch_local_irq_restore\t%0"
-	: "=r" (__tmp1)
-	: "0" (flags)
-	: "memory");
+	" .set push \n"
+	" .set noreorder \n"
+	" .set noat \n"
+#if defined(CONFIG_IRQ_CPU)
+	/*
+	 * Slow, but doesn't suffer from a relatively unlikely race
+	 * condition we're having since days 1.
+	 */
+	" beqz %[flags], 1f \n"
+	" di \n"
+	" ei \n"
+	"1: \n"
+#else
+	/*
+	 * Fast, dangerous. Life is fun, life is good.
+	 */
+	" mfc0 $1, $12 \n"
+	" ins $1, %[flags], 0, 1 \n"
+	" mtc0 $1, $12 \n"
+#endif
+	" " __stringify(__irq_disable_hazard) " \n"
+	" .set pop \n"
+	: [flags] "=r" (flags)
+	: "0" (flags)
+	: "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-	" .macro arch_local_irq_enable \n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
 	" .set push \n"
 	" .set reorder \n"
 	" .set noat \n"
@@ -133,45 +149,28 @@ __asm__(
 	" xori $1,0x1e \n"
 	" mtc0 $1,$12 \n"
 #endif
-	" irq_enable_hazard \n"
+	" " __stringify(__irq_enable_hazard) " \n"
 	" .set pop \n"
-	" .endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-	"arch_local_irq_enable"
-	: /* no outputs */
-	: /* no inputs */
-	: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
 
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
 
-__asm__(
-	" .macro arch_local_save_flags flags \n"
+	asm __volatile__(
 	" .set push \n"
 	" .set reorder \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	" mfc0 \\flags, $2, 1 \n"
+	" mfc0 %[flags], $2, 1 \n"
 #else
-	" mfc0 \\flags, $12 \n"
+	" mfc0 %[flags], $12 \n"
 #endif
 	" .set pop \n"
-	" .endm \n");
+	: [flags] "=r" (flags));
 
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
 	return flags;
 }
 
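The irqflags.h rewrite also replaces the gas-macro arguments (\result, \flags) with named inline-asm operands, so the compiler allocates the register itself. A minimal MIPS-only sketch of that pattern, mirroring the non-SMTC arch_local_save_flags() above (the function name is invented; $12 is the CP0 Status register, so this will only build for MIPS targets):

	static inline unsigned long read_cp0_status_sketch(void)
	{
		unsigned long flags;

		asm volatile(
		"	.set	push		\n"
		"	.set	reorder		\n"
		"	mfc0	%[flags], $12	\n"	/* CP0 Status -> flags */
		"	.set	pop		\n"
		: [flags] "=r" (flags));

		return flags;
	}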
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index cd160be3ce4d..6807f7172eaf 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/preempt.h>
 #include <linux/export.h>
+#include <linux/stringify.h>
 
 #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
 
@@ -34,8 +35,11 @@
  *
  * Workaround: mask EXL bit of the result or place a nop before mfc0.
  */
-__asm__(
-	" .macro arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+
+	__asm__ __volatile__(
 	" .set push \n"
 	" .set noat \n"
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -52,108 +56,98 @@ __asm__(
 	" .set noreorder \n"
 	" mtc0 $1,$12 \n"
 #endif
-	" irq_disable_hazard \n"
+	" " __stringify(__irq_disable_hazard) " \n"
 	" .set pop \n"
-	" .endm \n");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 
-notrace void arch_local_irq_disable(void)
-{
-	preempt_disable();
-	__asm__ __volatile__(
-	"arch_local_irq_disable"
-	: /* no outputs */
-	: /* no inputs */
-	: "memory");
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
 
-__asm__(
-	" .macro arch_local_irq_save result \n"
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	preempt_disable();
+
+	__asm__ __volatile__(
 	" .set push \n"
 	" .set reorder \n"
 	" .set noat \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	" mfc0 \\result, $2, 1 \n"
-	" ori $1, \\result, 0x400 \n"
+	" mfc0 %[flags], $2, 1 \n"
+	" ori $1, %[flags], 0x400 \n"
 	" .set noreorder \n"
 	" mtc0 $1, $2, 1 \n"
-	" andi \\result, \\result, 0x400 \n"
+	" andi %[flags], %[flags], 0x400 \n"
 #elif defined(CONFIG_CPU_MIPSR2)
 	/* see irqflags.h for inline function */
 #else
-	" mfc0 \\result, $12 \n"
-	" ori $1, \\result, 0x1f \n"
+	" mfc0 %[flags], $12 \n"
+	" ori $1, %[flags], 0x1f \n"
 	" xori $1, 0x1f \n"
 	" .set noreorder \n"
 	" mtc0 $1, $12 \n"
 #endif
-	" irq_disable_hazard \n"
+	" " __stringify(__irq_disable_hazard) " \n"
 	" .set pop \n"
-	" .endm \n");
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
 
-notrace unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	preempt_disable();
-	asm volatile("arch_local_irq_save\t%0"
-	: "=r" (flags)
-	: /* no inputs */
-	: "memory");
 	preempt_enable();
+
 	return flags;
 }
 EXPORT_SYMBOL(arch_local_irq_save);
 
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	preempt_disable();
 
-__asm__(
-	" .macro arch_local_irq_restore flags \n"
+	__asm__ __volatile__(
 	" .set push \n"
 	" .set noreorder \n"
 	" .set noat \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0 $1, $2, 1 \n"
-	"andi \\flags, 0x400 \n"
-	"ori $1, 0x400 \n"
-	"xori $1, 0x400 \n"
-	"or \\flags, $1 \n"
-	"mtc0 \\flags, $2, 1 \n"
+	" mfc0 $1, $2, 1 \n"
+	" andi %[flags], 0x400 \n"
+	" ori $1, 0x400 \n"
+	" xori $1, 0x400 \n"
+	" or %[flags], $1 \n"
+	" mtc0 %[flags], $2, 1 \n"
 #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
 	/* see irqflags.h for inline function */
 #elif defined(CONFIG_CPU_MIPSR2)
 	/* see irqflags.h for inline function */
 #else
 	" mfc0 $1, $12 \n"
-	" andi \\flags, 1 \n"
+	" andi %[flags], 1 \n"
 	" ori $1, 0x1f \n"
 	" xori $1, 0x1f \n"
-	" or \\flags, $1 \n"
-	" mtc0 \\flags, $12 \n"
+	" or %[flags], $1 \n"
+	" mtc0 %[flags], $12 \n"
 #endif
-	" irq_disable_hazard \n"
+	" " __stringify(__irq_disable_hazard) " \n"
 	" .set pop \n"
-	" .endm \n");
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
 
-notrace void arch_local_irq_restore(unsigned long flags)
-{
-	unsigned long __tmp1;
-
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-	preempt_disable();
-	__asm__ __volatile__(
-	"arch_local_irq_restore\t%0"
-	: "=r" (__tmp1)
-	: "0" (flags)
-	: "memory");
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags)
 	unsigned long __tmp1;
 
 	preempt_disable();
+
 	__asm__ __volatile__(
-	"arch_local_irq_restore\t%0"
-	: "=r" (__tmp1)
-	: "0" (flags)
-	: "memory");
+	" .set push \n"
+	" .set noreorder \n"
+	" .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	" mfc0 $1, $2, 1 \n"
+	" andi %[flags], 0x400 \n"
+	" ori $1, 0x400 \n"
+	" xori $1, 0x400 \n"
+	" or %[flags], $1 \n"
+	" mtc0 %[flags], $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	" mfc0 $1, $12 \n"
+	" andi %[flags], 1 \n"
+	" ori $1, 0x1f \n"
+	" xori $1, 0x1f \n"
+	" or %[flags], $1 \n"
+	" mtc0 %[flags], $12 \n"
+#endif
+	" " __stringify(__irq_disable_hazard) " \n"
+	" .set pop \n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);
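In the non-R2 fallback paths of mips-atomic.c the low Status bits are cleared with an ori/xori pair instead of loading a separate mask; the identity used is (x | m) ^ m == x & ~m, with m = 0x1f covering IE, EXL, ERL and KSU. A small host-side check of that arithmetic (the Status value below is made up for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned int status  = 0xfc43ff13u;		/* made-up CP0 Status value */
		unsigned int cleared = (status | 0x1f) ^ 0x1f;	/* same effect as status & ~0x1f */

		/* Prints: 0xfc43ff13 -> 0xfc43ff00 (low five bits cleared) */
		printf("%#010x -> %#010x\n", status, cleared);
		return 0;
	}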
