Diffstat (limited to 'arch/mips/include/asm/atomic.h')
-rw-r--r--  arch/mips/include/asm/atomic.h | 374
1 file changed, 192 insertions, 182 deletions
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 6dd6bfc607e9..857da84cfc92 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -17,6 +17,7 @@
 #include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cmpxchg.h>
 #include <asm/war.h>
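A note for readers (not part of the patch): GCC_OFF12_ASM() comes from the newly included <asm/compiler.h> and expands to a bare constraint string, so ordinary string-literal pasting turns "+" GCC_OFF12_ASM() into a single read-write memory constraint. The sketch below is an assumption about how that header picks the constraint; the exact letters and conditions should be checked against <asm/compiler.h>. The point is only that the pasted result restricts the operand to an addressing mode that ll/sc can actually use, unlike the unrestricted "m".

/* Illustrative sketch only -- the real definition lives in <asm/compiler.h>. */
#ifndef CONFIG_CPU_MICROMIPS
# define GCC_OFF12_ASM()        "R"     /* address a non-macro load/store can use */
#else
# define GCC_OFF12_ASM()        "ZC"    /* ll/sc-compatible address (GCC >= 4.9) */
#endif

/*
 * After preprocessing, an operand written as
 *         "+" GCC_OFF12_ASM() (v->counter)
 * is simply "+R" (v->counter), or "+ZC" (v->counter) on microMIPS builds.
 */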
@@ -40,95 +41,97 @@
  */
 #define atomic_set(v, i) ((v)->counter = (i))
 
 #define ATOMIC_OP(op, c_op, asm_op) \
 static __inline__ void atomic_##op(int i, atomic_t * v) \
 { \
         if (kernel_uses_llsc && R10000_LLSC_WAR) { \
                 int temp; \
                 \
                 __asm__ __volatile__( \
                 " .set arch=r4000 \n" \
                 "1: ll %0, %1 # atomic_" #op " \n" \
                 " " #asm_op " %0, %2 \n" \
                 " sc %0, %1 \n" \
                 " beqzl %0, 1b \n" \
                 " .set mips0 \n" \
-                : "=&r" (temp), "+m" (v->counter) \
+                : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
                 : "Ir" (i)); \
         } else if (kernel_uses_llsc) { \
                 int temp; \
                 \
                 do { \
                         __asm__ __volatile__( \
                         " .set arch=r4000 \n" \
                         " ll %0, %1 # atomic_" #op "\n" \
                         " " #asm_op " %0, %2 \n" \
                         " sc %0, %1 \n" \
                         " .set mips0 \n" \
-                        : "=&r" (temp), "+m" (v->counter) \
+                        : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
                         : "Ir" (i)); \
                 } while (unlikely(!temp)); \
         } else { \
                 unsigned long flags; \
                 \
                 raw_local_irq_save(flags); \
                 v->counter c_op i; \
                 raw_local_irq_restore(flags); \
         } \
-} \
-
-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
-{ \
-        int result; \
-        \
-        smp_mb__before_llsc(); \
-        \
-        if (kernel_uses_llsc && R10000_LLSC_WAR) { \
-                int temp; \
-                \
-                __asm__ __volatile__( \
-                " .set arch=r4000 \n" \
-                "1: ll %1, %2 # atomic_" #op "_return \n" \
-                " " #asm_op " %0, %1, %3 \n" \
-                " sc %0, %2 \n" \
-                " beqzl %0, 1b \n" \
-                " " #asm_op " %0, %1, %3 \n" \
-                " .set mips0 \n" \
-                : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
-                : "Ir" (i)); \
-        } else if (kernel_uses_llsc) { \
-                int temp; \
-                \
-                do { \
-                        __asm__ __volatile__( \
-                        " .set arch=r4000 \n" \
-                        " ll %1, %2 # atomic_" #op "_return \n" \
-                        " " #asm_op " %0, %1, %3 \n" \
-                        " sc %0, %2 \n" \
-                        " .set mips0 \n" \
-                        : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
-                        : "Ir" (i)); \
-                } while (unlikely(!result)); \
-                \
-                result = temp; result c_op i; \
-        } else { \
-                unsigned long flags; \
-                \
-                raw_local_irq_save(flags); \
-                result = v->counter; \
-                result c_op i; \
-                v->counter = result; \
-                raw_local_irq_restore(flags); \
-        } \
-        \
-        smp_llsc_mb(); \
-        \
-        return result; \
 }
 
-#define ATOMIC_OPS(op, c_op, asm_op) \
-        ATOMIC_OP(op, c_op, asm_op) \
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+        int result; \
+        \
+        smp_mb__before_llsc(); \
+        \
+        if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+                int temp; \
+                \
+                __asm__ __volatile__( \
+                " .set arch=r4000 \n" \
+                "1: ll %1, %2 # atomic_" #op "_return \n" \
+                " " #asm_op " %0, %1, %3 \n" \
+                " sc %0, %2 \n" \
+                " beqzl %0, 1b \n" \
+                " " #asm_op " %0, %1, %3 \n" \
+                " .set mips0 \n" \
+                : "=&r" (result), "=&r" (temp), \
+                  "+" GCC_OFF12_ASM() (v->counter) \
+                : "Ir" (i)); \
+        } else if (kernel_uses_llsc) { \
+                int temp; \
+                \
+                do { \
+                        __asm__ __volatile__( \
+                        " .set arch=r4000 \n" \
+                        " ll %1, %2 # atomic_" #op "_return \n" \
+                        " " #asm_op " %0, %1, %3 \n" \
+                        " sc %0, %2 \n" \
+                        " .set mips0 \n" \
+                        : "=&r" (result), "=&r" (temp), \
+                          "+" GCC_OFF12_ASM() (v->counter) \
+                        : "Ir" (i)); \
+                } while (unlikely(!result)); \
+                \
+                result = temp; result c_op i; \
+        } else { \
+                unsigned long flags; \
+                \
+                raw_local_irq_save(flags); \
+                result = v->counter; \
+                result c_op i; \
+                v->counter = result; \
+                raw_local_irq_restore(flags); \
+        } \
+        \
+        smp_llsc_mb(); \
+        \
+        return result; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+        ATOMIC_OP(op, c_op, asm_op) \
         ATOMIC_OP_RETURN(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, addu)
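For orientation, here is roughly what ATOMIC_OPS(add, +=, addu) generates once ATOMIC_OP and ATOMIC_OP_RETURN above are expanded, shown only for the plain interrupt-disable fallback branch; the ll/sc and R10000 asm branches (where asm_op = addu is actually used) are elided. This is a reader's sketch of the expansion, not code from the patch.

/* Sketch: expansion of ATOMIC_OPS(add, +=, addu), fallback path only. */
static __inline__ void atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        raw_local_irq_save(flags);      /* no ll/sc: mask IRQs around the read-modify-write */
        v->counter += i;                /* c_op is "+=" */
        raw_local_irq_restore(flags);
}

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
        int result;
        unsigned long flags;

        smp_mb__before_llsc();          /* ordering barrier before the update */

        raw_local_irq_save(flags);
        result = v->counter;
        result += i;
        v->counter = result;
        raw_local_irq_restore(flags);

        smp_llsc_mb();                  /* ordering barrier after the update */

        return result;
}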
@@ -167,8 +170,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                 " .set reorder \n"
                 "1: \n"
                 " .set mips0 \n"
-                : "=&r" (result), "=&r" (temp), "+m" (v->counter)
-                : "Ir" (i), "m" (v->counter)
+                : "=&r" (result), "=&r" (temp),
+                  "+" GCC_OFF12_ASM() (v->counter)
+                : "Ir" (i), GCC_OFF12_ASM() (v->counter)
                 : "memory");
         } else if (kernel_uses_llsc) {
                 int temp;
@@ -185,7 +189,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                 " .set reorder \n"
                 "1: \n"
                 " .set mips0 \n"
-                : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+                : "=&r" (result), "=&r" (temp),
+                  "+" GCC_OFF12_ASM() (v->counter)
                 : "Ir" (i));
         } else {
                 unsigned long flags;
@@ -315,96 +320,98 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  */
 #define atomic64_set(v, i) ((v)->counter = (i))
 
 #define ATOMIC64_OP(op, c_op, asm_op) \
 static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 { \
         if (kernel_uses_llsc && R10000_LLSC_WAR) { \
                 long temp; \
                 \
                 __asm__ __volatile__( \
                 " .set arch=r4000 \n" \
                 "1: lld %0, %1 # atomic64_" #op " \n" \
                 " " #asm_op " %0, %2 \n" \
                 " scd %0, %1 \n" \
                 " beqzl %0, 1b \n" \
                 " .set mips0 \n" \
-                : "=&r" (temp), "+m" (v->counter) \
+                : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
                 : "Ir" (i)); \
         } else if (kernel_uses_llsc) { \
                 long temp; \
                 \
                 do { \
                         __asm__ __volatile__( \
                         " .set arch=r4000 \n" \
                         " lld %0, %1 # atomic64_" #op "\n" \
                         " " #asm_op " %0, %2 \n" \
                         " scd %0, %1 \n" \
                         " .set mips0 \n" \
-                        : "=&r" (temp), "+m" (v->counter) \
+                        : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
                         : "Ir" (i)); \
                 } while (unlikely(!temp)); \
         } else { \
                 unsigned long flags; \
                 \
                 raw_local_irq_save(flags); \
                 v->counter c_op i; \
                 raw_local_irq_restore(flags); \
         } \
-} \
+}
 
 #define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
 static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 { \
         long result; \
         \
         smp_mb__before_llsc(); \
         \
         if (kernel_uses_llsc && R10000_LLSC_WAR) { \
                 long temp; \
                 \
                 __asm__ __volatile__( \
                 " .set arch=r4000 \n" \
                 "1: lld %1, %2 # atomic64_" #op "_return\n" \
                 " " #asm_op " %0, %1, %3 \n" \
                 " scd %0, %2 \n" \
                 " beqzl %0, 1b \n" \
                 " " #asm_op " %0, %1, %3 \n" \
                 " .set mips0 \n" \
-                : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
-                : "Ir" (i)); \
-        } else if (kernel_uses_llsc) { \
-                long temp; \
-                \
-                do { \
-                        __asm__ __volatile__( \
-                        " .set arch=r4000 \n" \
-                        " lld %1, %2 # atomic64_" #op "_return\n" \
-                        " " #asm_op " %0, %1, %3 \n" \
-                        " scd %0, %2 \n" \
-                        " .set mips0 \n" \
-                        : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
-                        : "Ir" (i), "m" (v->counter) \
-                        : "memory"); \
-                } while (unlikely(!result)); \
-                \
-                result = temp; result c_op i; \
-        } else { \
-                unsigned long flags; \
-                \
-                raw_local_irq_save(flags); \
-                result = v->counter; \
-                result c_op i; \
-                v->counter = result; \
-                raw_local_irq_restore(flags); \
-        } \
-        \
-        smp_llsc_mb(); \
-        \
-        return result; \
+                : "=&r" (result), "=&r" (temp), \
+                  "+" GCC_OFF12_ASM() (v->counter) \
+                : "Ir" (i)); \
+        } else if (kernel_uses_llsc) { \
+                long temp; \
+                \
+                do { \
+                        __asm__ __volatile__( \
+                        " .set arch=r4000 \n" \
+                        " lld %1, %2 # atomic64_" #op "_return\n" \
+                        " " #asm_op " %0, %1, %3 \n" \
+                        " scd %0, %2 \n" \
+                        " .set mips0 \n" \
+                        : "=&r" (result), "=&r" (temp), \
+                          "=" GCC_OFF12_ASM() (v->counter) \
+                        : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
+                        : "memory"); \
+                } while (unlikely(!result)); \
+                \
+                result = temp; result c_op i; \
+        } else { \
+                unsigned long flags; \
+                \
+                raw_local_irq_save(flags); \
+                result = v->counter; \
+                result c_op i; \
+                v->counter = result; \
+                raw_local_irq_restore(flags); \
+        } \
+        \
+        smp_llsc_mb(); \
+        \
+        return result; \
 }
 
 #define ATOMIC64_OPS(op, c_op, asm_op) \
         ATOMIC64_OP(op, c_op, asm_op) \
         ATOMIC64_OP_RETURN(op, c_op, asm_op)
 
 ATOMIC64_OPS(add, +=, daddu)
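One detail visible in the hunk above: the non-R10000 ll/sc branch of atomic64_##op##_return still describes v->counter as a separate write-only output ("=" GCC_OFF12_ASM()) plus a matching input operand and a "memory" clobber, while most other sites in the patch use the single read-write "+" form. Both idioms tell GCC that the asm reads and writes the variable. The fragment below contrasts the two spellings on a trivial load/add/store; the helper names are made up for illustration, and it is a general inline-asm note rather than code from this file.

/* Illustration only: two equivalent ways to declare a read-write
 * memory operand in GCC extended asm.
 */
static inline void rmw_combined(int *p, int i)
{
        int tmp;

        __asm__ __volatile__(
        " lw %0, %1\n"
        " addu %0, %2\n"
        " sw %0, %1\n"
        : "=&r" (tmp), "+m" (*p)        /* one "+" operand: read and written */
        : "Ir" (i));
}

static inline void rmw_split(int *p, int i)
{
        int tmp;

        __asm__ __volatile__(
        " lw %0, %3\n"                  /* read through the "m" input... */
        " addu %0, %2\n"
        " sw %0, %1\n"                  /* ...write through the "=m" output */
        : "=&r" (tmp), "=m" (*p)
        : "Ir" (i), "m" (*p)
        : "memory");                    /* be conservative about other memory */
}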
@@ -415,7 +422,8 @@ ATOMIC64_OPS(sub, -=, dsubu)
 #undef ATOMIC64_OP
 
 /*
- * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic
+ * variable
  * @i: integer value to subtract
  * @v: pointer of type atomic64_t
  *
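The hunk above only re-wraps the kernel-doc line; the helper's behaviour is unchanged. For readers who do not know it, atomic64_sub_if_positive can be summarised roughly as the C below: subtract i, store the new value only when the result is not negative, and return the result of the subtraction either way (atomic64_dec_if_positive() is defined in terms of it). This is a semantic sketch under that reading of the kernel-doc, not the real implementation, which uses lld/scd loops and the barriers shown earlier.

/* Semantic sketch only; barriers and the lld/scd retry loop are elided. */
static inline long atomic64_sub_if_positive_sketch(long i, atomic64_t *v)
{
        long result;
        unsigned long flags;

        raw_local_irq_save(flags);      /* stand-in for the atomic lld/scd sequence */
        result = v->counter - i;
        if (result >= 0)
                v->counter = result;    /* only store when we did not go negative */
        raw_local_irq_restore(flags);

        return result;                  /* caller sees old value minus i */
}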
@@ -443,8 +451,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                 " .set reorder \n"
                 "1: \n"
                 " .set mips0 \n"
-                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-                : "Ir" (i), "m" (v->counter)
+                : "=&r" (result), "=&r" (temp),
+                  "=" GCC_OFF12_ASM() (v->counter)
+                : "Ir" (i), GCC_OFF12_ASM() (v->counter)
                 : "memory");
         } else if (kernel_uses_llsc) {
                 long temp;
@@ -461,7 +470,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                 " .set reorder \n"
                 "1: \n"
                 " .set mips0 \n"
-                : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+                : "=&r" (result), "=&r" (temp),
+                  "+" GCC_OFF12_ASM() (v->counter)
                 : "Ir" (i));
         } else {
                 unsigned long flags;