path: root/arch/mips
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
commit	dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree	9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/mips
parent	d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent	2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add new
     ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
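[Editorial note] The last bullet above describes generating every atomic method from a bare cmpxchg() primitive. The following is a rough sketch of that idea only -- it is not the kernel's asm-generic/atomic.h code, and every identifier in it (my_atomic_t, my_cmpxchg, MY_ATOMIC_OP) is invented for illustration. A cmpxchg() retry loop plus a little token pasting is enough to produce each op:

	/*
	 * Illustrative sketch: build atomic ops from cmpxchg() alone.
	 * The GCC/Clang builtin stands in for the architecture's cmpxchg().
	 */
	typedef struct { int counter; } my_atomic_t;

	/* returns the value that was found at *ptr */
	static inline int my_cmpxchg(int *ptr, int old, int new)
	{
		return __sync_val_compare_and_swap(ptr, old, new);
	}

	#define MY_ATOMIC_OP(op, c_op)						\
	static inline void my_atomic_##op(int i, my_atomic_t *v)		\
	{									\
		int old, new;							\
										\
		do {								\
			old = v->counter;	/* snapshot current value  */	\
			new = old c_op i;	/* compute the updated one */	\
		} while (my_cmpxchg(&v->counter, old, new) != old);		\
	}

	MY_ATOMIC_OP(add, +)	/* generates my_atomic_add() */
	MY_ATOMIC_OP(sub, -)	/* generates my_atomic_sub() */

The loop retries only when another CPU changed the counter between the load and the cmpxchg(), which is why cmpxchg() is the single primitive an architecture must supply.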
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/include/asm/atomic.h	561
1 file changed, 189 insertions, 372 deletions
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 37b2befe651a..6dd6bfc607e9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -29,7 +29,7 @@
29 * 29 *
30 * Atomically reads the value of @v. 30 * Atomically reads the value of @v.
31 */ 31 */
32#define atomic_read(v) (*(volatile int *)&(v)->counter) 32#define atomic_read(v) ACCESS_ONCE((v)->counter)
33 33
34/* 34/*
35 * atomic_set - set atomic variable 35 * atomic_set - set atomic variable
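[Editorial note, not part of the diff] ACCESS_ONCE() was, at the time of this merge, defined in include/linux/compiler.h as a volatile cast, so the new atomic_read()/atomic64_read() still compile to a single volatile load of ->counter; the change simply stops hard-coding the int/long type by hand:

	/* include/linux/compiler.h, as of this merge */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	/* hence atomic_read(v) still expands to one volatile read of v->counter */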
@@ -40,195 +40,103 @@
40 */ 40 */
41#define atomic_set(v, i) ((v)->counter = (i)) 41#define atomic_set(v, i) ((v)->counter = (i))
42 42
43/* 43#define ATOMIC_OP(op, c_op, asm_op) \
44 * atomic_add - add integer to atomic variable 44static __inline__ void atomic_##op(int i, atomic_t * v) \
45 * @i: integer value to add 45{ \
46 * @v: pointer of type atomic_t 46 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
47 * 47 int temp; \
48 * Atomically adds @i to @v. 48 \
49 */ 49 __asm__ __volatile__( \
50static __inline__ void atomic_add(int i, atomic_t * v) 50 " .set arch=r4000 \n" \
51{ 51 "1: ll %0, %1 # atomic_" #op " \n" \
52 if (kernel_uses_llsc && R10000_LLSC_WAR) { 52 " " #asm_op " %0, %2 \n" \
53 int temp; 53 " sc %0, %1 \n" \
54 54 " beqzl %0, 1b \n" \
55 __asm__ __volatile__( 55 " .set mips0 \n" \
56 " .set arch=r4000 \n" 56 : "=&r" (temp), "+m" (v->counter) \
57 "1: ll %0, %1 # atomic_add \n" 57 : "Ir" (i)); \
58 " addu %0, %2 \n" 58 } else if (kernel_uses_llsc) { \
59 " sc %0, %1 \n" 59 int temp; \
60 " beqzl %0, 1b \n" 60 \
61 " .set mips0 \n" 61 do { \
62 : "=&r" (temp), "+m" (v->counter) 62 __asm__ __volatile__( \
63 : "Ir" (i)); 63 " .set arch=r4000 \n" \
64 } else if (kernel_uses_llsc) { 64 " ll %0, %1 # atomic_" #op "\n" \
65 int temp; 65 " " #asm_op " %0, %2 \n" \
66 66 " sc %0, %1 \n" \
67 do { 67 " .set mips0 \n" \
68 __asm__ __volatile__( 68 : "=&r" (temp), "+m" (v->counter) \
69 " .set arch=r4000 \n" 69 : "Ir" (i)); \
70 " ll %0, %1 # atomic_add \n" 70 } while (unlikely(!temp)); \
71 " addu %0, %2 \n" 71 } else { \
72 " sc %0, %1 \n" 72 unsigned long flags; \
73 " .set mips0 \n" 73 \
74 : "=&r" (temp), "+m" (v->counter) 74 raw_local_irq_save(flags); \
75 : "Ir" (i)); 75 v->counter c_op i; \
76 } while (unlikely(!temp)); 76 raw_local_irq_restore(flags); \
77 } else { 77 } \
78 unsigned long flags; 78} \
79 79
80 raw_local_irq_save(flags); 80#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
81 v->counter += i; 81static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
82 raw_local_irq_restore(flags); 82{ \
83 } 83 int result; \
84} 84 \
85 85 smp_mb__before_llsc(); \
86/* 86 \
87 * atomic_sub - subtract the atomic variable 87 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
88 * @i: integer value to subtract 88 int temp; \
89 * @v: pointer of type atomic_t 89 \
90 * 90 __asm__ __volatile__( \
91 * Atomically subtracts @i from @v. 91 " .set arch=r4000 \n" \
92 */ 92 "1: ll %1, %2 # atomic_" #op "_return \n" \
93static __inline__ void atomic_sub(int i, atomic_t * v) 93 " " #asm_op " %0, %1, %3 \n" \
94{ 94 " sc %0, %2 \n" \
95 if (kernel_uses_llsc && R10000_LLSC_WAR) { 95 " beqzl %0, 1b \n" \
96 int temp; 96 " " #asm_op " %0, %1, %3 \n" \
97 97 " .set mips0 \n" \
98 __asm__ __volatile__( 98 : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
99 " .set arch=r4000 \n" 99 : "Ir" (i)); \
100 "1: ll %0, %1 # atomic_sub \n" 100 } else if (kernel_uses_llsc) { \
101 " subu %0, %2 \n" 101 int temp; \
102 " sc %0, %1 \n" 102 \
103 " beqzl %0, 1b \n" 103 do { \
104 " .set mips0 \n" 104 __asm__ __volatile__( \
105 : "=&r" (temp), "+m" (v->counter) 105 " .set arch=r4000 \n" \
106 : "Ir" (i)); 106 " ll %1, %2 # atomic_" #op "_return \n" \
107 } else if (kernel_uses_llsc) { 107 " " #asm_op " %0, %1, %3 \n" \
108 int temp; 108 " sc %0, %2 \n" \
109 109 " .set mips0 \n" \
110 do { 110 : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
111 __asm__ __volatile__( 111 : "Ir" (i)); \
112 " .set arch=r4000 \n" 112 } while (unlikely(!result)); \
113 " ll %0, %1 # atomic_sub \n" 113 \
114 " subu %0, %2 \n" 114 result = temp; result c_op i; \
115 " sc %0, %1 \n" 115 } else { \
116 " .set mips0 \n" 116 unsigned long flags; \
117 : "=&r" (temp), "+m" (v->counter) 117 \
118 : "Ir" (i)); 118 raw_local_irq_save(flags); \
119 } while (unlikely(!temp)); 119 result = v->counter; \
120 } else { 120 result c_op i; \
121 unsigned long flags; 121 v->counter = result; \
122 122 raw_local_irq_restore(flags); \
123 raw_local_irq_save(flags); 123 } \
124 v->counter -= i; 124 \
125 raw_local_irq_restore(flags); 125 smp_llsc_mb(); \
126 } 126 \
127} 127 return result; \
128
129/*
130 * Same as above, but return the result value
131 */
132static __inline__ int atomic_add_return(int i, atomic_t * v)
133{
134 int result;
135
136 smp_mb__before_llsc();
137
138 if (kernel_uses_llsc && R10000_LLSC_WAR) {
139 int temp;
140
141 __asm__ __volatile__(
142 " .set arch=r4000 \n"
143 "1: ll %1, %2 # atomic_add_return \n"
144 " addu %0, %1, %3 \n"
145 " sc %0, %2 \n"
146 " beqzl %0, 1b \n"
147 " addu %0, %1, %3 \n"
148 " .set mips0 \n"
149 : "=&r" (result), "=&r" (temp), "+m" (v->counter)
150 : "Ir" (i));
151 } else if (kernel_uses_llsc) {
152 int temp;
153
154 do {
155 __asm__ __volatile__(
156 " .set arch=r4000 \n"
157 " ll %1, %2 # atomic_add_return \n"
158 " addu %0, %1, %3 \n"
159 " sc %0, %2 \n"
160 " .set mips0 \n"
161 : "=&r" (result), "=&r" (temp), "+m" (v->counter)
162 : "Ir" (i));
163 } while (unlikely(!result));
164
165 result = temp + i;
166 } else {
167 unsigned long flags;
168
169 raw_local_irq_save(flags);
170 result = v->counter;
171 result += i;
172 v->counter = result;
173 raw_local_irq_restore(flags);
174 }
175
176 smp_llsc_mb();
177
178 return result;
179} 128}
180 129
181static __inline__ int atomic_sub_return(int i, atomic_t * v) 130#define ATOMIC_OPS(op, c_op, asm_op) \
182{ 131 ATOMIC_OP(op, c_op, asm_op) \
183 int result; 132 ATOMIC_OP_RETURN(op, c_op, asm_op)
184 133
185 smp_mb__before_llsc(); 134ATOMIC_OPS(add, +=, addu)
135ATOMIC_OPS(sub, -=, subu)
186 136
187 if (kernel_uses_llsc && R10000_LLSC_WAR) { 137#undef ATOMIC_OPS
188 int temp; 138#undef ATOMIC_OP_RETURN
189 139#undef ATOMIC_OP
190 __asm__ __volatile__(
191 " .set arch=r4000 \n"
192 "1: ll %1, %2 # atomic_sub_return \n"
193 " subu %0, %1, %3 \n"
194 " sc %0, %2 \n"
195 " beqzl %0, 1b \n"
196 " subu %0, %1, %3 \n"
197 " .set mips0 \n"
198 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
199 : "Ir" (i), "m" (v->counter)
200 : "memory");
201
202 result = temp - i;
203 } else if (kernel_uses_llsc) {
204 int temp;
205
206 do {
207 __asm__ __volatile__(
208 " .set arch=r4000 \n"
209 " ll %1, %2 # atomic_sub_return \n"
210 " subu %0, %1, %3 \n"
211 " sc %0, %2 \n"
212 " .set mips0 \n"
213 : "=&r" (result), "=&r" (temp), "+m" (v->counter)
214 : "Ir" (i));
215 } while (unlikely(!result));
216
217 result = temp - i;
218 } else {
219 unsigned long flags;
220
221 raw_local_irq_save(flags);
222 result = v->counter;
223 result -= i;
224 v->counter = result;
225 raw_local_irq_restore(flags);
226 }
227
228 smp_llsc_mb();
229
230 return result;
231}
232 140
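[Editorial note] The hunk above replaces the hand-written atomic_add/atomic_sub/atomic_add_return/atomic_sub_return with two macros and two ATOMIC_OPS() invocations. As an approximate illustration (not literal preprocessor output), ATOMIC_OPS(add, +=, addu) regenerates the old functions; only the irq-disable fallback branch is shown below, with the barriers and LL/SC branches elided since those just substitute "addu" for asm_op in the inline asm:

	/* ATOMIC_OP(add, +=, addu): irq-disable fallback branch */
	static __inline__ void atomic_add(int i, atomic_t *v)
	{
		unsigned long flags;

		/* ... LL/SC branches elided ... */
		raw_local_irq_save(flags);
		v->counter += i;			/* c_op is "+=" */
		raw_local_irq_restore(flags);
	}

	/* ATOMIC_OP_RETURN(add, +=, addu): same branch */
	static __inline__ int atomic_add_return(int i, atomic_t *v)
	{
		int result;
		unsigned long flags;

		/* ... smp_mb__before_llsc()/smp_llsc_mb() and LL/SC branches elided ... */
		raw_local_irq_save(flags);
		result = v->counter;
		result += i;				/* c_op again */
		v->counter = result;
		raw_local_irq_restore(flags);

		return result;
	}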
233/* 141/*
234 * atomic_sub_if_positive - conditionally subtract integer from atomic variable 142 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
@@ -398,7 +306,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
398 * @v: pointer of type atomic64_t 306 * @v: pointer of type atomic64_t
399 * 307 *
400 */ 308 */
401#define atomic64_read(v) (*(volatile long *)&(v)->counter) 309#define atomic64_read(v) ACCESS_ONCE((v)->counter)
402 310
403/* 311/*
404 * atomic64_set - set atomic variable 312 * atomic64_set - set atomic variable
@@ -407,195 +315,104 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
407 */ 315 */
408#define atomic64_set(v, i) ((v)->counter = (i)) 316#define atomic64_set(v, i) ((v)->counter = (i))
409 317
410/* 318#define ATOMIC64_OP(op, c_op, asm_op) \
411 * atomic64_add - add integer to atomic variable 319static __inline__ void atomic64_##op(long i, atomic64_t * v) \
412 * @i: integer value to add 320{ \
413 * @v: pointer of type atomic64_t 321 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
414 * 322 long temp; \
415 * Atomically adds @i to @v. 323 \
416 */ 324 __asm__ __volatile__( \
417static __inline__ void atomic64_add(long i, atomic64_t * v) 325 " .set arch=r4000 \n" \
418{ 326 "1: lld %0, %1 # atomic64_" #op " \n" \
419 if (kernel_uses_llsc && R10000_LLSC_WAR) { 327 " " #asm_op " %0, %2 \n" \
420 long temp; 328 " scd %0, %1 \n" \
421 329 " beqzl %0, 1b \n" \
422 __asm__ __volatile__( 330 " .set mips0 \n" \
423 " .set arch=r4000 \n" 331 : "=&r" (temp), "+m" (v->counter) \
424 "1: lld %0, %1 # atomic64_add \n" 332 : "Ir" (i)); \
425 " daddu %0, %2 \n" 333 } else if (kernel_uses_llsc) { \
426 " scd %0, %1 \n" 334 long temp; \
427 " beqzl %0, 1b \n" 335 \
428 " .set mips0 \n" 336 do { \
429 : "=&r" (temp), "+m" (v->counter) 337 __asm__ __volatile__( \
430 : "Ir" (i)); 338 " .set arch=r4000 \n" \
431 } else if (kernel_uses_llsc) { 339 " lld %0, %1 # atomic64_" #op "\n" \
432 long temp; 340 " " #asm_op " %0, %2 \n" \
433 341 " scd %0, %1 \n" \
434 do { 342 " .set mips0 \n" \
435 __asm__ __volatile__( 343 : "=&r" (temp), "+m" (v->counter) \
436 " .set arch=r4000 \n" 344 : "Ir" (i)); \
437 " lld %0, %1 # atomic64_add \n" 345 } while (unlikely(!temp)); \
438 " daddu %0, %2 \n" 346 } else { \
439 " scd %0, %1 \n" 347 unsigned long flags; \
440 " .set mips0 \n" 348 \
441 : "=&r" (temp), "+m" (v->counter) 349 raw_local_irq_save(flags); \
442 : "Ir" (i)); 350 v->counter c_op i; \
443 } while (unlikely(!temp)); 351 raw_local_irq_restore(flags); \
444 } else { 352 } \
445 unsigned long flags; 353} \
446 354
447 raw_local_irq_save(flags); 355#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
448 v->counter += i; 356static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
449 raw_local_irq_restore(flags); 357{ \
450 } 358 long result; \
359 \
360 smp_mb__before_llsc(); \
361 \
362 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
363 long temp; \
364 \
365 __asm__ __volatile__( \
366 " .set arch=r4000 \n" \
367 "1: lld %1, %2 # atomic64_" #op "_return\n" \
368 " " #asm_op " %0, %1, %3 \n" \
369 " scd %0, %2 \n" \
370 " beqzl %0, 1b \n" \
371 " " #asm_op " %0, %1, %3 \n" \
372 " .set mips0 \n" \
373 : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
374 : "Ir" (i)); \
375 } else if (kernel_uses_llsc) { \
376 long temp; \
377 \
378 do { \
379 __asm__ __volatile__( \
380 " .set arch=r4000 \n" \
381 " lld %1, %2 # atomic64_" #op "_return\n" \
382 " " #asm_op " %0, %1, %3 \n" \
383 " scd %0, %2 \n" \
384 " .set mips0 \n" \
385 : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
386 : "Ir" (i), "m" (v->counter) \
387 : "memory"); \
388 } while (unlikely(!result)); \
389 \
390 result = temp; result c_op i; \
391 } else { \
392 unsigned long flags; \
393 \
394 raw_local_irq_save(flags); \
395 result = v->counter; \
396 result c_op i; \
397 v->counter = result; \
398 raw_local_irq_restore(flags); \
399 } \
400 \
401 smp_llsc_mb(); \
402 \
403 return result; \
451} 404}
452 405
453/* 406#define ATOMIC64_OPS(op, c_op, asm_op) \
454 * atomic64_sub - subtract the atomic variable 407 ATOMIC64_OP(op, c_op, asm_op) \
455 * @i: integer value to subtract 408 ATOMIC64_OP_RETURN(op, c_op, asm_op)
456 * @v: pointer of type atomic64_t
457 *
458 * Atomically subtracts @i from @v.
459 */
460static __inline__ void atomic64_sub(long i, atomic64_t * v)
461{
462 if (kernel_uses_llsc && R10000_LLSC_WAR) {
463 long temp;
464
465 __asm__ __volatile__(
466 " .set arch=r4000 \n"
467 "1: lld %0, %1 # atomic64_sub \n"
468 " dsubu %0, %2 \n"
469 " scd %0, %1 \n"
470 " beqzl %0, 1b \n"
471 " .set mips0 \n"
472 : "=&r" (temp), "+m" (v->counter)
473 : "Ir" (i));
474 } else if (kernel_uses_llsc) {
475 long temp;
476
477 do {
478 __asm__ __volatile__(
479 " .set arch=r4000 \n"
480 " lld %0, %1 # atomic64_sub \n"
481 " dsubu %0, %2 \n"
482 " scd %0, %1 \n"
483 " .set mips0 \n"
484 : "=&r" (temp), "+m" (v->counter)
485 : "Ir" (i));
486 } while (unlikely(!temp));
487 } else {
488 unsigned long flags;
489
490 raw_local_irq_save(flags);
491 v->counter -= i;
492 raw_local_irq_restore(flags);
493 }
494}
495
496/*
497 * Same as above, but return the result value
498 */
499static __inline__ long atomic64_add_return(long i, atomic64_t * v)
500{
501 long result;
502 409
503 smp_mb__before_llsc(); 410ATOMIC64_OPS(add, +=, daddu)
411ATOMIC64_OPS(sub, -=, dsubu)
504 412
505 if (kernel_uses_llsc && R10000_LLSC_WAR) { 413#undef ATOMIC64_OPS
506 long temp; 414#undef ATOMIC64_OP_RETURN
507 415#undef ATOMIC64_OP
508 __asm__ __volatile__(
509 " .set arch=r4000 \n"
510 "1: lld %1, %2 # atomic64_add_return \n"
511 " daddu %0, %1, %3 \n"
512 " scd %0, %2 \n"
513 " beqzl %0, 1b \n"
514 " daddu %0, %1, %3 \n"
515 " .set mips0 \n"
516 : "=&r" (result), "=&r" (temp), "+m" (v->counter)
517 : "Ir" (i));
518 } else if (kernel_uses_llsc) {
519 long temp;
520
521 do {
522 __asm__ __volatile__(
523 " .set arch=r4000 \n"
524 " lld %1, %2 # atomic64_add_return \n"
525 " daddu %0, %1, %3 \n"
526 " scd %0, %2 \n"
527 " .set mips0 \n"
528 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
529 : "Ir" (i), "m" (v->counter)
530 : "memory");
531 } while (unlikely(!result));
532
533 result = temp + i;
534 } else {
535 unsigned long flags;
536
537 raw_local_irq_save(flags);
538 result = v->counter;
539 result += i;
540 v->counter = result;
541 raw_local_irq_restore(flags);
542 }
543
544 smp_llsc_mb();
545
546 return result;
547}
548
549static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
550{
551 long result;
552
553 smp_mb__before_llsc();
554
555 if (kernel_uses_llsc && R10000_LLSC_WAR) {
556 long temp;
557
558 __asm__ __volatile__(
559 " .set arch=r4000 \n"
560 "1: lld %1, %2 # atomic64_sub_return \n"
561 " dsubu %0, %1, %3 \n"
562 " scd %0, %2 \n"
563 " beqzl %0, 1b \n"
564 " dsubu %0, %1, %3 \n"
565 " .set mips0 \n"
566 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
567 : "Ir" (i), "m" (v->counter)
568 : "memory");
569 } else if (kernel_uses_llsc) {
570 long temp;
571
572 do {
573 __asm__ __volatile__(
574 " .set arch=r4000 \n"
575 " lld %1, %2 # atomic64_sub_return \n"
576 " dsubu %0, %1, %3 \n"
577 " scd %0, %2 \n"
578 " .set mips0 \n"
579 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
580 : "Ir" (i), "m" (v->counter)
581 : "memory");
582 } while (unlikely(!result));
583
584 result = temp - i;
585 } else {
586 unsigned long flags;
587
588 raw_local_irq_save(flags);
589 result = v->counter;
590 result -= i;
591 v->counter = result;
592 raw_local_irq_restore(flags);
593 }
594
595 smp_llsc_mb();
596
597 return result;
598}
599 416
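[Editorial note] The atomic64_* fold mirrors the 32-bit one, using lld/scd in place of ll/sc and daddu/dsubu as the asm ops. Callers are unaffected by the refactor; a hypothetical caller-side sketch (the counter and helpers are invented for illustration):

	/* assumes <linux/atomic.h> on a 64BIT MIPS kernel */
	static atomic64_t bytes_done = ATOMIC64_INIT(0);

	static void account(long n)
	{
		atomic64_add(n, &bytes_done);	/* generated by ATOMIC64_OP(add, +=, daddu) */
	}

	static long account_and_read(long n)
	{
		/* fully ordered, returns the updated value */
		return atomic64_add_return(n, &bytes_done);
	}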
600/* 417/*
601 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable 418 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable