path: root/arch/mips
author    Maciej W. Rozycki <macro@codesourcery.com>    2014-11-15 17:09:54 -0500
committer Ralf Baechle <ralf@linux-mips.org>            2014-11-24 01:45:37 -0500
commit    ddb3108e30e45f5d58737f6fb8256df10c2c515b (patch)
tree      262bb6f5f8bd3540958784bc60d18d002f911734 /arch/mips
parent    0e525e48f73cc8a4df8da0be77e4146bea85e1a5 (diff)
MIPS: atomic.h: Reformat to fit in 79 columns
Signed-off-by: Maciej W. Rozycki <macro@codesourcery.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8484/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/include/asm/atomic.h | 361
1 file changed, 181 insertions(+), 180 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index ec4b4d658bc4..857da84cfc92 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -41,97 +41,97 @@
(This hunk only realigns the line-continuation backslashes and the padding
inside the asm strings so that every line fits in 79 columns, and drops a
stray continuation backslash after the closing brace of ATOMIC_OP; the
resulting text of lines 41-137 is:)

 */
#define atomic_set(v, i)        ((v)->counter = (i))

#define ATOMIC_OP(op, c_op, asm_op)                                   \
static __inline__ void atomic_##op(int i, atomic_t * v)               \
{                                                                     \
        if (kernel_uses_llsc && R10000_LLSC_WAR) {                    \
                int temp;                                             \
                                                                      \
                __asm__ __volatile__(                                 \
                "       .set    arch=r4000                      \n"   \
                "1:     ll      %0, %1          # atomic_" #op "\n"   \
                "       " #asm_op " %0, %2                      \n"   \
                "       sc      %0, %1                          \n"   \
                "       beqzl   %0, 1b                          \n"   \
                "       .set    mips0                           \n"   \
                : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
                : "Ir" (i));                                          \
        } else if (kernel_uses_llsc) {                                \
                int temp;                                             \
                                                                      \
                do {                                                  \
                        __asm__ __volatile__(                         \
                        "       .set    arch=r4000              \n"   \
                        "       ll      %0, %1  # atomic_" #op "\n"   \
                        "       " #asm_op " %0, %2              \n"   \
                        "       sc      %0, %1                  \n"   \
                        "       .set    mips0                   \n"   \
                        : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
                        : "Ir" (i));                                  \
                } while (unlikely(!temp));                            \
        } else {                                                      \
                unsigned long flags;                                  \
                                                                      \
                raw_local_irq_save(flags);                            \
                v->counter c_op i;                                    \
                raw_local_irq_restore(flags);                         \
        }                                                             \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                            \
static __inline__ int atomic_##op##_return(int i, atomic_t * v)       \
{                                                                     \
        int result;                                                   \
                                                                      \
        smp_mb__before_llsc();                                        \
                                                                      \
        if (kernel_uses_llsc && R10000_LLSC_WAR) {                    \
                int temp;                                             \
                                                                      \
                __asm__ __volatile__(                                 \
                "       .set    arch=r4000                      \n"   \
                "1:     ll      %1, %2  # atomic_" #op "_return \n"   \
                "       " #asm_op " %0, %1, %3                  \n"   \
                "       sc      %0, %2                          \n"   \
                "       beqzl   %0, 1b                          \n"   \
                "       " #asm_op " %0, %1, %3                  \n"   \
                "       .set    mips0                           \n"   \
                : "=&r" (result), "=&r" (temp),                       \
                  "+" GCC_OFF12_ASM() (v->counter)                    \
                : "Ir" (i));                                          \
        } else if (kernel_uses_llsc) {                                \
                int temp;                                             \
                                                                      \
                do {                                                  \
                        __asm__ __volatile__(                         \
                        "       .set    arch=r4000              \n"   \
                        "       ll      %1, %2  # atomic_" #op "_return \n" \
                        "       " #asm_op " %0, %1, %3          \n"   \
                        "       sc      %0, %2                  \n"   \
                        "       .set    mips0                   \n"   \
                        : "=&r" (result), "=&r" (temp),               \
                          "+" GCC_OFF12_ASM() (v->counter)            \
                        : "Ir" (i));                                  \
                } while (unlikely(!result));                          \
                                                                      \
                result = temp; result c_op i;                         \
        } else {                                                      \
                unsigned long flags;                                  \
                                                                      \
                raw_local_irq_save(flags);                            \
                result = v->counter;                                  \
                result c_op i;                                        \
                v->counter = result;                                  \
                raw_local_irq_restore(flags);                         \
        }                                                             \
                                                                      \
        smp_llsc_mb();                                                \
                                                                      \
        return result;                                                \
}

#define ATOMIC_OPS(op, c_op, asm_op)                                  \
        ATOMIC_OP(op, c_op, asm_op)                                   \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
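
To make the macro machinery above easier to follow, here is a minimal sketch
of what the ATOMIC_OPS(add, +=, addu) line yields after preprocessing, reduced
to the plain LL/SC branch. The function name atomic_add_llsc_sketch is invented
for illustration; atomic_t, GCC_OFF12_ASM and unlikely come from the
surrounding kernel headers, and the whitespace is approximate:

/* Illustrative expansion sketch only -- not part of the patch. */
static __inline__ void atomic_add_llsc_sketch(int i, atomic_t *v)
{
        int temp;

        do {
                __asm__ __volatile__(
                "       .set    arch=r4000              \n"
                "       ll      %0, %1  # atomic_add    \n"   /* load-linked the counter */
                "       addu    %0, %2                  \n"   /* temp += i */
                "       sc      %0, %1                  \n"   /* store-conditional; temp becomes 0 on failure */
                "       .set    mips0                   \n"
                : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)
                : "Ir" (i));
        } while (unlikely(!temp));      /* another writer intervened: retry */
}

The sc instruction stores the updated value and then replaces the register with
a success flag, so the C do/while retries until the store-conditional sees no
intervening write to the counter.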
@@ -320,98 +320,98 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
(As above, this hunk only realigns the continuation backslashes of the 64-bit
counterparts and drops a stray backslash after the closing brace of
ATOMIC64_OP; the resulting text of lines 320-417 is:)

 */
#define atomic64_set(v, i)      ((v)->counter = (i))

#define ATOMIC64_OP(op, c_op, asm_op)                                 \
static __inline__ void atomic64_##op(long i, atomic64_t * v)          \
{                                                                     \
        if (kernel_uses_llsc && R10000_LLSC_WAR) {                    \
                long temp;                                            \
                                                                      \
                __asm__ __volatile__(                                 \
                "       .set    arch=r4000                      \n"   \
                "1:     lld     %0, %1          # atomic64_" #op "\n" \
                "       " #asm_op " %0, %2                      \n"   \
                "       scd     %0, %1                          \n"   \
                "       beqzl   %0, 1b                          \n"   \
                "       .set    mips0                           \n"   \
                : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
                : "Ir" (i));                                          \
        } else if (kernel_uses_llsc) {                                \
                long temp;                                            \
                                                                      \
                do {                                                  \
                        __asm__ __volatile__(                         \
                        "       .set    arch=r4000              \n"   \
                        "       lld     %0, %1  # atomic64_" #op "\n" \
                        "       " #asm_op " %0, %2              \n"   \
                        "       scd     %0, %1                  \n"   \
                        "       .set    mips0                   \n"   \
                        : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
                        : "Ir" (i));                                  \
                } while (unlikely(!temp));                            \
        } else {                                                      \
                unsigned long flags;                                  \
                                                                      \
                raw_local_irq_save(flags);                            \
                v->counter c_op i;                                    \
                raw_local_irq_restore(flags);                         \
        }                                                             \
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op)                          \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
{                                                                     \
        long result;                                                  \
                                                                      \
        smp_mb__before_llsc();                                        \
                                                                      \
        if (kernel_uses_llsc && R10000_LLSC_WAR) {                    \
                long temp;                                            \
                                                                      \
                __asm__ __volatile__(                                 \
                "       .set    arch=r4000                      \n"   \
                "1:     lld     %1, %2  # atomic64_" #op "_return\n"  \
                "       " #asm_op " %0, %1, %3                  \n"   \
                "       scd     %0, %2                          \n"   \
                "       beqzl   %0, 1b                          \n"   \
                "       " #asm_op " %0, %1, %3                  \n"   \
                "       .set    mips0                           \n"   \
                : "=&r" (result), "=&r" (temp),                       \
                  "+" GCC_OFF12_ASM() (v->counter)                    \
                : "Ir" (i));                                          \
        } else if (kernel_uses_llsc) {                                \
                long temp;                                            \
                                                                      \
                do {                                                  \
                        __asm__ __volatile__(                         \
                        "       .set    arch=r4000              \n"   \
                        "       lld     %1, %2  # atomic64_" #op "_return\n" \
                        "       " #asm_op " %0, %1, %3          \n"   \
                        "       scd     %0, %2                  \n"   \
                        "       .set    mips0                   \n"   \
                        : "=&r" (result), "=&r" (temp),               \
                          "=" GCC_OFF12_ASM() (v->counter)            \
                        : "Ir" (i), GCC_OFF12_ASM() (v->counter)      \
                        : "memory");                                  \
                } while (unlikely(!result));                          \
                                                                      \
                result = temp; result c_op i;                         \
        } else {                                                      \
                unsigned long flags;                                  \
                                                                      \
                raw_local_irq_save(flags);                            \
                result = v->counter;                                  \
                result c_op i;                                        \
                v->counter = result;                                  \
                raw_local_irq_restore(flags);                         \
        }                                                             \
                                                                      \
        smp_llsc_mb();                                                \
                                                                      \
        return result;                                                \
}

#define ATOMIC64_OPS(op, c_op, asm_op)                                \
        ATOMIC64_OP(op, c_op, asm_op)                                 \
        ATOMIC64_OP_RETURN(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
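
For context, a short caller-side sketch of how the generated 32-bit and 64-bit
operations are typically used; the counters and the helper below are
hypothetical and assume an ordinary kernel translation unit:

#include <linux/atomic.h>       /* pulls in asm/atomic.h on MIPS */
#include <linux/printk.h>

static atomic_t pending_ops = ATOMIC_INIT(0);           /* hypothetical counters */
static atomic64_t bytes_moved = ATOMIC64_INIT(0);

static void example_account(long len)                   /* hypothetical helper */
{
        atomic_add(1, &pending_ops);            /* from ATOMIC_OPS(add, +=, addu) */
        atomic64_add(len, &bytes_moved);        /* from ATOMIC64_OPS(add, +=, daddu) */

        /*
         * The _return variants are fully ordered: they run
         * smp_mb__before_llsc() before and smp_llsc_mb() after the
         * LL/SC sequence, as seen in the macros above.
         */
        if (atomic_sub_return(1, &pending_ops) == 0)
                pr_debug("no pending operations left\n");
}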
@@ -422,7 +422,8 @@ ATOMIC64_OPS(sub, -=, dsubu)
 #undef ATOMIC64_OP
 
 /*
- * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic
+ * variable
  * @i: integer value to subtract
  * @v: pointer of type atomic64_t
  *
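
The kernel-doc comment above is truncated at the end of this hunk. As a rough,
hypothetical paraphrase of the documented semantics (subtract @i, but only
commit the new value when the result stays non-negative, returning the computed
value either way), the sketch below mirrors the interrupt-disable fallback
style used elsewhere in this header rather than the real lld/scd
implementation:

/* Hypothetical sketch only -- not the MIPS implementation. */
static long atomic64_sub_if_positive_sketch(long i, atomic64_t *v)
{
        unsigned long flags;
        long result;

        raw_local_irq_save(flags);
        result = v->counter - i;        /* tentative new value */
        if (result >= 0)
                v->counter = result;    /* commit only while non-negative */
        raw_local_irq_restore(flags);

        return result;                  /* caller sees the would-be value */
}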