path: root/arch/sh
author     Peter Zijlstra <peterz@infradead.org>    2014-03-26 13:12:45 -0400
committer  Ingo Molnar <mingo@kernel.org>           2014-08-14 06:48:12 -0400
commit     c6470150dff9aff682063890c9b8eac71b695def (patch)
tree       8c38eda25c4e350e9d0dd9e04818c3b7b2d7f9a7 /arch/sh
parent     af095dd60bdc52b11c186c3151e8e38d6faa094c (diff)
locking,arch,sh: Fold atomic_ops
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-sh@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.770036493@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
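The folding pattern itself is easy to see outside the kernel. Below is a minimal, stand-alone user-space sketch of the same CPP trick (names chosen to mirror the kernel macros; the toy bodies are plain C and deliberately not atomic, whereas the real generated bodies in the diffs below are wrapped in the GRB, IRQ-disable, or LL/SC movli.l/movco.l sequences):

/* Toy illustration of the ATOMIC_OP()/ATOMIC_OPS() folding -- not kernel code. */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        v->counter c_op i; \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        return v->counter c_op i; \
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)        /* generates atomic_add() and atomic_add_return() */
ATOMIC_OPS(sub, -=)        /* generates atomic_sub() and atomic_sub_return() */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

int main(void)
{
        atomic_t v = { .counter = 0 };

        atomic_add(5, &v);
        printf("%d\n", atomic_sub_return(2, &v));        /* prints 3 */
        return 0;
}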
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/include/asm/atomic-grb.h    119
-rw-r--r--  arch/sh/include/asm/atomic-irq.h     62
-rw-r--r--  arch/sh/include/asm/atomic-llsc.h   101
3 files changed, 112 insertions(+), 170 deletions(-)
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index a273c88578fc..97a5fda83450 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -1,85 +1,56 @@
 #ifndef __ASM_SH_ATOMIC_GRB_H
 #define __ASM_SH_ATOMIC_GRB_H
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-        int tmp;
-
-        __asm__ __volatile__ (
-                " .align 2 \n\t"
-                " mova 1f, r0 \n\t" /* r0 = end point */
-                " mov r15, r1 \n\t" /* r1 = saved sp */
-                " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
-                " mov.l @%1, %0 \n\t" /* load old value */
-                " add %2, %0 \n\t" /* add */
-                " mov.l %0, @%1 \n\t" /* store new value */
-                "1: mov r1, r15 \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r" (v)
-                : "r" (i)
-                : "memory" , "r0", "r1");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        int tmp;
-
-        __asm__ __volatile__ (
-                " .align 2 \n\t"
-                " mova 1f, r0 \n\t" /* r0 = end point */
-                " mov r15, r1 \n\t" /* r1 = saved sp */
-                " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
-                " mov.l @%1, %0 \n\t" /* load old value */
-                " sub %2, %0 \n\t" /* sub */
-                " mov.l %0, @%1 \n\t" /* store new value */
-                "1: mov r1, r15 \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r" (v)
-                : "r" (i)
-                : "memory" , "r0", "r1");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        int tmp;
-
-        __asm__ __volatile__ (
-                " .align 2 \n\t"
-                " mova 1f, r0 \n\t" /* r0 = end point */
-                " mov r15, r1 \n\t" /* r1 = saved sp */
-                " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
-                " mov.l @%1, %0 \n\t" /* load old value */
-                " add %2, %0 \n\t" /* add */
-                " mov.l %0, @%1 \n\t" /* store new value */
-                "1: mov r1, r15 \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r" (v)
-                : "r" (i)
-                : "memory" , "r0", "r1");
-
-        return tmp;
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+        int tmp; \
+ \
+        __asm__ __volatile__ ( \
+                " .align 2 \n\t" \
+                " mova 1f, r0 \n\t" /* r0 = end point */ \
+                " mov r15, r1 \n\t" /* r1 = saved sp */ \
+                " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
+                " mov.l @%1, %0 \n\t" /* load old value */ \
+                " " #op " %2, %0 \n\t" /* $op */ \
+                " mov.l %0, @%1 \n\t" /* store new value */ \
+                "1: mov r1, r15 \n\t" /* LOGOUT */ \
+                : "=&r" (tmp), \
+                  "+r" (v) \
+                : "r" (i) \
+                : "memory" , "r0", "r1"); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+        int tmp; \
+ \
+        __asm__ __volatile__ ( \
+                " .align 2 \n\t" \
+                " mova 1f, r0 \n\t" /* r0 = end point */ \
+                " mov r15, r1 \n\t" /* r1 = saved sp */ \
+                " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
+                " mov.l @%1, %0 \n\t" /* load old value */ \
+                " " #op " %2, %0 \n\t" /* $op */ \
+                " mov.l %0, @%1 \n\t" /* store new value */ \
+                "1: mov r1, r15 \n\t" /* LOGOUT */ \
+                : "=&r" (tmp), \
+                  "+r" (v) \
+                : "r" (i) \
+                : "memory" , "r0", "r1"); \
+ \
+        return tmp; \
 }
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        int tmp;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-        __asm__ __volatile__ (
-                " .align 2 \n\t"
-                " mova 1f, r0 \n\t" /* r0 = end point */
-                " mov r15, r1 \n\t" /* r1 = saved sp */
-                " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
-                " mov.l @%1, %0 \n\t" /* load old value */
-                " sub %2, %0 \n\t" /* sub */
-                " mov.l %0, @%1 \n\t" /* store new value */
-                "1: mov r1, r15 \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r" (v)
-                : "r" (i)
-                : "memory", "r0", "r1");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-        return tmp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
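For reference, expanding the new macro by hand shows how the #op stringification reproduces the removed code. ATOMIC_OP(add) expands to roughly the following; the adjacent string literals " " "add" " %2, %0 ..." are concatenated by the compiler into the old " add %2, %0 ..." line. This is only an illustration of the expansion, not an extra change:

/* Hand expansion of ATOMIC_OP(add) from the new atomic-grb.h (illustrative). */
static inline void atomic_add(int i, atomic_t *v)
{
        int tmp;

        __asm__ __volatile__ (
                " .align 2 \n\t"
                " mova 1f, r0 \n\t" /* r0 = end point */
                " mov r15, r1 \n\t" /* r1 = saved sp */
                " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
                " mov.l @%1, %0 \n\t" /* load old value */
                " " "add" " %2, %0 \n\t" /* $op */
                " mov.l %0, @%1 \n\t" /* store new value */
                "1: mov r1, r15 \n\t" /* LOGOUT */
                : "=&r" (tmp),
                  "+r" (v)
                : "r" (i)
                : "memory" , "r0", "r1");
}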
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 9f7c56609e53..61d107523f06 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -8,49 +8,39 @@
  * forward to code at the end of this object's .text section, then
  * branch back to restart the operation.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned long flags;
-
-        raw_local_irq_save(flags);
-        v->counter += i;
-        raw_local_irq_restore(flags);
-}
 
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned long flags;
-
-        raw_local_irq_save(flags);
-        v->counter -= i;
-        raw_local_irq_restore(flags);
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+        unsigned long flags; \
+ \
+        raw_local_irq_save(flags); \
+        v->counter c_op i; \
+        raw_local_irq_restore(flags); \
 }
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long temp, flags;
-
-        raw_local_irq_save(flags);
-        temp = v->counter;
-        temp += i;
-        v->counter = temp;
-        raw_local_irq_restore(flags);
-
-        return temp;
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+        unsigned long temp, flags; \
+ \
+        raw_local_irq_save(flags); \
+        temp = v->counter; \
+        temp c_op i; \
+        v->counter = temp; \
+        raw_local_irq_restore(flags); \
+ \
+        return temp; \
 }
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long temp, flags;
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
 
-        raw_local_irq_save(flags);
-        temp = v->counter;
-        temp -= i;
-        v->counter = temp;
-        raw_local_irq_restore(flags);
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
 
-        return temp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
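As a quick sanity check on the interrupt-disable fallback, hand-expanding ATOMIC_OPS(sub, -=) regenerates exactly the pair of functions deleted above; the c_op argument is substituted textually:

/* Hand expansion of ATOMIC_OPS(sub, -=) from the new atomic-irq.h (illustrative). */
static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        v->counter -= i;
        raw_local_irq_restore(flags);
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long temp, flags;

        raw_local_irq_save(flags);
        temp = v->counter;
        temp -= i;
        v->counter = temp;
        raw_local_irq_restore(flags);

        return temp;
}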
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 4b00b78e3f4f..8575dccb9ef7 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -2,39 +2,6 @@
 #define __ASM_SH_ATOMIC_LLSC_H
 
 /*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-}
-
-/*
  * SH-4A note:
  *
  * We basically get atomic_xxx_return() for free compared with
@@ -42,39 +9,53 @@ static inline void atomic_sub(int i, atomic_t *v)
  * encoding, so the retval is automatically set without having to
  * do any special work.
  */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long temp;
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
 
-        __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add_return \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+        unsigned long tmp; \
+ \
+        __asm__ __volatile__ ( \
+"1: movli.l @%2, %0 ! atomic_" #op "\n" \
+" " #op " %1, %0 \n" \
+" movco.l %0, @%2 \n" \
+" bf 1b \n" \
+        : "=&z" (tmp) \
+        : "r" (i), "r" (&v->counter) \
+        : "t"); \
+}
 
-        return temp;
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+        unsigned long temp; \
+ \
+        __asm__ __volatile__ ( \
+"1: movli.l @%2, %0 ! atomic_" #op "_return \n" \
+" " #op " %1, %0 \n" \
+" movco.l %0, @%2 \n" \
+" bf 1b \n" \
+" synco \n" \
+        : "=&z" (temp) \
+        : "r" (i), "r" (&v->counter) \
+        : "t"); \
+ \
+        return temp; \
 }
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long temp;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-        __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub_return \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-        return temp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
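The commit message's point about easy addition of new ops can be seen from this LL/SC flavour: a hypothetical follow-up (not part of this patch) that wanted, say, a bitwise AND would only need one ATOMIC_OP(and) line next to the existing ATOMIC_OPS() invocations, which would expand to roughly:

/* Hypothetical expansion of ATOMIC_OP(and) -- not part of this commit.
 * SH's "and Rm, Rn" instruction slots into the movli.l/movco.l retry
 * loop exactly like add/sub do. */
static inline void atomic_and(int i, atomic_t *v)
{
        unsigned long tmp;

        __asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_and \n"
" and %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
        : "=&z" (tmp)
        : "r" (i), "r" (&v->counter)
        : "t");
}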