author     Peter Zijlstra <peterz@infradead.org>    2015-07-10 06:55:45 -0400
committer  Thomas Gleixner <tglx@linutronix.de>     2015-07-27 08:06:24 -0400
commit     73ada3700bbb0a4c7cc06ea8d74e93c689f90cdb (patch)
tree       7080670f15d0087e409b3ed4ac0ce24829c0d7e1
parent     b0d8003ef405c4148b703cdaab1171045c6c3bbd (diff)
h8300: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}
These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
Also rework the atomic implementation in terms of CPP macros to avoid
the typical repetition -- I seem to have missed this arch the last
time around when I did that.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
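
To make the macro rework concrete: each ATOMIC_OP invocation in the diff below stamps out one interrupt-protected read-modify-write function. A sketch of what ATOMIC_OP(and, &=) expands to after preprocessing (the h8300flags type and the arch_local_irq_* helpers are the ones visible in the diff itself):

/* Expansion sketch of ATOMIC_OP(and, &=): "op" is pasted into the
 * function name via ##, and "c_op" is substituted as the
 * compound-assignment operator. On this uniprocessor architecture,
 * atomicity comes from disabling interrupts around a plain C update. */
static inline void atomic_and(int i, atomic_t *v)
{
        h8300flags flags;

        flags = arch_local_irq_save();
        v->counter &= i;        /* c_op substituted as &= */
        arch_local_irq_restore(flags);
}

ATOMIC_OP_RETURN(op, c_op) works the same way, additionally capturing and returning the updated counter value.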
-rw-r--r--    arch/h8300/include/asm/atomic.h    135
1 file changed, 40 insertions(+), 95 deletions(-)
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 7ca73f8546cc..f181f820be33 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -16,83 +16,54 @@
 
 #include <linux/kernel.h>
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
-
-        flags = arch_local_irq_save();
-        ret = v->counter += i;
-        arch_local_irq_restore(flags);
-        return ret;
+#define ATOMIC_OP_RETURN(op, c_op)                              \
+static inline int atomic_##op##_return(int i, atomic_t *v)     \
+{                                                               \
+        h8300flags flags;                                       \
+        int ret;                                                \
+                                                                \
+        flags = arch_local_irq_save();                          \
+        ret = v->counter c_op i;                                \
+        arch_local_irq_restore(flags);                          \
+        return ret;                                             \
 }
 
-#define atomic_add(i, v) atomic_add_return(i, v)
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
-
-        flags = arch_local_irq_save();
-        ret = v->counter -= i;
-        arch_local_irq_restore(flags);
-        return ret;
+#define ATOMIC_OP(op, c_op)                                     \
+static inline void atomic_##op(int i, atomic_t *v)              \
+{                                                               \
+        h8300flags flags;                                       \
+                                                                \
+        flags = arch_local_irq_save();                          \
+        v->counter c_op i;                                      \
+        arch_local_irq_restore(flags);                          \
 }
 
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
 
-static inline int atomic_inc_return(atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
-
-        flags = arch_local_irq_save();
-        v->counter++;
-        ret = v->counter;
-        arch_local_irq_restore(flags);
-        return ret;
-}
+#define CONFIG_ARCH_HAS_ATOMIC_OR
 
-#define atomic_inc(v) atomic_inc_return(v)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-static inline int atomic_dec_return(atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
+#define atomic_add(i, v) (void)atomic_add_return(i, v)
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
 
-        flags = arch_local_irq_save();
-        --v->counter;
-        ret = v->counter;
-        arch_local_irq_restore(flags);
-        return ret;
-}
+#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
-#define atomic_dec(v) atomic_dec_return(v)
+#define atomic_inc_return(v) atomic_add_return(1, v)
+#define atomic_dec_return(v) atomic_sub_return(1, v)
 
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-        h8300flags flags;
-        int ret;
+#define atomic_inc(v) (void)atomic_inc_return(v)
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
 
-        flags = arch_local_irq_save();
-        --v->counter;
-        ret = v->counter;
-        arch_local_irq_restore(flags);
-        return ret == 0;
-}
+#define atomic_dec(v) (void)atomic_dec_return(v)
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -120,40 +91,14 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
         return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-        unsigned char ccr;
-        unsigned long tmp;
-
-        __asm__ __volatile__("stc ccr,%w3\n\t"
-                             "orc #0x80,ccr\n\t"
-                             "mov.l %0,%1\n\t"
-                             "and.l %2,%1\n\t"
-                             "mov.l %1,%0\n\t"
-                             "ldc %w3,ccr"
-                             : "=m"(*v), "=r"(tmp)
-                             : "g"(~(mask)), "r"(ccr));
+        atomic_and(~mask, v);
 }
 
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-        unsigned char ccr;
-        unsigned long tmp;
-
-        __asm__ __volatile__("stc ccr,%w3\n\t"
-                             "orc #0x80,ccr\n\t"
-                             "mov.l %0,%1\n\t"
-                             "or.l %2,%1\n\t"
-                             "mov.l %1,%0\n\t"
-                             "ldc %w3,ccr"
-                             : "=m"(*v), "=r"(tmp)
-                             : "g"(~(mask)), "r"(ccr));
+        atomic_or(mask, v);
 }
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()     barrier()
-#define smp_mb__after_atomic_dec()      barrier()
-#define smp_mb__before_atomic_inc()     barrier()
-#define smp_mb__after_atomic_inc()      barrier()
-
 #endif /* __ARCH_H8300_ATOMIC __ */
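
For callers, the patch turns atomic_{set,clear}_mask into thin __deprecated wrappers, so new code should use the logic ops directly. A hypothetical before/after migration (the status variable and function names are illustrative, not from the patch):

/* Hypothetical caller, for illustration only. */
static atomic_t status = ATOMIC_INIT(0);

static void set_busy(unsigned int mask)
{
        /* old, now __deprecated: atomic_set_mask(mask, &status); */
        atomic_or(mask, &status);
}

static void clear_busy(unsigned int mask)
{
        /* old, now __deprecated: atomic_clear_mask(mask, &status); */
        atomic_and(~mask, &status);
}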