author    Greg Ungerer <gerg@snapgear.com>           2010-09-07 20:31:11 -0400
committer Geert Uytterhoeven <geert@linux-m68k.org>  2010-10-22 03:43:24 -0400
commit    69f99746a2cfd88b9caed8e320ad86405b228ada (patch)
tree      96711f779d18d9f00a6a2b50cb60e1c301e28dac /arch/m68k
parent    138ff3462f53a7370bef15443e623ecba1c350bf (diff)
m68k/m68knommu: merge MMU and non-MMU atomic.h
The only difference between the MMU and non-MMU versions of atomic.h
is some extra support needed by ColdFire family processors. So merge
this into the MMU version of atomic.h.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
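
For context, the ColdFire-specific support being merged boils down to an
inline-asm constraint: classic m68k parts can take the addend as an
immediate operand ("i" constraint), while ColdFire parts must have it in
a data register ("d" only). A minimal standalone sketch of how the
patch's ASM_DI macro selects between the two; example_add() is a
hypothetical name, not part of the patch:

	#ifdef CONFIG_COLDFIRE
	#define ASM_DI	"d"	/* ColdFire: data register only */
	#else
	#define ASM_DI	"di"	/* classic m68k: data register or immediate */
	#endif

	/* Sketch: with "di" GCC may emit addl #i,<mem> directly; with "d"
	 * alone it must first load i into a data register, as ColdFire
	 * requires. */
	static inline void example_add(int i, int *mem)
	{
		__asm__ __volatile__("addl %1,%0" : "+m" (*mem) : ASM_DI (i));
	}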
Diffstat (limited to 'arch/m68k')
-rw-r--r--  arch/m68k/include/asm/atomic.h     | 210
-rw-r--r--  arch/m68k/include/asm/atomic_mm.h  | 200
-rw-r--r--  arch/m68k/include/asm/atomic_no.h  | 155
3 files changed, 207 insertions(+), 358 deletions(-)
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index eab36dcacf6c..03ae3d14cd4a 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -1,7 +1,211 @@
-#ifdef __uClinux__
-#include "atomic_no.h"
+#ifndef __ARCH_M68K_ATOMIC__
+#define __ARCH_M68K_ATOMIC__
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+/*
+ * We do not have SMP m68k systems, so we don't have to deal with that.
+ */
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_set(v, i)	(((v)->counter) = i)
+
+/*
+ * The ColdFire parts cannot do some immediate to memory operations,
+ * so for them we do not specify the "i" asm constraint.
+ */
+#ifdef CONFIG_COLDFIRE
+#define	ASM_DI	"d"
 #else
-#include "atomic_mm.h"
+#define	ASM_DI	"di"
 #endif
 
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
+}
+
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+	return c != 0;
+}
+
+static inline int atomic_inc_and_test(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+	return c != 0;
+}
+
+#ifdef CONFIG_RMW_INSNS
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int t, tmp;
+
+	__asm__ __volatile__(
+			"1:	movel %2,%1\n"
+			"	addl %3,%1\n"
+			"	casl %2,%1,%0\n"
+			"	jne 1b"
+			: "+m" (*v), "=&d" (t), "=&d" (tmp)
+			: "g" (i), "2" (atomic_read(v)));
+	return t;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int t, tmp;
+
+	__asm__ __volatile__(
+			"1:	movel %2,%1\n"
+			"	subl %3,%1\n"
+			"	casl %2,%1,%0\n"
+			"	jne 1b"
+			: "+m" (*v), "=&d" (t), "=&d" (tmp)
+			: "g" (i), "2" (atomic_read(v)));
+	return t;
+}
+
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#else /* !CONFIG_RMW_INSNS */
+
+static inline int atomic_add_return(int i, atomic_t * v)
+{
+	unsigned long flags;
+	int t;
+
+	local_irq_save(flags);
+	t = atomic_read(v);
+	t += i;
+	atomic_set(v, t);
+	local_irq_restore(flags);
+
+	return t;
+}
+
+static inline int atomic_sub_return(int i, atomic_t * v)
+{
+	unsigned long flags;
+	int t;
+
+	local_irq_save(flags);
+	t = atomic_read(v);
+	t -= i;
+	atomic_set(v, t);
+	local_irq_restore(flags);
+
+	return t;
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = atomic_read(v);
+	if (prev == old)
+		atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = atomic_read(v);
+	atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+
+#endif /* !CONFIG_RMW_INSNS */
+
+#define atomic_dec_return(v)	atomic_sub_return(1, (v))
+#define atomic_inc_return(v)	atomic_add_return(1, (v))
+
+static inline int atomic_sub_and_test(int i, atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("subl %2,%1; seq %0"
+			     : "=d" (c), "+m" (*v)
+			     : ASM_DI (i));
+	return c != 0;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("addl %2,%1; smi %0"
+			     : "=d" (c), "+m" (*v)
+			     : "id" (i));
+	return c != 0;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
+{
+	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+}
+
+static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
+{
+	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#include <asm-generic/atomic-long.h>
 #include <asm-generic/atomic64.h>
+#endif /* __ARCH_M68K_ATOMIC __ */
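
Aside (not part of the patch): the casl-based CONFIG_RMW_INSNS variants
above are hand-written compare-and-swap retry loops. A minimal sketch of
the same pattern in portable C11, for readers who do not speak m68k asm;
add_return_sketch() is a hypothetical illustration, not kernel code:

	#include <stdatomic.h>

	/* What atomic_add_return() does under CONFIG_RMW_INSNS: read the
	 * current value, compute old + i, and publish it only if the
	 * counter still holds old; on failure the CAS refreshes 'old'
	 * with the value it observed, and we retry. */
	static int add_return_sketch(int i, _Atomic int *v)
	{
		int old = atomic_load(v);

		while (!atomic_compare_exchange_weak(v, &old, old + i))
			;	/* 'old' now holds the fresh value; retry */
		return old + i;
	}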
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h
deleted file mode 100644
index 6a223b3f7e74..000000000000
--- a/arch/m68k/include/asm/atomic_mm.h
+++ /dev/null
@@ -1,200 +0,0 @@
-#ifndef __ARCH_M68K_ATOMIC__
-#define __ARCH_M68K_ATOMIC__
-
-#include <linux/types.h>
-#include <asm/system.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-/*
- * We do not have SMP m68k systems, so we don't have to deal with that.
- */
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic_set(v, i)	(((v)->counter) = i)
-
-static inline void atomic_add(int i, atomic_t *v)
-{
-	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
-}
-
-static inline void atomic_inc(atomic_t *v)
-{
-	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
-	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
-}
-
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-	char c;
-	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
-	return c != 0;
-}
-
-static inline int atomic_inc_and_test(atomic_t *v)
-{
-	char c;
-	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
-	return c != 0;
-}
-
-#ifdef CONFIG_RMW_INSNS
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	int t, tmp;
-
-	__asm__ __volatile__(
-			"1:	movel %2,%1\n"
-			"	addl %3,%1\n"
-			"	casl %2,%1,%0\n"
-			"	jne 1b"
-			: "+m" (*v), "=&d" (t), "=&d" (tmp)
-			: "g" (i), "2" (atomic_read(v)));
-	return t;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	int t, tmp;
-
-	__asm__ __volatile__(
-			"1:	movel %2,%1\n"
-			"	subl %3,%1\n"
-			"	casl %2,%1,%0\n"
-			"	jne 1b"
-			: "+m" (*v), "=&d" (t), "=&d" (tmp)
-			: "g" (i), "2" (atomic_read(v)));
-	return t;
-}
-
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#else /* !CONFIG_RMW_INSNS */
-
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-	unsigned long flags;
-	int t;
-
-	local_irq_save(flags);
-	t = atomic_read(v);
-	t += i;
-	atomic_set(v, t);
-	local_irq_restore(flags);
-
-	return t;
-}
-
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-	unsigned long flags;
-	int t;
-
-	local_irq_save(flags);
-	t = atomic_read(v);
-	t -= i;
-	atomic_set(v, t);
-	local_irq_restore(flags);
-
-	return t;
-}
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	unsigned long flags;
-	int prev;
-
-	local_irq_save(flags);
-	prev = atomic_read(v);
-	if (prev == old)
-		atomic_set(v, new);
-	local_irq_restore(flags);
-	return prev;
-}
-
-static inline int atomic_xchg(atomic_t *v, int new)
-{
-	unsigned long flags;
-	int prev;
-
-	local_irq_save(flags);
-	prev = atomic_read(v);
-	atomic_set(v, new);
-	local_irq_restore(flags);
-	return prev;
-}
-
-#endif /* !CONFIG_RMW_INSNS */
-
-#define atomic_dec_return(v)	atomic_sub_return(1, (v))
-#define atomic_inc_return(v)	atomic_add_return(1, (v))
-
-static inline int atomic_sub_and_test(int i, atomic_t *v)
-{
-	char c;
-	__asm__ __volatile__("subl %2,%1; seq %0"
-			     : "=d" (c), "+m" (*v)
-			     : "id" (i));
-	return c != 0;
-}
-
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
-	char c;
-	__asm__ __volatile__("addl %2,%1; smi %0"
-			     : "=d" (c), "+m" (*v)
-			     : "id" (i));
-	return c != 0;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
-}
-
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
-}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
-#include <asm-generic/atomic-long.h>
-#endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/arch/m68k/include/asm/atomic_no.h b/arch/m68k/include/asm/atomic_no.h
deleted file mode 100644
index 289310c63a8a..000000000000
--- a/arch/m68k/include/asm/atomic_no.h
+++ /dev/null
@@ -1,155 +0,0 @@
-#ifndef __ARCH_M68KNOMMU_ATOMIC__
-#define __ARCH_M68KNOMMU_ATOMIC__
-
-#include <linux/types.h>
-#include <asm/system.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-/*
- * We do not have SMP m68k systems, so we don't have to deal with that.
- */
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic_set(v, i)	(((v)->counter) = i)
-
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "d" (i));
-#else
-	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
-#endif
-}
-
-static __inline__ void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "d" (i));
-#else
-	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "di" (i));
-#endif
-}
-
-static __inline__ int atomic_sub_and_test(int i, atomic_t * v)
-{
-	char c;
-#ifdef CONFIG_COLDFIRE
-	__asm__ __volatile__("subl %2,%1; seq %0"
-			     : "=d" (c), "+m" (*v)
-			     : "d" (i));
-#else
-	__asm__ __volatile__("subl %2,%1; seq %0"
-			     : "=d" (c), "+m" (*v)
-			     : "di" (i));
-#endif
-	return c != 0;
-}
-
-static __inline__ void atomic_inc(volatile atomic_t *v)
-{
-	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
-}
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-
-static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
-{
-	char c;
-	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
-	return c != 0;
-}
-
-static __inline__ void atomic_dec(volatile atomic_t *v)
-{
-	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
-}
-
-static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
-{
-	char c;
-	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
-	return c != 0;
-}
-
-static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
-}
-
-static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-	unsigned long temp, flags;
-
-	local_irq_save(flags);
-	temp = *(long *)v;
-	temp += i;
-	*(long *)v = temp;
-	local_irq_restore(flags);
-
-	return temp;
-}
-
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-	unsigned long temp, flags;
-
-	local_irq_save(flags);
-	temp = *(long *)v;
-	temp -= i;
-	*(long *)v = temp;
-	local_irq_restore(flags);
-
-	return temp;
-}
-
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
-}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_dec_return(v)	atomic_sub_return(1,(v))
-#define atomic_inc_return(v)	atomic_add_return(1,(v))
-
-#include <asm-generic/atomic-long.h>
-#endif /* __ARCH_M68KNOMMU_ATOMIC __ */
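
A closing note on the non-CAS fallbacks seen in all three files: on these
uniprocessor m68k systems ("we do not have SMP m68k systems"), masking
local interrupts is enough to make a read-modify-write sequence atomic,
since nothing else can run between the load and the store. A minimal
sketch of the pattern; the fetch-style name is hypothetical, while
local_irq_save()/local_irq_restore() are the kernel helpers used above:

	/* Returns the value *before* the add, unlike atomic_add_return(). */
	static inline int atomic_fetch_add_up(int i, atomic_t *v)
	{
		unsigned long flags;
		int old;

		local_irq_save(flags);	/* block local interrupts */
		old = atomic_read(v);
		atomic_set(v, old + i);	/* plain RMW, now uninterruptible */
		local_irq_restore(flags);

		return old;
	}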