author    Mike Frysinger <vapier@gentoo.org>    2009-06-13 11:21:51 -0400
committer Mike Frysinger <vapier@gentoo.org>    2009-06-18 21:41:22 -0400
commit    3d150630930c500926bd80d2c07872c9f0ee5db8 (patch)
tree      c1fcff5db616f92e4ba9586cc322d96e1ce90528 /arch
parent    22a151c1bcfe28d8d9aea515155b2d5edada9811 (diff)
Blackfin: convert locking primitives to asm-generic
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
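
This conversion removes Blackfin's hand-rolled uniprocessor (UP) implementations of atomics, bitops, spinlocks, the mutex fastpath, swab, and unaligned accessors, pulling in the asm-generic equivalents instead; the Blackfin SMP assembly paths are kept. On UP, the generic headers use the same strategy the deleted code did: make a read-modify-write sequence atomic by masking interrupts around a plain C update. A minimal sketch of that pattern, modeled on the asm-generic/atomic.h of this era (illustrative, not the verbatim header):

    /*
     * UP atomic_add_return, sketched: with interrupts masked, nothing
     * else can run on a uniprocessor, so the plain C update is atomic.
     */
    static inline int atomic_add_return(int i, atomic_t *v)
    {
            unsigned long flags;
            int ret;

            local_irq_save(flags);
            ret = (v->counter += i);
            local_irq_restore(flags);

            return ret;
    }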
Diffstat (limited to 'arch')
 -rw-r--r--  arch/blackfin/include/asm/atomic.h     109
 -rw-r--r--  arch/blackfin/include/asm/bitops.h     198
 -rw-r--r--  arch/blackfin/include/asm/mutex.h        2
 -rw-r--r--  arch/blackfin/include/asm/spinlock.h     6
 -rw-r--r--  arch/blackfin/include/asm/swab.h         6
 -rw-r--r--  arch/blackfin/include/asm/unaligned.h   12
 6 files changed, 25 insertions(+), 308 deletions(-)
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index b1d92f13ef96..88f36d599fe8 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -1,24 +1,21 @@
 #ifndef __ARCH_BLACKFIN_ATOMIC__
 #define __ARCH_BLACKFIN_ATOMIC__
 
+#ifndef CONFIG_SMP
+# include <asm-generic/atomic.h>
+#else
+
 #include <linux/types.h>
 #include <asm/system.h> /* local_irq_XXX() */
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc..
- *
- * Generally we do not concern about SMP BFIN systems, so we don't have
- * to deal with that.
- *
- * Tony Kou (tonyko@lineo.ca) Lineo Inc. 2001
  */
 
 #define ATOMIC_INIT(i) { (i) }
 #define atomic_set(v, i) (((v)->counter) = i)
 
-#ifdef CONFIG_SMP
-
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
@@ -84,100 +81,6 @@ static inline int atomic_test_mask(int mask, atomic_t *v)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()
 
-#else /* !CONFIG_SMP */
-
-#define atomic_read(v) ((v)->counter)
-
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save_hw(flags);
-	v->counter += i;
-	local_irq_restore_hw(flags);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save_hw(flags);
-	v->counter -= i;
-	local_irq_restore_hw(flags);
-
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	int __temp = 0;
-	unsigned long flags;
-
-	local_irq_save_hw(flags);
-	v->counter += i;
-	__temp = v->counter;
-	local_irq_restore_hw(flags);
-
-
-	return __temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	int __temp = 0;
-	unsigned long flags;
-
-	local_irq_save_hw(flags);
-	v->counter -= i;
-	__temp = v->counter;
-	local_irq_restore_hw(flags);
-
-	return __temp;
-}
-
-static inline void atomic_inc(volatile atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save_hw(flags);
-	v->counter++;
-	local_irq_restore_hw(flags);
-}
-
-static inline void atomic_dec(volatile atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save_hw(flags);
-	v->counter--;
-	local_irq_restore_hw(flags);
-}
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save_hw(flags);
-	v->counter &= ~mask;
-	local_irq_restore_hw(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save_hw(flags);
-	v->counter |= mask;
-	local_irq_restore_hw(flags);
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#endif /* !CONFIG_SMP */
-
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
@@ -210,4 +113,6 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 
 #include <asm-generic/atomic-long.h>
 
-#endif /* __ARCH_BLACKFIN_ATOMIC __ */
+#endif
+
+#endif
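
Note that the atomic_t API seen by callers is unchanged by this file; only the backing implementation moves. A hypothetical caller (not part of this patch) compiles identically against the asm-generic UP path and the Blackfin SMP path:

    /* Hypothetical refcount user -- same code on UP and SMP builds. */
    #include <asm/atomic.h>

    static atomic_t users = ATOMIC_INIT(0);

    static void get_ref(void)
    {
            atomic_inc(&users);
    }

    static int put_ref(void)
    {
            /* true when the last reference is dropped */
            return atomic_dec_return(&users) == 0;
    }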
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 75fee2f7d9f2..daffa71576d4 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -1,26 +1,22 @@
 #ifndef _BLACKFIN_BITOPS_H
 #define _BLACKFIN_BITOPS_H
 
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-#include <linux/compiler.h>
-#include <asm/byteorder.h> /* swab32 */
-
-#ifdef __KERNEL__
+#ifndef CONFIG_SMP
+# include <asm-generic/bitops.h>
+#else
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
 
+#include <linux/compiler.h>
+#include <asm/byteorder.h> /* swab32 */
+
 #include <asm-generic/bitops/ffs.h>
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ffz.h>
 
-#ifdef CONFIG_SMP
-
 #include <linux/linkage.h>
 
 asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);
@@ -79,189 +75,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
 }
 
-#else /* !CONFIG_SMP */
-
-#include <asm/system.h>	/* save_flags */
-
-static inline void set_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-	unsigned long flags;
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	*a |= mask;
-	local_irq_restore_hw(flags);
-}
-
-static inline void clear_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-	unsigned long flags;
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	*a &= ~mask;
-	local_irq_restore_hw(flags);
-}
-
-static inline void change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask;
-	unsigned long flags;
-	unsigned long *ADDR = (unsigned long *)addr;
-
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
-	local_irq_save_hw(flags);
-	*ADDR ^= mask;
-	local_irq_restore_hw(flags);
-}
-
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	local_irq_restore_hw(flags);
-
-	return retval;
-}
-
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	local_irq_restore_hw(flags);
-
-	return retval;
-}
-
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore_hw(flags);
-	return retval;
-}
-
-#endif /* CONFIG_SMP */
-
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
 #define smp_mb__before_clear_bit() barrier()
 #define smp_mb__after_clear_bit() barrier()
 
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	*a |= mask;
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	*a &= ~mask;
-}
-
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask;
-	unsigned long *ADDR = (unsigned long *)addr;
-
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
-	*ADDR ^= mask;
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	return retval;
-}
-
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	return retval;
-}
-
-static inline int __test_and_change_bit(int nr,
-					volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	return retval;
-}
-
-static inline int __test_bit(int nr, const void *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	return ((mask & *a) != 0);
-}
-
-#ifndef CONFIG_SMP
-/*
- * This routine doesn't need irq save and restore ops in UP
- * context.
- */
-static inline int test_bit(int nr, const void *addr)
-{
-	return __test_bit(nr, addr);
-}
-#endif
+#include <asm-generic/bitops/non-atomic.h>
 
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
@@ -272,10 +92,10 @@ static inline int test_bit(int nr, const void *addr)
 
 #include <asm-generic/bitops/minix.h>
 
-#endif /* __KERNEL__ */
-
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
+#endif /* CONFIG_SMP */
+
 #endif /* _BLACKFIN_BITOPS_H */
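
The deleted non-atomic helpers (__set_bit() and friends) are now supplied by asm-generic/bitops/non-atomic.h, which implements the same word-indexed masking over unsigned long. A sketch of the generic __set_bit(), reconstructed from that header's general shape (illustrative, not verbatim):

    static inline void __set_bit(int nr, volatile unsigned long *addr)
    {
            unsigned long mask = BIT_MASK(nr);  /* 1UL << (nr % BITS_PER_LONG) */
            unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

            *p |= mask;     /* non-atomic: callers must provide serialization */
    }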
diff --git a/arch/blackfin/include/asm/mutex.h b/arch/blackfin/include/asm/mutex.h
index 5d399256bf06..5cc641c50834 100644
--- a/arch/blackfin/include/asm/mutex.h
+++ b/arch/blackfin/include/asm/mutex.h
@@ -10,7 +10,7 @@
 #define _ASM_MUTEX_H
 
 #ifndef CONFIG_SMP
-#include <asm-generic/mutex-dec.h>
+#include <asm-generic/mutex.h>
 #else
 
 static inline void
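
This hunk is effectively cosmetic: at the time, asm-generic/mutex.h appears to have been a thin wrapper selecting the atomic-decrement fastpath, so UP builds still end up with the mutex-dec implementation, just referenced through the header's canonical name. Assumed contents, roughly:

    /* include/asm-generic/mutex.h, circa 2.6.31 (assumed, not verbatim) */
    #ifndef __ASM_GENERIC_MUTEX_H
    #define __ASM_GENERIC_MUTEX_H

    /* pick the generic atomic-decrement mutex fastpath */
    #include <asm-generic/mutex-dec.h>

    #endif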
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 0249ac319476..d6ff4b59fcb1 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -1,6 +1,10 @@
 #ifndef __BFIN_SPINLOCK_H
 #define __BFIN_SPINLOCK_H
 
+#ifndef CONFIG_SMP
+# include <asm-generic/spinlock.h>
+#else
+
 #include <asm/atomic.h>
 
 asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
@@ -86,4 +90,6 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define _raw_read_relax(lock) cpu_relax()
 #define _raw_write_relax(lock) cpu_relax()
 
+#endif
+
 #endif /* !__BFIN_SPINLOCK_H */
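
On UP kernels spinlocks compile away entirely (linux/spinlock.h provides the no-op variants), so the generic asm/spinlock.h only has to reject SMP configurations that lack a real implementation. Its contents at the time were essentially a guard (assumed, not verbatim):

    #ifndef __ASM_GENERIC_SPINLOCK_H
    #define __ASM_GENERIC_SPINLOCK_H

    #ifdef CONFIG_SMP
    #error need an architecture specific asm/spinlock.h
    #endif

    #endif /* __ASM_GENERIC_SPINLOCK_H */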
diff --git a/arch/blackfin/include/asm/swab.h b/arch/blackfin/include/asm/swab.h
index 6403ad2932eb..d442113de515 100644
--- a/arch/blackfin/include/asm/swab.h
+++ b/arch/blackfin/include/asm/swab.h
@@ -2,11 +2,7 @@
 #define _BLACKFIN_SWAB_H
 
 #include <linux/types.h>
-#include <linux/compiler.h>
-
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-# define __SWAB_64_THRU_32__
-#endif
+#include <asm-generic/swab.h>
 
 #ifdef __GNUC__
 
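
The open-coded __SWAB_64_THRU_32__ test moves into asm-generic/swab.h, which additionally gates it on the architecture's word size. Roughly (assumed, not verbatim):

    /* include/asm-generic/swab.h, circa 2.6.31 (assumed) */
    #include <asm/bitsperlong.h>

    /* 32-bit architectures do 64-bit swabs as two 32-bit swabs */
    #if __BITS_PER_LONG == 32
    # if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
    #  define __SWAB_64_THRU_32__
    # endif
    #endif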
diff --git a/arch/blackfin/include/asm/unaligned.h b/arch/blackfin/include/asm/unaligned.h
index fd8a1d634945..6cecbbb2111f 100644
--- a/arch/blackfin/include/asm/unaligned.h
+++ b/arch/blackfin/include/asm/unaligned.h
@@ -1,11 +1 @@
-#ifndef _ASM_BLACKFIN_UNALIGNED_H
-#define _ASM_BLACKFIN_UNALIGNED_H
-
-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_BLACKFIN_UNALIGNED_H */
+#include <asm-generic/unaligned.h>
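
Blackfin is little-endian, and asm-generic/unaligned.h selects accessors by byte order, so collapsing the whole header into one include is behavior-preserving: the generic header's little-endian branch pulls in the same three linux/unaligned/ headers and the same get_unaligned/put_unaligned definitions the deleted file hard-coded. The relevant branch, roughly (assumed, not verbatim):

    #include <asm/byteorder.h>

    #if defined(__LITTLE_ENDIAN)
    # include <linux/unaligned/le_struct.h>
    # include <linux/unaligned/be_byteshift.h>
    # include <linux/unaligned/generic.h>
    # define get_unaligned __get_unaligned_le
    # define put_unaligned __put_unaligned_le
    #endif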