Diffstat (limited to 'arch/blackfin/include/asm/bitops.h')
-rw-r--r--  arch/blackfin/include/asm/bitops.h  204
1 files changed, 133 insertions, 71 deletions
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index b39a175c79c..21b036eadab 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -7,7 +7,6 @@
 
 #include <linux/compiler.h>
 #include <asm/byteorder.h>	/* swab32 */
-#include <asm/system.h>	/* save_flags */
 
 #ifdef __KERNEL__
 
@@ -20,80 +19,107 @@
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ffz.h>
 
-static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+#ifdef CONFIG_SMP
+
+#include <linux/linkage.h>
+
+asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr);
+
+static inline void set_bit(int nr, volatile unsigned long *addr)
 {
-	int *a = (int *)addr;
-	int mask;
-	unsigned long flags;
+	volatile unsigned long *a = addr + (nr >> 5);
+	__raw_bit_set_asm(a, nr & 0x1f);
+}
 
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a |= mask;
-	local_irq_restore(flags);
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	__raw_bit_clear_asm(a, nr & 0x1f);
 }
 
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
 {
-	int *a = (int *)addr;
-	int mask;
+	volatile unsigned long *a = addr + (nr >> 5);
+	__raw_bit_toggle_asm(a, nr & 0x1f);
+}
 
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	*a |= mask;
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+	volatile const unsigned long *a = addr + (nr >> 5);
+	return __raw_bit_test_asm(a, nr & 0x1f) != 0;
 }
 
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	return __raw_bit_test_set_asm(a, nr & 0x1f);
+}
+
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	return __raw_bit_test_clear_asm(a, nr & 0x1f);
+}
+
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
+}
+
+#else /* !CONFIG_SMP */
+
+#include <asm/system.h>	/* save_flags */
 
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(int nr, volatile unsigned long *addr)
 {
 	int *a = (int *)addr;
 	int mask;
 	unsigned long flags;
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a &= ~mask;
-	local_irq_restore(flags);
+	local_irq_save_hw(flags);
+	*a |= mask;
+	local_irq_restore_hw(flags);
 }
 
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
 {
 	int *a = (int *)addr;
 	int mask;
-
+	unsigned long flags;
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
+	local_irq_save_hw(flags);
 	*a &= ~mask;
+	local_irq_restore_hw(flags);
 }
 
-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
 {
 	int mask, flags;
 	unsigned long *ADDR = (unsigned long *)addr;
 
 	ADDR += nr >> 5;
 	mask = 1 << (nr & 31);
-	local_irq_save(flags);
-	*ADDR ^= mask;
-	local_irq_restore(flags);
-}
-
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask;
-	unsigned long *ADDR = (unsigned long *)addr;
-
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
+	local_irq_save_hw(flags);
 	*ADDR ^= mask;
+	local_irq_restore_hw(flags);
 }
 
-static __inline__ int test_and_set_bit(int nr, void *addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -101,27 +127,31 @@ static __inline__ int test_and_set_bit(int nr, void *addr)
 
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	retval = (mask & *a) != 0;
 	*a |= mask;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return retval;
 }
 
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = (volatile unsigned int *)addr;
+	unsigned long flags;
 
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
+	local_irq_save_hw(flags);
 	retval = (mask & *a) != 0;
-	*a |= mask;
+	*a &= ~mask;
+	local_irq_restore_hw(flags);
+
 	return retval;
 }
 
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -129,15 +159,52 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
 
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	retval = (mask & *a) != 0;
+	*a ^= mask;
+	local_irq_restore_hw(flags);
+	return retval;
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+	int *a = (int *)addr;
+	int mask;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	*a |= mask;
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+	int *a = (int *)addr;
+	int mask;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
 	*a &= ~mask;
-	local_irq_restore(flags);
+}
 
-	return retval;
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+	int mask;
+	unsigned long *ADDR = (unsigned long *)addr;
+
+	ADDR += nr >> 5;
+	mask = 1 << (nr & 31);
+	*ADDR ^= mask;
 }
 
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -145,26 +212,23 @@ static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
 	retval = (mask & *a) != 0;
-	*a &= ~mask;
+	*a |= mask;
 	return retval;
 }
 
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
 
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
 	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore(flags);
+	*a &= ~mask;
 	return retval;
 }
 
-static __inline__ int __test_and_change_bit(int nr,
+static inline int __test_and_change_bit(int nr,
 					    volatile unsigned long *addr)
 {
 	int mask, retval;
@@ -177,16 +241,7 @@ static __inline__ int __test_and_change_bit(int nr,
 	return retval;
 }
 
-/*
- * This routine doesn't need to be atomic.
- */
-static __inline__ int __constant_test_bit(int nr, const void *addr)
-{
-	return ((1UL << (nr & 31)) &
-		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int __test_bit(int nr, const void *addr)
+static inline int __test_bit(int nr, const void *addr)
 {
 	int *a = (int *)addr;
 	int mask;
@@ -196,10 +251,16 @@ static __inline__ int __test_bit(int nr, const void *addr)
 	return ((mask & *a) != 0);
 }
 
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)))
+#ifndef CONFIG_SMP
+/*
+ * This routine doesn't need irq save and restore ops in UP
+ * context.
+ */
+static inline int test_bit(int nr, const void *addr)
+{
+	return __test_bit(nr, addr);
+}
+#endif
 
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
@@ -213,6 +274,7 @@ static __inline__ int __test_bit(int nr, const void *addr)
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #endif /* _BLACKFIN_BITOPS_H */
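
Both the SMP and the !SMP halves of the patch locate a bit the same way: nr >> 5 selects the 32-bit word and nr & 0x1f selects the bit inside that word. The stand-alone sketch below is not part of the patch; the sketch_* names and the user-space main() are purely illustrative. It mirrors that indexing and the non-atomic __set_bit()/__test_bit() behaviour in plain C, without the interrupt masking or the __raw_bit_*_asm helpers the kernel code uses for atomicity.

#include <stdio.h>

/* Illustrative only: same word/bit split as the patch, no atomicity. */
static void sketch_set_bit(int nr, unsigned long *addr)
{
	unsigned long *word = addr + (nr >> 5);		/* which 32-bit word */
	unsigned long mask = 1UL << (nr & 0x1f);	/* which bit within it */

	*word |= mask;		/* non-atomic, like __set_bit() in the patch */
}

static int sketch_test_bit(int nr, const unsigned long *addr)
{
	const unsigned long *word = addr + (nr >> 5);
	unsigned long mask = 1UL << (nr & 0x1f);

	return (*word & mask) != 0;	/* like __test_bit() in the patch */
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	sketch_set_bit(37, bitmap);	/* word 1, bit 5 */
	printf("bit 37 = %d, bit 36 = %d\n",
	       sketch_test_bit(37, bitmap), sketch_test_bit(36, bitmap));
	return 0;
}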