Diffstat (limited to 'arch/blackfin/include/asm/bitops.h')
-rw-r--r--	arch/blackfin/include/asm/bitops.h	218
1 file changed, 218 insertions(+), 0 deletions(-)
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
new file mode 100644
index 000000000000..b39a175c79c1
--- /dev/null
+++ b/arch/blackfin/include/asm/bitops.h
@@ -0,0 +1,218 @@
#ifndef _BLACKFIN_BITOPS_H
#define _BLACKFIN_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>	/* local_irq_save/local_irq_restore */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

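/*
 * The atomic bit operations below make their read-modify-write sequences
 * atomic by disabling interrupts around them with local_irq_save()/
 * local_irq_restore().
 */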
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}

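/*
 * The double-underscore variants are the non-atomic versions: the caller
 * must guarantee that nothing else can touch the word concurrently.
 */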
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	local_irq_save(flags);
	*ADDR ^= mask;
	local_irq_restore(flags);
}

static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

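/*
 * The test_and_*() operations return the previous value of the bit
 * (non-zero if it was set) and then update it; the atomic versions keep
 * interrupts disabled so the test and the update cannot be separated.
 */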
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}

static inline int __test_and_change_bit(int nr,
					volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
static inline int __constant_test_bit(int nr, const void *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static inline int __test_bit(int nr, const void *addr)
{
	const int *a = (const int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

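/*
 * test_bit() picks __constant_test_bit() when the bit number is a
 * compile-time constant and __test_bit() otherwise; both are plain
 * (non-atomic) reads.
 */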
#define test_bit(nr, addr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_bit((nr), (addr)) : \
	 __test_bit((nr), (addr)))

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/ext2-non-atomic.h>

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _BLACKFIN_BITOPS_H */