Diffstat (limited to 'include/asm-generic/bitops')
-rw-r--r--   include/asm-generic/bitops/__ffs.h             43
-rw-r--r--   include/asm-generic/bitops/atomic.h           191
-rw-r--r--   include/asm-generic/bitops/ext2-atomic.h       22
-rw-r--r--   include/asm-generic/bitops/ext2-non-atomic.h   18
-rw-r--r--   include/asm-generic/bitops/ffs.h               41
-rw-r--r--   include/asm-generic/bitops/ffz.h               12
-rw-r--r--   include/asm-generic/bitops/find.h              13
-rw-r--r--   include/asm-generic/bitops/fls.h               41
-rw-r--r--   include/asm-generic/bitops/fls64.h             14
-rw-r--r--   include/asm-generic/bitops/hweight.h           11
-rw-r--r--   include/asm-generic/bitops/le.h                53
-rw-r--r--   include/asm-generic/bitops/minix-le.h          17
-rw-r--r--   include/asm-generic/bitops/minix.h             15
-rw-r--r--   include/asm-generic/bitops/non-atomic.h       111
-rw-r--r--   include/asm-generic/bitops/sched.h             36
15 files changed, 638 insertions, 0 deletions
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
new file mode 100644
index 000000000000..9a3274aecf83
--- /dev/null
+++ b/include/asm-generic/bitops/__ffs.h
@@ -0,0 +1,43 @@
+#ifndef _ASM_GENERIC_BITOPS___FFS_H_
+#define _ASM_GENERIC_BITOPS___FFS_H_
+
+#include <asm/types.h>
+
+/**
+ * __ffs - find first set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+        int num = 0;
+
+#if BITS_PER_LONG == 64
+        if ((word & 0xffffffff) == 0) {
+                num += 32;
+                word >>= 32;
+        }
+#endif
+        if ((word & 0xffff) == 0) {
+                num += 16;
+                word >>= 16;
+        }
+        if ((word & 0xff) == 0) {
+                num += 8;
+                word >>= 8;
+        }
+        if ((word & 0xf) == 0) {
+                num += 4;
+                word >>= 4;
+        }
+        if ((word & 0x3) == 0) {
+                num += 2;
+                word >>= 2;
+        }
+        if ((word & 0x1) == 0)
+                num += 1;
+        return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
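The cascade above is a binary search for the lowest set bit: each test halves the remaining range and accumulates the skipped width. A minimal userspace sketch (not part of the patch) that mirrors it under a hypothetical name, demo_ffs, and checks it against GCC's __builtin_ctzl, which computes the same value for non-zero inputs:

    #include <assert.h>
    #include <limits.h>

    /* Hypothetical userspace copy of the cascade in __ffs() above. */
    static unsigned long demo_ffs(unsigned long word)
    {
        unsigned long num = 0;

        if (sizeof(word) * CHAR_BIT == 64 && (word & 0xffffffffUL) == 0) {
            num += 32;
            word >>= 16;    /* two 16-bit shifts keep this valid */
            word >>= 16;    /* on 32-bit longs as well           */
        }
        if ((word & 0xffff) == 0) { num += 16; word >>= 16; }
        if ((word & 0xff) == 0)   { num += 8;  word >>= 8;  }
        if ((word & 0xf) == 0)    { num += 4;  word >>= 4;  }
        if ((word & 0x3) == 0)    { num += 2;  word >>= 2;  }
        if ((word & 0x1) == 0)    { num += 1; }
        return num;
    }

    int main(void)
    {
        assert(demo_ffs(1UL) == 0);                 /* bit 0 is set      */
        assert(demo_ffs(0x30UL) == 4);              /* 0b110000 -> bit 4 */
        assert(demo_ffs(0x30UL) == (unsigned long)__builtin_ctzl(0x30UL));
        return 0;
    }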
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
new file mode 100644
index 000000000000..78339319ba02
--- /dev/null
+++ b/include/asm-generic/bitops/atomic.h
@@ -0,0 +1,191 @@
+#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_ATOMIC_H_
+
+#include <asm/types.h>
+
+#define BITOP_MASK(nr)          (1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+#include <asm/cache.h>          /* we use L1_CACHE_BYTES */
+
+/* Use an array of spinlocks for our atomic_ts.
+ * Hash function to index into a different spinlock.
+ * Since "a" is usually an address, use one spinlock per cacheline.
+ */
+# define ATOMIC_HASH_SIZE 4
+# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
+
+extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+
+/* Can't use raw_spin_lock_irq because of #include problems, so
+ * this is the substitute */
+#define _atomic_spin_lock_irqsave(l,f) do {     \
+        raw_spinlock_t *s = ATOMIC_HASH(l);     \
+        local_irq_save(f);                      \
+        __raw_spin_lock(s);                     \
+} while(0)
+
+#define _atomic_spin_unlock_irqrestore(l,f) do {        \
+        raw_spinlock_t *s = ATOMIC_HASH(l);             \
+        __raw_spin_unlock(s);                           \
+        local_irq_restore(f);                           \
+} while(0)
+
+
+#else
+# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+#endif
+
+/*
+ * NMI events can occur at any time, including when interrupts have been
+ * disabled by *_irqsave().  So you can get NMI events occurring while a
+ * *_bit function is holding a spin lock.  If the NMI handler also wants
+ * to do bit manipulation (and they do) then you can get a deadlock
+ * between the original caller of *_bit() and the NMI handler.
+ *
+ * by Keith Owens
+ */
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+        unsigned long flags;
+
+        _atomic_spin_lock_irqsave(p, flags);
+        *p |= mask;
+        _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+        unsigned long flags;
+
+        _atomic_spin_lock_irqsave(p, flags);
+        *p &= ~mask;
+        _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered on x86.  It may be
+ * reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+        unsigned long flags;
+
+        _atomic_spin_lock_irqsave(p, flags);
+        *p ^= mask;
+        _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered on x86.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+        unsigned long old;
+        unsigned long flags;
+
+        _atomic_spin_lock_irqsave(p, flags);
+        old = *p;
+        *p = old | mask;
+        _atomic_spin_unlock_irqrestore(p, flags);
+
+        return (old & mask) != 0;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered on x86.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+        unsigned long old;
+        unsigned long flags;
+
+        _atomic_spin_lock_irqsave(p, flags);
+        old = *p;
+        *p = old & ~mask;
+        _atomic_spin_unlock_irqrestore(p, flags);
+
+        return (old & mask) != 0;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+        unsigned long old;
+        unsigned long flags;
+
+        _atomic_spin_lock_irqsave(p, flags);
+        old = *p;
+        *p = old ^ mask;
+        _atomic_spin_unlock_irqrestore(p, flags);
+
+        return (old & mask) != 0;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
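Since portable C has no atomic read-modify-write on an arbitrary word, the SMP path above serializes every bitop through one of ATOMIC_HASH_SIZE spinlocks chosen by hashing the word's address per cacheline, so unrelated bitmaps rarely contend on the same lock. A rough userspace analogy of that scheme (not the kernel code; pthread mutexes and the demo_* names are hypothetical stand-ins):

    #include <assert.h>
    #include <pthread.h>
    #include <stdint.h>

    /* Pick one of a small pool of locks based on the cacheline the
     * target word lives in, like ATOMIC_HASH() does above. */
    #define DEMO_NLOCKS        4
    #define DEMO_CACHE_BYTES   64

    static pthread_mutex_t demo_locks[DEMO_NLOCKS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    static pthread_mutex_t *demo_hash(const void *addr)
    {
        return &demo_locks[((uintptr_t)addr / DEMO_CACHE_BYTES) % DEMO_NLOCKS];
    }

    /* Same shape as test_and_set_bit(): read-modify-write under the
     * hashed lock, return the previous value of the bit. */
    static int demo_test_and_set_bit(int nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << (nr % (8 * sizeof(unsigned long)));
        unsigned long *p = addr + nr / (8 * sizeof(unsigned long));
        pthread_mutex_t *lock = demo_hash(p);
        unsigned long old;

        pthread_mutex_lock(lock);
        old = *p;
        *p = old | mask;
        pthread_mutex_unlock(lock);
        return (old & mask) != 0;
    }

    int main(void)
    {
        unsigned long map[2] = { 0, 0 };

        assert(demo_test_and_set_bit(5, map) == 0);   /* was clear */
        assert(demo_test_and_set_bit(5, map) == 1);   /* now set   */
        return 0;
    }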
diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h
new file mode 100644
index 000000000000..ab1c875efb74
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-atomic.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+
+#define ext2_set_bit_atomic(lock, nr, addr)             \
+        ({                                              \
+                int ret;                                \
+                spin_lock(lock);                        \
+                ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
+                spin_unlock(lock);                      \
+                ret;                                    \
+        })
+
+#define ext2_clear_bit_atomic(lock, nr, addr)           \
+        ({                                              \
+                int ret;                                \
+                spin_lock(lock);                        \
+                ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
+                spin_unlock(lock);                      \
+                ret;                                    \
+        })
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h
new file mode 100644
index 000000000000..1697404afa05
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-non-atomic.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
+
+#include <asm-generic/bitops/le.h>
+
+#define ext2_set_bit(nr,addr) \
+        generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
+#define ext2_clear_bit(nr,addr) \
+        generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
+
+#define ext2_test_bit(nr,addr) \
+        generic_test_le_bit((nr),(unsigned long *)(addr))
+#define ext2_find_first_zero_bit(addr, size) \
+        generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
+#define ext2_find_next_zero_bit(addr, size, off) \
+        generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
new file mode 100644
index 000000000000..fbbb43af7dc0
--- /dev/null
+++ b/include/asm-generic/bitops/ffs.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_GENERIC_BITOPS_FFS_H_
+#define _ASM_GENERIC_BITOPS_FFS_H_
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin
+ * ffs routines (man ffs): ffs(0) returns 0 and set bits are numbered
+ * starting at 1, so it differs in spirit from ffz() and __ffs().
+ */
+static inline int ffs(int x)
+{
+        int r = 1;
+
+        if (!x)
+                return 0;
+        if (!(x & 0xffff)) {
+                x >>= 16;
+                r += 16;
+        }
+        if (!(x & 0xff)) {
+                x >>= 8;
+                r += 8;
+        }
+        if (!(x & 0xf)) {
+                x >>= 4;
+                r += 4;
+        }
+        if (!(x & 3)) {
+                x >>= 2;
+                r += 2;
+        }
+        if (!(x & 1)) {
+                x >>= 1;
+                r += 1;
+        }
+        return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
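Because ffs() here is meant to match the libc/compiler convention, the expected values can be sanity-checked in userspace against __builtin_ffs (an illustration only, not part of the patch):

    #include <assert.h>

    int main(void)
    {
        /* ffs()-style numbering starts at 1; 0 means "no bit set". */
        assert(__builtin_ffs(0) == 0);
        assert(__builtin_ffs(1) == 1);           /* bit 0  -> 1  */
        assert(__builtin_ffs(0x10) == 5);        /* bit 4  -> 5  */
        assert(__builtin_ffs(0x8000) == 16);     /* bit 15 -> 16 */
        return 0;
    }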
diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h
new file mode 100644
index 000000000000..6744bd4cdf46
--- /dev/null
+++ b/include/asm-generic/bitops/ffz.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
+#define _ASM_GENERIC_BITOPS_FFZ_H_
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x)  __ffs(~(x))
+
+#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
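ffz(x) is literally __ffs(~x): the lowest clear bit of x is the lowest set bit of its complement. A one-line check of that identity, using __builtin_ctzl as a stand-in for __ffs() (illustration only):

    #include <assert.h>

    int main(void)
    {
        unsigned long x = 0x00ffUL;       /* low 8 bits set */
        /* first zero bit of x == first set bit of ~x == bit 8 */
        assert(__builtin_ctzl(~x) == 8);
        return 0;
    }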
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
new file mode 100644
index 000000000000..72a51e5a12ef
--- /dev/null
+++ b/include/asm-generic/bitops/find.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_GENERIC_BITOPS_FIND_H_
+#define _ASM_GENERIC_BITOPS_FIND_H_
+
+extern unsigned long find_next_bit(const unsigned long *addr,
+                unsigned long size, unsigned long offset);
+
+extern unsigned long find_next_zero_bit(const unsigned long *addr,
+                unsigned long size, unsigned long offset);
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#endif /* _ASM_GENERIC_BITOPS_FIND_H_ */
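Only the contract is visible here; the extern routines are provided out of line elsewhere, and the macros show that "first" is just "next, starting from offset 0". A naive bit-at-a-time reference (hypothetical demo_find_next_bit, assuming the usual convention of returning @size when no bit is found) illustrates what callers expect:

    #include <assert.h>

    /* Hypothetical one-bit-at-a-time reference for the declared contract:
     * returns the lowest set-bit index >= offset, or size if none exists.
     * (A real implementation would work a word at a time.) */
    static unsigned long demo_find_next_bit(const unsigned long *addr,
                                            unsigned long size,
                                            unsigned long offset)
    {
        const unsigned long bpl = 8 * sizeof(unsigned long);

        for (; offset < size; offset++)
            if (addr[offset / bpl] & (1UL << (offset % bpl)))
                return offset;
        return size;
    }

    int main(void)
    {
        unsigned long map[2] = { 0, 0 };

        map[1] |= 1UL << 3;     /* set bit BITS_PER_LONG + 3 */
        assert(demo_find_next_bit(map, 2 * 8 * sizeof(unsigned long), 0)
               == 8 * sizeof(unsigned long) + 3);
        return 0;
    }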
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
new file mode 100644
index 000000000000..850859bc5069
--- /dev/null
+++ b/include/asm-generic/bitops/fls.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_GENERIC_BITOPS_FLS_H_
+#define _ASM_GENERIC_BITOPS_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static inline int fls(int x)
+{
+        int r = 32;
+
+        if (!x)
+                return 0;
+        if (!(x & 0xffff0000u)) {
+                x <<= 16;
+                r -= 16;
+        }
+        if (!(x & 0xff000000u)) {
+                x <<= 8;
+                r -= 8;
+        }
+        if (!(x & 0xf0000000u)) {
+                x <<= 4;
+                r -= 4;
+        }
+        if (!(x & 0xc0000000u)) {
+                x <<= 2;
+                r -= 2;
+        }
+        if (!(x & 0x80000000u)) {
+                x <<= 1;
+                r -= 1;
+        }
+        return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
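The kernel-doc gives the convention: fls(0) = 0, otherwise the 1-based index of the most significant set bit. A userspace spot check using 32 - __builtin_clz() as a stand-in (hypothetical demo_fls, not part of the patch):

    #include <assert.h>
    #include <limits.h>

    /* fls(x) for x != 0 equals 32 - clz(x); fls(0) is defined as 0. */
    static int demo_fls(int x)
    {
        return x ? 32 - __builtin_clz((unsigned int)x) : 0;
    }

    int main(void)
    {
        assert(demo_fls(0) == 0);
        assert(demo_fls(1) == 1);
        assert(demo_fls(0x00ff0000) == 24);   /* highest bit 23 -> 24  */
        assert(demo_fls(INT_MIN) == 32);      /* 0x80000000, bit 31 -> 32 */
        return 0;
    }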
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
new file mode 100644
index 000000000000..1b6b17ce2428
--- /dev/null
+++ b/include/asm-generic/bitops/fls64.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
+#define _ASM_GENERIC_BITOPS_FLS64_H_
+
+#include <asm/types.h>
+
+static inline int fls64(__u64 x)
+{
+        __u32 h = x >> 32;
+        if (h)
+                return fls(h) + 32;
+        return fls(x);
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
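fls64() simply splits the value: if any of the high 32 bits is set, the answer is fls() of the high half plus 32; otherwise it is fls() of the low half. A small worked check with a hypothetical demo_fls64 built on the clz builtin (illustration only):

    #include <assert.h>
    #include <stdint.h>

    /* Same split as fls64() above, with the builtin standing in for fls(). */
    static int demo_fls64(uint64_t x)
    {
        uint32_t h = x >> 32;
        if (h)
            return (32 - __builtin_clz(h)) + 32;
        return x ? 32 - __builtin_clz((uint32_t)x) : 0;
    }

    int main(void)
    {
        assert(demo_fls64(0) == 0);
        assert(demo_fls64(1) == 1);
        assert(demo_fls64(0x100000000ULL) == 33);   /* bit 32 -> 33 */
        assert(demo_fls64(1ULL << 63) == 64);
        return 0;
    }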
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
new file mode 100644
index 000000000000..fbbc383771da
--- /dev/null
+++ b/include/asm-generic/bitops/hweight.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <asm/types.h>
+
+extern unsigned int hweight32(unsigned int w);
+extern unsigned int hweight16(unsigned int w);
+extern unsigned int hweight8(unsigned int w);
+extern unsigned long hweight64(__u64 w);
+
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
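These externs compute the Hamming weight, i.e. the number of set bits, of 8-, 16-, 32- and 64-bit values; only the declarations live in this header. The expected results can be illustrated with the GCC popcount builtins (illustration only, not the kernel implementation):

    #include <assert.h>

    int main(void)
    {
        /* "Hamming weight" = number of set bits. */
        assert(__builtin_popcount(0) == 0);
        assert(__builtin_popcount(0xff) == 8);
        assert(__builtin_popcountll(0xf0f0f0f0f0f0f0f0ULL) == 32);
        return 0;
    }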
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
new file mode 100644
index 000000000000..b9c7e5d2d2ad
--- /dev/null
+++ b/include/asm-generic/bitops/le.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_GENERIC_BITOPS_LE_H_
+#define _ASM_GENERIC_BITOPS_LE_H_
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
+#define BITOP_LE_SWIZZLE        ((BITS_PER_LONG-1) & ~0x7)
+
+#if defined(__LITTLE_ENDIAN)
+
+#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
+#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
+#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
+
+#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
+#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
+
+#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
+#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
+
+#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
+
+#elif defined(__BIG_ENDIAN)
+
+#define generic_test_le_bit(nr, addr) \
+        test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic___set_le_bit(nr, addr) \
+        __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic___clear_le_bit(nr, addr) \
+        __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define generic_test_and_set_le_bit(nr, addr) \
+        test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic_test_and_clear_le_bit(nr, addr) \
+        test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define generic___test_and_set_le_bit(nr, addr) \
+        __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic___test_and_clear_le_bit(nr, addr) \
+        __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
+                unsigned long size, unsigned long offset);
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define generic_find_first_zero_le_bit(addr, size) \
+        generic_find_next_zero_le_bit((addr), (size), 0)
+
+#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
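The big-endian case is the subtle one: BITOP_LE_SWIZZLE is 56 on a 64-bit platform (24 on 32-bit), and XOR-ing the little-endian bit number with it yields the native bit index that addresses the same bit of memory once the bytes of a long are laid out most-significant first; bits within a byte keep their order. A worked check of the mapping, assuming BITS_PER_LONG == 64 (illustration only):

    #include <assert.h>

    int main(void)
    {
        /* Assuming BITS_PER_LONG == 64: SWIZZLE = 63 & ~7 = 56. */
        const int swizzle = 56;

        /* LE bit 0 is bit 0 of byte 0; on big-endian, byte 0 is the
         * most significant byte of the long, i.e. native bit 56. */
        assert((0 ^ swizzle) == 56);
        /* Bits within one byte keep their order: LE bit 5 -> 61.   */
        assert((5 ^ swizzle) == 61);
        /* LE bit 8 is bit 0 of byte 1 -> native bit 48.            */
        assert((8 ^ swizzle) == 48);
        /* LE bit 63 (byte 7, bit 7) -> native bit 7.               */
        assert((63 ^ swizzle) == 7);
        return 0;
    }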
diff --git a/include/asm-generic/bitops/minix-le.h b/include/asm-generic/bitops/minix-le.h
new file mode 100644
index 000000000000..4a981c1bb1ae
--- /dev/null
+++ b/include/asm-generic/bitops/minix-le.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_
+#define _ASM_GENERIC_BITOPS_MINIX_LE_H_
+
+#include <asm-generic/bitops/le.h>
+
+#define minix_test_and_set_bit(nr,addr) \
+        generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
+#define minix_set_bit(nr,addr) \
+        generic___set_le_bit((nr),(unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr,addr) \
+        generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
+#define minix_test_bit(nr,addr) \
+        generic_test_le_bit((nr),(unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr,size) \
+        generic_find_first_zero_le_bit((unsigned long *)(addr),(size))
+
+#endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */
diff --git a/include/asm-generic/bitops/minix.h b/include/asm-generic/bitops/minix.h
new file mode 100644
index 000000000000..91f42e87aa51
--- /dev/null
+++ b/include/asm-generic/bitops/minix.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_GENERIC_BITOPS_MINIX_H_
+#define _ASM_GENERIC_BITOPS_MINIX_H_
+
+#define minix_test_and_set_bit(nr,addr) \
+        __test_and_set_bit((nr),(unsigned long *)(addr))
+#define minix_set_bit(nr,addr) \
+        __set_bit((nr),(unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr,addr) \
+        __test_and_clear_bit((nr),(unsigned long *)(addr))
+#define minix_test_bit(nr,addr) \
+        test_bit((nr),(unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr,size) \
+        find_first_zero_bit((unsigned long *)(addr),(size))
+
+#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
new file mode 100644
index 000000000000..46a825cf2ae1
--- /dev/null
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -0,0 +1,111 @@
+#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+
+#include <asm/types.h>
+
+#define BITOP_MASK(nr)          (1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+        *p |= mask;
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+        *p &= ~mask;
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+        *p ^= mask;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+        unsigned long old = *p;
+
+        *p = old | mask;
+        return (old & mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+        unsigned long old = *p;
+
+        *p = old & ~mask;
+        return (old & mask) != 0;
+}
+
+/* WARNING: non-atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr,
+                                            volatile unsigned long *addr)
+{
+        unsigned long mask = BITOP_MASK(nr);
+        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+        unsigned long old = *p;
+
+        *p = old ^ mask;
+        return (old & mask) != 0;
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+        return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
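The double-underscore variants perform the same read-modify-write as atomic.h but with no locking, so the returned old value is only trustworthy when the caller already serializes access. A single-threaded sketch of that contract, using a hypothetical userspace copy of the __test_and_set_bit() body (not part of the patch):

    #include <assert.h>

    /* Userspace copy of the __test_and_set_bit() logic above; safe here
     * only because nothing else touches the word concurrently. */
    static int demo_test_and_set(int nr, unsigned long *addr)
    {
        unsigned long mask = 1UL << (nr % (8 * sizeof(unsigned long)));
        unsigned long *p = addr + nr / (8 * sizeof(unsigned long));
        unsigned long old = *p;

        *p = old | mask;
        return (old & mask) != 0;
    }

    int main(void)
    {
        unsigned long word = 0;

        assert(demo_test_and_set(3, &word) == 0);  /* was clear */
        assert(demo_test_and_set(3, &word) == 1);  /* now set   */
        assert(word == (1UL << 3));
        return 0;
    }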
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
new file mode 100644
index 000000000000..5ef93a4d009f
--- /dev/null
+++ b/include/asm-generic/bitops/sched.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
+#define _ASM_GENERIC_BITOPS_SCHED_H_
+
+#include <linux/compiler.h>     /* unlikely() */
+#include <asm/types.h>
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is set.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#if BITS_PER_LONG == 64
+        if (unlikely(b[0]))
+                return __ffs(b[0]);
+        if (unlikely(b[1]))
+                return __ffs(b[1]) + 64;
+        return __ffs(b[2]) + 128;
+#elif BITS_PER_LONG == 32
+        if (unlikely(b[0]))
+                return __ffs(b[0]);
+        if (unlikely(b[1]))
+                return __ffs(b[1]) + 32;
+        if (unlikely(b[2]))
+                return __ffs(b[2]) + 64;
+        if (b[3])
+                return __ffs(b[3]) + 96;
+        return __ffs(b[4]) + 128;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */
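The function relies on the comment's guarantee that at least one bit is set, so __ffs() is never handed an all-zero word; the word index contributes the multiple of BITS_PER_LONG and __ffs() the offset within the word. A userspace sketch of the 32-bit branch (hypothetical demo name, __builtin_ctz standing in for __ffs(); not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* 32-bit-long layout from the #elif branch above: five words cover
     * 160 bits, of which the scheduler uses 140 priority levels. */
    static int demo_sched_find_first_bit(const uint32_t b[5])
    {
        int i;

        for (i = 0; i < 5; i++)
            if (b[i])
                return 32 * i + __builtin_ctz(b[i]);
        return -1;      /* unreachable when one bit is guaranteed set */
    }

    int main(void)
    {
        uint32_t b[5] = { 0, 0, 0, 0, 0 };

        b[3] |= 1u << 7;        /* priority 3*32 + 7 = 103 */
        assert(demo_sched_find_first_bit(b) == 103);
        return 0;
    }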