Diffstat (limited to 'include/asm-generic/bitops/atomic.h'):
 include/asm-generic/bitops/atomic.h | 188 +++++-------------------------
 1 file changed, 33 insertions(+), 155 deletions(-)
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 04deffaf5f7d..dd90c9792909 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,189 +2,67 @@
 #ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
 #define _ASM_GENERIC_BITOPS_ATOMIC_H_
 
-#include <asm/types.h>
-#include <linux/irqflags.h>
-
-#ifdef CONFIG_SMP
-#include <asm/spinlock.h>
-#include <asm/cache.h>  /* we use L1_CACHE_BYTES */
-
-/* Use an array of spinlocks for our atomic_ts.
- * Hash function to index into a different SPINLOCK.
- * Since "a" is usually an address, use one spinlock per cacheline.
- */
-# define ATOMIC_HASH_SIZE 4
-# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-
-extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
-
-/* Can't use raw_spin_lock_irq because of #include problems, so
- * this is the substitute */
-#define _atomic_spin_lock_irqsave(l,f) do {     \
-        arch_spinlock_t *s = ATOMIC_HASH(l);    \
-        local_irq_save(f);                      \
-        arch_spin_lock(s);                      \
-} while(0)
-
-#define _atomic_spin_unlock_irqrestore(l,f) do {        \
-        arch_spinlock_t *s = ATOMIC_HASH(l);            \
-        arch_spin_unlock(s);                            \
-        local_irq_restore(f);                           \
-} while(0)
-
-
-#else
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
-#endif
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
 
 /*
- * NMI events can occur at any time, including when interrupts have been
- * disabled by *_irqsave().  So you can get NMI events occurring while a
- * *_bit function is holding a spin lock.  If the NMI handler also wants
- * to do bit manipulation (and they do) then you can get a deadlock
- * between the original caller of *_bit() and the NMI handler.
- *
- * by Keith Owens
+ * Implementation of atomic bitops using atomic-fetch ops.
+ * See Documentation/atomic_bitops.txt for details.
  */
 
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non-x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *p)
 {
-        unsigned long mask = BIT_MASK(nr);
-        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-        unsigned long flags;
-
-        _atomic_spin_lock_irqsave(p, flags);
-        *p |= mask;
-        _atomic_spin_unlock_irqrestore(p, flags);
+        p += BIT_WORD(nr);
+        atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
 {
-        unsigned long mask = BIT_MASK(nr);
-        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-        unsigned long flags;
-
-        _atomic_spin_lock_irqsave(p, flags);
-        *p &= ~mask;
-        _atomic_spin_unlock_irqrestore(p, flags);
+        p += BIT_WORD(nr);
+        atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.  It may be
- * reordered on architectures other than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(unsigned int nr, volatile unsigned long *p)
 {
-        unsigned long mask = BIT_MASK(nr);
-        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-        unsigned long flags;
-
-        _atomic_spin_lock_irqsave(p, flags);
-        *p ^= mask;
-        _atomic_spin_unlock_irqrestore(p, flags);
+        p += BIT_WORD(nr);
+        atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
 {
+        long old;
         unsigned long mask = BIT_MASK(nr);
-        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-        unsigned long old;
-        unsigned long flags;
 
-        _atomic_spin_lock_irqsave(p, flags);
-        old = *p;
-        *p = old | mask;
-        _atomic_spin_unlock_irqrestore(p, flags);
+        p += BIT_WORD(nr);
+        if (READ_ONCE(*p) & mask)
+                return 1;
 
-        return (old & mask) != 0;
+        old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+        return !!(old & mask);
 }
 
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reordered on architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
+        long old;
         unsigned long mask = BIT_MASK(nr);
-        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-        unsigned long old;
-        unsigned long flags;
 
-        _atomic_spin_lock_irqsave(p, flags);
-        old = *p;
-        *p = old & ~mask;
-        _atomic_spin_unlock_irqrestore(p, flags);
+        p += BIT_WORD(nr);
+        if (!(READ_ONCE(*p) & mask))
+                return 0;
 
-        return (old & mask) != 0;
+        old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+        return !!(old & mask);
 }
 
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
 {
+        long old;
         unsigned long mask = BIT_MASK(nr);
-        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-        unsigned long old;
-        unsigned long flags;
-
-        _atomic_spin_lock_irqsave(p, flags);
-        old = *p;
-        *p = old ^ mask;
-        _atomic_spin_unlock_irqrestore(p, flags);
 
-        return (old & mask) != 0;
+        p += BIT_WORD(nr);
+        old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+        return !!(old & mask);
 }
 
 #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
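
For context on the deleted half of the hunk: the old generic implementation guarded every
bitop with a small hash of spinlocks. ATOMIC_HASH() maps the address of the word being
modified to one of ATOMIC_HASH_SIZE locks, one lock per L1 cacheline, so bitops on
different cachelines rarely contend while all bits within one cacheline serialize on the
same lock. The sketch below transplants that scheme to userspace and is illustrative
only: pthread spinlocks stand in for arch_spinlock_t, a 64-byte L1_CACHE_BYTES is
assumed, set_bit_locked() is a hypothetical name, and the local_irq_save() half of
_atomic_spin_lock_irqsave() has no userspace analogue, so it is omitted.

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))

#define L1_CACHE_BYTES   64     /* assumed; arch-specific in the kernel */
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH(a) \
        (&__atomic_hash[((unsigned long)(a) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1)])

static pthread_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];

/* Lock-based set_bit(): all bits in one cacheline hash to the same
 * lock and serialize; bits in different cachelines usually do not. */
static void set_bit_locked(unsigned int nr, volatile unsigned long *p)
{
        unsigned long mask = BIT_MASK(nr);
        pthread_spinlock_t *s;

        p += BIT_WORD(nr);              /* word containing the bit */
        s = ATOMIC_HASH(p);             /* lock covering that word's cacheline */
        pthread_spin_lock(s);
        *p |= mask;
        pthread_spin_unlock(s);
}

int main(void)
{
        unsigned long bitmap[8] = { 0 };
        int i;

        for (i = 0; i < ATOMIC_HASH_SIZE; i++)
                pthread_spin_init(&__atomic_hash[i], PTHREAD_PROCESS_PRIVATE);

        set_bit_locked(131, bitmap);
        printf("word %u: %#lx\n", (unsigned int)BIT_WORD(131), bitmap[BIT_WORD(131)]);
        return 0;
}

Build with -lpthread. Note that even the real scheme's interrupt disabling could not make
it NMI-safe: as the removed comment by Keith Owens warns, an NMI handler that itself
calls a *_bit() function can deadlock against an interrupted lock holder, which is part
of the motivation for the lockless rewrite.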
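On the added side, each bitop is built from an atomic fetch op on the containing word:
set/clear/change become atomic_long_or()/atomic_long_andnot()/atomic_long_xor(), and the
test_and_* variants recover the old value from the corresponding atomic_long_fetch_*()
call. test_and_set_bit() and test_and_clear_bit() additionally peek at the word with
READ_ONCE() and return early when the bit is already in the requested state, avoiding
the read-modify-write altogether. Here is a minimal sketch of the test_and_set_bit()
logic in portable C11 atomics, assuming <stdatomic.h> stands in for the kernel's
atomic_long_* API, a relaxed load models READ_ONCE(), and test_and_set_bit_c11() is a
hypothetical name.

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))

static int test_and_set_bit_c11(unsigned int nr, _Atomic unsigned long *p)
{
        unsigned long mask = BIT_MASK(nr);

        p += BIT_WORD(nr);

        /* Fast path, modeling READ_ONCE(): bit already set, skip the RMW. */
        if (atomic_load_explicit(p, memory_order_relaxed) & mask)
                return 1;

        /* atomic_fetch_or() returns the value *before* the OR, like
         * atomic_long_fetch_or(); test the bit in that old value. */
        return !!(atomic_fetch_or(p, mask) & mask);
}

int main(void)
{
        _Atomic unsigned long bitmap[4] = { 0 };

        printf("%d\n", test_and_set_bit_c11(68, bitmap));  /* 0: bit was clear */
        printf("%d\n", test_and_set_bit_c11(68, bitmap));  /* 1: already set */
        return 0;
}

One subtlety the sketch makes visible: when the fast path triggers, no read-modify-write
executes at all, so none of the ordering a successful atomic RMW would imply is provided
on that path; callers must not treat a test_and_set_bit() that returns 1 as a barrier.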