diff options
Diffstat (limited to 'include/linux/seqlock.h')
-rw-r--r-- | include/linux/seqlock.h | 193 |
1 files changed, 91 insertions, 102 deletions
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 600060e25ec6..18299057402f 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
@@ -30,92 +30,12 @@ | |||
30 | #include <linux/preempt.h> | 30 | #include <linux/preempt.h> |
31 | #include <asm/processor.h> | 31 | #include <asm/processor.h> |
32 | 32 | ||
/*
 * Sequence lock: a sequence counter paired with a spinlock that
 * serializes writers.  Readers never take the lock; they re-read the
 * sequence and retry if a writer was active (see read_seqbegin()).
 */
typedef struct {
	unsigned sequence;	/* bumped on write entry and exit; odd = write in progress */
	spinlock_t lock;	/* serializes writers only */
} seqlock_t;
37 | |||
/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
/* Static initializer: sequence 0 (no writer active), lock unlocked. */
#define __SEQLOCK_UNLOCKED(lockname) \
		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
44 | |||
/* Runtime initializer for a seqlock that cannot be statically initialized. */
#define seqlock_init(x)					\
	do {						\
		(x)->sequence = 0;			\
		spin_lock_init(&(x)->lock);		\
	} while (0)

/* Define and statically initialize a seqlock in one step. */
#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
53 | |||
/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	++sl->sequence;		/* sequence goes odd: readers in read_seqbegin() will spin */
	smp_wmb();		/* order the increment before the writer's data stores */
}
64 | |||
/*
 * Leave the write section: bump the sequence back to even and release
 * the writer lock.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	smp_wmb();		/* order the writer's data stores before the increment */
	sl->sequence++;		/* even again; racing readers see a changed count and retry */
	spin_unlock(&sl->lock);
}
71 | |||
/*
 * Try to enter the write section without spinning on the lock.
 * Returns non-zero on success (lock held, sequence bumped to odd),
 * 0 if another writer already holds the lock.
 */
static inline int write_tryseqlock(seqlock_t *sl)
{
	int ret = spin_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;	/* mark write in progress */
		smp_wmb();	/* order the increment before the data stores */
	}
	return ret;
}
82 | |||
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(sl->sequence);
	if (unlikely(ret & 1)) {	/* odd: a writer is mid-update, wait it out */
		cpu_relax();
		goto repeat;
	}
	smp_rmb();	/* order the sequence load before the reader's data loads */

	return ret;
}
98 | |||
/*
 * Test if reader processed invalid data.
 *
 * If sequence value changed then writer changed data while in section.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
	smp_rmb();	/* order the reader's data loads before the final sequence re-read */

	return unlikely(sl->sequence != start);
}
110 | |||
111 | |||
/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */

typedef struct seqcount {
	unsigned sequence;	/* bumped around writes; odd = write in progress */
} seqcount_t;
@@ -218,7 +138,6 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) | |||
/*
 * Finish a seqcount read section: non-zero means a writer raced with
 * the reader and the section must be retried.  The smp_rmb() orders
 * the reader's data loads before the sequence re-read done by
 * __read_seqcount_retry().
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
224 | 143 | ||
@@ -252,31 +171,101 @@ static inline void write_seqcount_barrier(seqcount_t *s) | |||
252 | s->sequence+=2; | 171 | s->sequence+=2; |
253 | } | 172 | } |
254 | 173 | ||
/*
 * Sequence lock layered on a sequence counter plus a spinlock that
 * serializes writers.  Readers use only the embedded seqcount.
 */
typedef struct {
	struct seqcount seqcount;	/* retry protocol for lockless readers */
	spinlock_t lock;		/* writer serialization */
} seqlock_t;
/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
/* Static initializer: zeroed seqcount, unlocked writer spinlock. */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO,		\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

/* Runtime initializer for a seqlock that cannot be statically initialized. */
#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

/* Define and statically initialize a seqlock in one step. */
#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
271 | 197 | ||
/*
 * Read side functions for starting and finalizing a read side section.
 */
/* Begin a read section; the returned token is passed to read_seqretry(). */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}
274 | 205 | ||
/*
 * Finish a read section started at @start; non-zero means a writer
 * raced with the reader and the section must be retried.
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}
/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}
221 | |||
/* Leave the write section and release the writer lock. */
static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
227 | |||
/* Writer entry that also disables bottom halves (softirqs) via spin_lock_bh(). */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}
233 | |||
/* Writer exit matching write_seqlock_bh(): re-enables bottom halves. */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}
239 | |||
/* Writer entry that also disables local interrupts via spin_lock_irq(). */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}
245 | |||
/* Writer exit matching write_seqlock_irq(): re-enables local interrupts. */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}
251 | |||
/*
 * Writer entry that saves and disables local interrupt state; returns
 * the saved flags for the matching write_sequnlock_irqrestore().
 */
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

/* Macro wrapper so the caller's @flags lvalue can be assigned directly. */
#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)
263 | |||
/*
 * Writer exit matching write_seqlock_irqsave(): restores the interrupt
 * state saved in @flags.
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
281 | 270 | ||
282 | #endif /* __LINUX_SEQLOCK_H */ | 271 | #endif /* __LINUX_SEQLOCK_H */ |