Diffstat (limited to 'include/linux/seqlock.h')
 include/linux/seqlock.h | 108 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 101 insertions(+), 7 deletions(-)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 21a209336e79..cf87a24c0f92 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -34,6 +34,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
+#include <linux/lockdep.h>
 #include <asm/processor.h>
 
 /*
@@ -44,10 +45,50 @@
  */
 typedef struct seqcount {
 	unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } seqcount_t;
 
-#define SEQCNT_ZERO { 0 }
-#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
+static inline void __seqcount_init(seqcount_t *s, const char *name,
+					  struct lock_class_key *key)
+{
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	lockdep_init_map(&s->dep_map, name, key, 0);
+	s->sequence = 0;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+		.dep_map = { .name = #lockname } \
+
+# define seqcount_init(s)				\
+	do {						\
+		static struct lock_class_key __key;	\
+		__seqcount_init((s), #s, &__key);	\
+	} while (0)
+
+static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
+{
+	seqcount_t *l = (seqcount_t *)s;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
+	seqcount_release(&l->dep_map, 1, _RET_IP_);
+	local_irq_restore(flags);
+}
+
+#else
+# define SEQCOUNT_DEP_MAP_INIT(lockname)
+# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
+# define seqcount_lockdep_reader_access(x)
+#endif
+
+#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
+
 
 /**
  * __read_seqcount_begin - begin a seq-read critical section (without barrier)
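To see how the reworked initializers fit together, here is a minimal usage sketch; struct foo, foo_seq and foo_init are hypothetical names, not part of this patch:

	#include <linux/seqlock.h>

	/* Static init: the lockname argument becomes the lockdep class name. */
	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);

	/* Hypothetical object embedding a sequence counter. */
	struct foo {
		seqcount_t seq;
		u64 value;
	};

	static void foo_init(struct foo *f)
	{
		/*
		 * Dynamic init: under CONFIG_DEBUG_LOCK_ALLOC the macro
		 * declares a static lock_class_key, so every init call
		 * site gets its own lockdep class.
		 */
		seqcount_init(&f->seq);
		f->value = 0;
	}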
@@ -76,6 +117,22 @@ repeat:
 }
 
 /**
+ * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * seqcount, but without any lockdep checking. The validity of the critical
+ * section is still tested by calling read_seqcount_retry().
+ */
+static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+{
+	unsigned ret = __read_seqcount_begin(s);
+	smp_rmb();
+	return ret;
+}
+
+/**
  * read_seqcount_begin - begin a seq-read critical section
  * @s: pointer to seqcount_t
  * Returns: count to be passed to read_seqcount_retry
@@ -86,9 +143,8 @@ repeat:
  */
 static inline unsigned read_seqcount_begin(const seqcount_t *s)
 {
-	unsigned ret = __read_seqcount_begin(s);
-	smp_rmb();
-	return ret;
+	seqcount_lockdep_reader_access(s);
+	return read_seqcount_begin_no_lockdep(s);
 }
 
 /**
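With that in place, a typical lockless read side (again using the hypothetical struct foo) picks up lockdep validation transparently, since read_seqcount_begin() now performs the reader check before delegating to read_seqcount_begin_no_lockdep():

	static u64 foo_read(struct foo *f)
	{
		unsigned seq;
		u64 val;

		do {
			/* Spins until no writer holds the counter. */
			seq = read_seqcount_begin(&f->seq);
			val = f->value;
			/* Retry if a writer raced with the read. */
		} while (read_seqcount_retry(&f->seq, seq));

		return val;
	}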
@@ -108,6 +164,8 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret = ACCESS_ONCE(s->sequence);
+
+	seqcount_lockdep_reader_access(s);
 	smp_rmb();
 	return ret & ~1;
 }
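Unlike read_seqcount_begin(), raw_seqcount_begin() never spins waiting for a writer: masking the count to an even value guarantees that a concurrent write simply makes the later read_seqcount_retry() fail. A sketch of a non-blocking try-read built on it (foo_try_read is illustrative only):

	static bool foo_try_read(struct foo *f, u64 *out)
	{
		unsigned seq = raw_seqcount_begin(&f->seq);

		*out = f->value;
		/* False, rather than blocking, if a writer was active. */
		return !read_seqcount_retry(&f->seq, seq);
	}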
@@ -152,14 +210,21 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
  * Sequence counter only version assumes that callers are using their
  * own mutexing.
  */
-static inline void write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
 	s->sequence++;
 	smp_wmb();
+	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+	write_seqcount_begin_nested(s, 0);
 }
 
 static inline void write_seqcount_end(seqcount_t *s)
 {
+	seqcount_release(&s->dep_map, 1, _RET_IP_);
 	smp_wmb();
 	s->sequence++;
 }
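The write side still relies on the caller's own serialization; the new acquire/release hooks only make that section visible to lockdep. A sketch with an assumed external spinlock:

	static DEFINE_SPINLOCK(foo_lock);

	static void foo_update(struct foo *f, u64 v)
	{
		spin_lock(&foo_lock);
		write_seqcount_begin(&f->seq);	/* lockdep sees the acquire */
		f->value = v;
		write_seqcount_end(&f->seq);	/* ... and the release */
		spin_unlock(&foo_lock);
	}

Where two counters of the same lockdep class must legitimately nest, the inner one can use write_seqcount_begin_nested(s, SINGLE_DEPTH_NESTING), mirroring spin_lock_nested().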
@@ -188,7 +253,7 @@ typedef struct {
  */
 #define __SEQLOCK_UNLOCKED(lockname)			\
 	{						\
-		.seqcount = SEQCNT_ZERO,		\
+		.seqcount = SEQCNT_ZERO(lockname),	\
 		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
 	}
 
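As a result, a statically defined seqlock (DEFINE_SEQLOCK() expands to __SEQLOCK_UNLOCKED()) now threads its name through to the embedded seqcount's lockdep map:

	/* Expands to: seqlock_t foo_seqlock = __SEQLOCK_UNLOCKED(foo_seqlock); */
	static DEFINE_SEQLOCK(foo_seqlock);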
@@ -289,6 +354,35 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
 	spin_unlock(&sl->lock);
 }
 
+/**
+ * read_seqbegin_or_lock - begin a sequence number check or locking block
+ * @lock: sequence lock
+ * @seq: sequence number to be checked
+ *
+ * First try it once optimistically without taking the lock. If that fails,
+ * take the lock. The sequence number is also used as a marker for deciding
+ * whether to be a reader (even) or writer (odd).
+ * N.B. seq must be initialized to an even number to begin with.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+	if (!(*seq & 1))	/* Even */
+		*seq = read_seqbegin(lock);
+	else			/* Odd */
+		read_seqlock_excl(lock);
+}
+
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+	return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+	if (seq & 1)
+		read_sequnlock_excl(lock);
+}
+
 static inline void read_seqlock_excl_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
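A sketch of the intended calling pattern for these helpers (names illustrative): start lockless with an even seq, and if the snapshot was torn, force seq odd so the second pass runs under the lock and cannot fail:

	static u64 foo_read_stable(seqlock_t *lock, struct foo *f)
	{
		int seq, nextseq = 0;	/* even: first pass is lockless */
		u64 val;

		do {
			seq = nextseq;
			read_seqbegin_or_lock(lock, &seq);
			val = f->value;
			nextseq = 1;	/* odd: any retry takes the lock */
		} while (need_seqretry(lock, seq));
		done_seqretry(lock, seq);

		return val;
	}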