author		Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:49:51 -0500
committer	Nick Piggin <npiggin@kernel.dk>	2011-01-07 01:50:27 -0500
commit		3c22cd5709e8143444a6d08682a87f4c57902df3 (patch)
tree		6d245491d9103a1d77a3128ed5d1d996d3794300 /include
parent		ff0c7d15f9787b7e8c601533c015295cc68329f8 (diff)
kernel: optimise seqlock
Add branch annotations for the seqlock read fastpath, and introduce
__read_seqcount_begin and __read_seqcount_retry functions, which can avoid
the smp_rmb() if used carefully. These will be used by the store-free path
walking algorithm, where performance is critical and seqlocks are in use.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
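For context, the read-side pattern these helpers serve looks like the
following (a minimal sketch against the seqcount API in this header; the
stats structure and field names are hypothetical, not part of the patch):

	struct stats {
		seqcount_t seq;
		unsigned long a, b;
	};

	static unsigned long stats_sum(const struct stats *st)
	{
		unsigned seq;
		unsigned long sum;

		do {
			/* read_seqcount_begin() supplies the smp_rmb() */
			seq = read_seqcount_begin(&st->seq);
			sum = st->a + st->b;
			/* the unlikely() added below makes retry the slow path */
		} while (read_seqcount_retry(&st->seq, seq));

		return sum;
	}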
Diffstat (limited to 'include')
-rw-r--r--	include/linux/seqlock.h	80
1 file changed, 73 insertions(+), 7 deletions(-)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 632205ccc25d..e98cd2e57194 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -107,7 +107,7 @@ static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
 {
 	smp_rmb();
 
-	return (sl->sequence != start);
+	return unlikely(sl->sequence != start);
 }
 
 
@@ -125,14 +125,25 @@ typedef struct seqcount {
 #define SEQCNT_ZERO { 0 }
 #define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
 
-/* Start of read using pointer to a sequence counter only.  */
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
+/**
+ * __read_seqcount_begin - begin a seq-read critical section (without barrier)
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
+ * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
+ * provided before actually loading any of the variables that are to be
+ * protected in this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ */
+static inline unsigned __read_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret;
 
 repeat:
 	ret = s->sequence;
-	smp_rmb();
 	if (unlikely(ret & 1)) {
 		cpu_relax();
 		goto repeat;
@@ -140,14 +151,56 @@ repeat:
 	return ret;
 }
 
-/*
- * Test if reader processed invalid data because sequence number has changed.
+/**
+ * read_seqcount_begin - begin a seq-read critical section
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin opens a read critical section of the given seqcount.
+ * Validity of the critical section is tested by checking read_seqcount_retry
+ * function.
+ */
+static inline unsigned read_seqcount_begin(const seqcount_t *s)
+{
+	unsigned ret = __read_seqcount_begin(s);
+	smp_rmb();
+	return ret;
+}
+
+/**
+ * __read_seqcount_retry - end a seq-read critical section (without barrier)
+ * @s: pointer to seqcount_t
+ * @start: count, from read_seqcount_begin
+ * Returns: 1 if retry is required, else 0
+ *
+ * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
+ * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
+ * provided before actually loading any of the variables that are to be
+ * protected in this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ */
+static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+{
+	return unlikely(s->sequence != start);
+}
+
+/**
+ * read_seqcount_retry - end a seq-read critical section
+ * @s: pointer to seqcount_t
+ * @start: count, from read_seqcount_begin
+ * Returns: 1 if retry is required, else 0
+ *
+ * read_seqcount_retry closes a read critical section of the given seqcount.
+ * If the critical section was invalid, it must be ignored (and typically
+ * retried).
  */
 static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
 
-	return s->sequence != start;
+	return __read_seqcount_retry(s, start);
 }
 
 
@@ -167,6 +220,19 @@ static inline void write_seqcount_end(seqcount_t *s)
 	s->sequence++;
 }
 
+/**
+ * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * @s: pointer to seqcount_t
+ *
+ * After write_seqcount_barrier, no read-side seq operations will complete
+ * successfully and see data older than this.
+ */
+static inline void write_seqcount_barrier(seqcount_t *s)
+{
+	smp_wmb();
+	s->sequence+=2;
+}
+
 /*
  * Possible sw/hw IRQ protected versions of the interfaces.
  */
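The payoff of the barrier-free variants: a caller that reads several
seqcount-protected values on a fast path can provide a single smp_rmb()
itself, rather than taking one per helper, as the kerneldoc above requires.
A hedged sketch of such a caller (function and variable names are
hypothetical, not from the patch):

	static int read_pair(const seqcount_t *sc, const int *src, int *out)
	{
		unsigned seq;

		seq = __read_seqcount_begin(sc);  /* no smp_rmb() taken here... */
		smp_rmb();                        /* ...caller provides the barrier */
		out[0] = src[0];
		out[1] = src[1];
		smp_rmb();                        /* order loads before the re-check */
		return !__read_seqcount_retry(sc, seq);
	}

On the write side, write_seqcount_barrier() covers updates made outside a
write_seqcount_begin()/write_seqcount_end() pair: bumping the even sequence
count by 2 behind an smp_wmb() keeps readers from spinning (the count stays
even) while forcing any reader that began before the bump to fail its retry
check.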