author		Linus Torvalds <torvalds@linux-foundation.org>	2015-07-01 13:49:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-01 13:49:25 -0400
commit		02201e3f1b46aed7c6348f406b7b40de80ba6de3 (patch)
tree		2392c9098359725c195dd82a72b20ccedc1a1509 /include/linux/seqlock.h
parent		0890a264794f33df540fbaf274699146903b4e6b (diff)
parent		20bdc2cfdbc484777b30b96fcdbb8994038f3ce1 (diff)
Merge tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull module updates from Rusty Russell:
 "Main excitement here is Peter Zijlstra's lockless rbtree optimization
  to speed module address lookup.  He found some abusers of the module
  lock doing that too.

  A little bit of parameter work here too; including Dan Streetman's
  breaking up the big param mutex so writing a parameter can load
  another module (yeah, really).  Unfortunately that broke the usual
  suspects, !CONFIG_MODULES and !CONFIG_SYSFS, so those fixes were
  appended too"

* tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (26 commits)
  modules: only use mod->param_lock if CONFIG_MODULES
  param: fix module param locks when !CONFIG_SYSFS.
  rcu: merge fix for Convert ACCESS_ONCE() to READ_ONCE() and WRITE_ONCE()
  module: add per-module param_lock
  module: make perm const
  params: suppress unused variable error, warn once just in case code changes.
  modules: clarify CONFIG_MODULE_COMPRESS help, suggest 'N'.
  kernel/module.c: avoid ifdefs for sig_enforce declaration
  kernel/workqueue.c: remove ifdefs over wq_power_efficient
  kernel/params.c: export param_ops_bool_enable_only
  kernel/params.c: generalize bool_enable_only
  kernel/module.c: use generic module param operaters for sig_enforce
  kernel/params: constify struct kernel_param_ops uses
  sysfs: tightened sysfs permission checks
  module: Rework module_addr_{min,max}
  module: Use __module_address() for module_address_lookup()
  module: Make the mod_tree stuff conditional on PERF_EVENTS || TRACING
  module: Optimize __module_address() using a latched RB-tree
  rbtree: Implement generic latch_tree
  seqlock: Introduce raw_read_seqcount_latch()
  ...
Diffstat (limited to 'include/linux/seqlock.h')
-rw-r--r--	include/linux/seqlock.h	81
1 file changed, 80 insertions(+), 1 deletion(-)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 486e685a226a..e0582106ef4f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -35,6 +35,7 @@
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
+#include <linux/compiler.h>
 #include <asm/processor.h>
 
 /*
@@ -274,9 +275,87 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 	s->sequence++;
 }
 
-/*
+static inline int raw_read_seqcount_latch(seqcount_t *s)
+{
+	return lockless_dereference(s->sequence);
+}
+
+/**
  * raw_write_seqcount_latch - redirect readers to even/odd copy
  * @s: pointer to seqcount_t
+ *
+ * The latch technique is a multiversion concurrency control method that allows
+ * queries during non-atomic modifications. If you can guarantee queries never
+ * interrupt the modification -- e.g. the concurrency is strictly between CPUs
+ * -- you most likely do not need this.
+ *
+ * Where the traditional RCU/lockless data structures rely on atomic
+ * modifications to ensure queries observe either the old or the new state the
+ * latch allows the same for non-atomic updates. The trade-off is doubling the
+ * cost of storage; we have to maintain two copies of the entire data
+ * structure.
+ *
+ * Very simply put: we first modify one copy and then the other. This ensures
+ * there is always one copy in a stable state, ready to give us an answer.
+ *
+ * The basic form is a data structure like:
+ *
+ * struct latch_struct {
+ *	seqcount_t		seq;
+ *	struct data_struct	data[2];
+ * };
+ *
+ * Where a modification, which is assumed to be externally serialized, does the
+ * following:
+ *
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ *	smp_wmb();	<- Ensure that the last data[1] update is visible
+ *	latch->seq++;
+ *	smp_wmb();	<- Ensure that the seqcount update is visible
+ *
+ *	modify(latch->data[0], ...);
+ *
+ *	smp_wmb();	<- Ensure that the data[0] update is visible
+ *	latch->seq++;
+ *	smp_wmb();	<- Ensure that the seqcount update is visible
+ *
+ *	modify(latch->data[1], ...);
+ * }
+ *
+ * The query will have a form like:
+ *
+ * struct entry *latch_query(struct latch_struct *latch, ...)
+ * {
+ *	struct entry *entry;
+ *	unsigned seq, idx;
+ *
+ *	do {
+ *		seq = lockless_dereference(latch->seq);
+ *
+ *		idx = seq & 0x01;
+ *		entry = data_query(latch->data[idx], ...);
+ *
+ *		smp_rmb();
+ *	} while (seq != latch->seq);
+ *
+ *	return entry;
+ * }
+ *
+ * So during the modification, queries are first redirected to data[1]. Then we
+ * modify data[0]. When that is complete, we redirect queries back to data[0]
+ * and we can modify data[1].
+ *
+ * NOTE: The non-requirement for atomic modifications does _NOT_ include
+ *       the publishing of new entries in the case where data is a dynamic
+ *       data structure.
+ *
+ *       An iteration might start in data[0] and get suspended long enough
+ *       to miss an entire modification sequence, once it resumes it might
+ *       observe the new entry.
+ *
+ * NOTE: When data is a dynamic data structure; one should use regular RCU
+ *       patterns to manage the lifetimes of the objects within.
  */
 static inline void raw_write_seqcount_latch(seqcount_t *s)
 {
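
As a rough usage sketch (not part of this patch; the struct and function names below are hypothetical, invented for illustration), a writer/reader pair built on these two helpers would mirror the latch_modify()/latch_query() pseudo-code from the comment above:

#include <linux/types.h>
#include <linux/seqlock.h>

struct example_data {
	u64 a, b;
};

struct latch_example {
	seqcount_t		seq;
	struct example_data	data[2];
};

/* Writer side: assumed to be externally serialized, e.g. by a spinlock. */
static void example_modify(struct latch_example *le, const struct example_data *val)
{
	raw_write_seqcount_latch(&le->seq);	/* redirect readers to data[1] */
	le->data[0] = *val;			/* update the now-idle copy */
	raw_write_seqcount_latch(&le->seq);	/* redirect readers back to data[0] */
	le->data[1] = *val;			/* bring the second copy up to date */
}

/* Reader side: lockless; may run concurrently with example_modify(). */
static struct example_data example_read(struct latch_example *le)
{
	struct example_data snap;
	unsigned int seq;

	do {
		seq = raw_read_seqcount_latch(&le->seq);
		snap = le->data[seq & 1];	/* pick the currently stable copy */
	} while (read_seqcount_retry(&le->seq, seq));

	return snap;
}

The in-tree consumer introduced by this series is the latched RB-tree ("rbtree: Implement generic latch_tree") used to optimize __module_address().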