commit    931ea9d1a6e06a5e3af03aa4aaaa7c7fd90e163f (patch)
author    Lai Jiangshan <laijs@cn.fujitsu.com>            2012-03-19 04:12:13 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2012-04-30 13:48:25 -0400
tree      d25495ccbf92d1ba720a945ac7fb4079b0434db4 /include/linux/srcu.h
parent    d9792edd7a9a0858a3b1df92cf8beb31e4191e3c (diff)
rcu: Implement per-domain single-threaded call_srcu() state machine
This commit implements an SRCU state machine in support of call_srcu().
The state machine is preemptible, light-weight, and single-threaded,
minimizing synchronization overhead. In particular, there is no longer
any need for synchronize_srcu() to be guarded by a mutex.
Expedited processing is handled, at least in the absence of concurrent
grace-period operations on that same srcu_struct structure, by having
the synchronize_srcu_expedited() thread take on the role of the
workqueue thread for one iteration.
There is a reasonable probability that a given SRCU callback will
be invoked on the same CPU that registered it, however, there is no
guarantee. Concurrent SRCU grace-period primitives can cause callbacks
to be executed elsewhere, even in the absence of CPU-hotplug operations.
Callbacks execute in process context, but under the influence of
local_bh_disable(), so it is illegal to sleep in an SRCU callback
function.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'include/linux/srcu.h')
-rw-r--r-- | include/linux/srcu.h | 37 |
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e5ce80452b62..55a5c52cbb25 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -29,16 +29,30 @@
 
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>
 
 struct srcu_struct_array {
 	unsigned long c[2];
 	unsigned long seq[2];
 };
 
+struct rcu_batch {
+	struct rcu_head *head, **tail;
+};
+
 struct srcu_struct {
 	unsigned completed;
 	struct srcu_struct_array __percpu *per_cpu_ref;
-	struct mutex mutex;
+	spinlock_t queue_lock; /* protect ->batch_queue, ->running */
+	bool running;
+	/* callbacks just queued */
+	struct rcu_batch batch_queue;
+	/* callbacks try to do the first check_zero */
+	struct rcu_batch batch_check0;
+	/* callbacks done with the first check_zero and the flip */
+	struct rcu_batch batch_check1;
+	struct rcu_batch batch_done;
+	struct delayed_work work;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -62,12 +76,33 @@ int init_srcu_struct(struct srcu_struct *sp);
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+/**
+ * call_srcu() - Queue a callback for invocation after an SRCU grace period
+ * @sp: srcu_struct in queue the callback
+ * @head: structure to be used for queueing the SRCU callback.
+ * @func: function to be invoked after the SRCU grace period
+ *
+ * The callback function will be invoked some time after a full SRCU
+ * grace period elapses, in other words after all pre-existing SRCU
+ * read-side critical sections have completed.  However, the callback
+ * function might well execute concurrently with other SRCU read-side
+ * critical sections that started after call_srcu() was invoked.  SRCU
+ * read-side critical sections are delimited by srcu_read_lock() and
+ * srcu_read_unlock(), and may be nested.
+ *
+ * The callback will be invoked from process context, but must nevertheless
+ * be fast and must not block.
+ */
+void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+		void (*func)(struct rcu_head *head));
+
 void cleanup_srcu_struct(struct srcu_struct *sp);
 int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
 void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
 void synchronize_srcu(struct srcu_struct *sp);
 void synchronize_srcu_expedited(struct srcu_struct *sp);
 long srcu_batches_completed(struct srcu_struct *sp);
+void srcu_barrier(struct srcu_struct *sp);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC