author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2009-10-25 22:03:51 -0400
committer  Ingo Molnar <mingo@elte.hu>                    2009-10-26 04:40:30 -0400
commit     0cd397d33608ae6c97d2ee6c8c43462b419b7e26
tree       7c6ac61e7d8cc9cf410251024f1a6e515987b478 /kernel/srcu.c
parent     9b1d82fa1611706fa7ee1505f290160a18caf95d
rcu: Add synchronize_srcu_expedited()
This patch creates a synchronize_srcu_expedited() that uses
synchronize_sched_expedited() where synchronize_srcu()
uses synchronize_sched(). The synchronize_srcu() and
synchronize_srcu_expedited() functions become one-liners that
pass synchronize_sched() or synchronize_sched_expedited(),
respectively, to a new __synchronize_srcu() function.
While in the file, move the EXPORT_SYMBOL_GPL()s to immediately
follow the corresponding functions.
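For illustration only (not part of this patch), a minimal sketch of how an
updater might use the new primitive; the srcu_struct, data pointer, and
function names below are hypothetical, and init_srcu_struct(&my_srcu) is
assumed to have been called during setup:

	#include <linux/srcu.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	static struct srcu_struct my_srcu;	/* hypothetical; initialized at setup */
	static void *my_data;

	/* Reader: SRCU read-side critical sections are allowed to block. */
	static void my_reader(void)
	{
		int idx;

		idx = srcu_read_lock(&my_srcu);
		/* ... dereference my_data ... */
		srcu_read_unlock(&my_srcu, idx);
	}

	/* Updater: the expedited variant trades extra CPU overhead for a
	 * much shorter grace period, for latency-sensitive callers. */
	static void my_update(void *newp)
	{
		void *oldp = my_data;

		rcu_assign_pointer(my_data, newp);
		synchronize_srcu_expedited(&my_srcu);	/* wait for pre-existing readers */
		kfree(oldp);
	}

As with synchronize_srcu(), calling synchronize_srcu_expedited() from within
the corresponding SRCU read-side critical section would deadlock.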
Requested-by: Avi Kivity <avi@redhat.com>
Tested-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Josh Triplett <josh@joshtriplett.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
Cc: avi@redhat.com
LKML-Reference: <12565226354038-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/srcu.c')
-rw-r--r--  kernel/srcu.c | 74
1 file changed, 51 insertions(+), 23 deletions(-)
diff --git a/kernel/srcu.c b/kernel/srcu.c
index b0aeeaf22ce4..818d7d9aa03c 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -49,6 +49,7 @@ int init_srcu_struct(struct srcu_struct *sp)
 	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
 	return (sp->per_cpu_ref ? 0 : -ENOMEM);
 }
+EXPORT_SYMBOL_GPL(init_srcu_struct);
 
 /*
  * srcu_readers_active_idx -- returns approximate number of readers
@@ -97,6 +98,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 	free_percpu(sp->per_cpu_ref);
 	sp->per_cpu_ref = NULL;
 }
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /**
  * srcu_read_lock - register a new reader for an SRCU-protected structure.
@@ -118,6 +120,7 @@ int srcu_read_lock(struct srcu_struct *sp)
 	preempt_enable();
 	return idx;
 }
+EXPORT_SYMBOL_GPL(srcu_read_lock);
 
 /**
  * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
@@ -136,22 +139,12 @@ void srcu_read_unlock(struct srcu_struct *sp, int idx)
 	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
 	preempt_enable();
 }
+EXPORT_SYMBOL_GPL(srcu_read_unlock);
 
-/**
- * synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
- *
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates.  Can block; must be called from
- * process context.
- *
- * Note that it is illegal to call synchornize_srcu() from the corresponding
- * SRCU read-side critical section; doing so will result in deadlock.
- * However, it is perfectly legal to call synchronize_srcu() on one
- * srcu_struct from some other srcu_struct's read-side critical section.
+/*
+ * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
-void synchronize_srcu(struct srcu_struct *sp)
+void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
 {
 	int idx;
 
@@ -173,7 +166,7 @@ void synchronize_srcu(struct srcu_struct *sp)
 		return;
 	}
 
-	synchronize_sched();  /* Force memory barrier on all CPUs. */
+	sync_func();  /* Force memory barrier on all CPUs. */
 
 	/*
 	 * The preceding synchronize_sched() ensures that any CPU that
@@ -190,7 +183,7 @@ void synchronize_srcu(struct srcu_struct *sp)
 	idx = sp->completed & 0x1;
 	sp->completed++;
 
-	synchronize_sched();  /* Force memory barrier on all CPUs. */
+	sync_func();  /* Force memory barrier on all CPUs. */
 
 	/*
 	 * At this point, because of the preceding synchronize_sched(),
@@ -203,7 +196,7 @@ void synchronize_srcu(struct srcu_struct *sp)
 	while (srcu_readers_active_idx(sp, idx))
 		schedule_timeout_interruptible(1);
 
-	synchronize_sched();  /* Force memory barrier on all CPUs. */
+	sync_func();  /* Force memory barrier on all CPUs. */
 
 	/*
 	 * The preceding synchronize_sched() forces all srcu_read_unlock()
@@ -237,6 +230,47 @@ void synchronize_srcu(struct srcu_struct *sp)
 }
 
 /**
+ * synchronize_srcu - wait for prior SRCU read-side critical-section completion
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Flip the completed counter, and wait for the old count to drain to zero.
+ * As with classic RCU, the updater must use some separate means of
+ * synchronizing concurrent updates.  Can block; must be called from
+ * process context.
+ *
+ * Note that it is illegal to call synchronize_srcu() from the corresponding
+ * SRCU read-side critical section; doing so will result in deadlock.
+ * However, it is perfectly legal to call synchronize_srcu() on one
+ * srcu_struct from some other srcu_struct's read-side critical section.
+ */
+void synchronize_srcu(struct srcu_struct *sp)
+{
+	__synchronize_srcu(sp, synchronize_sched);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu);
+
+/**
+ * synchronize_srcu_expedited - like synchronize_srcu, but less patient
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Flip the completed counter, and wait for the old count to drain to zero.
+ * As with classic RCU, the updater must use some separate means of
+ * synchronizing concurrent updates.  Can block; must be called from
+ * process context.
+ *
+ * Note that it is illegal to call synchronize_srcu_expedited()
+ * from the corresponding SRCU read-side critical section; doing so
+ * will result in deadlock.  However, it is perfectly legal to call
+ * synchronize_srcu_expedited() on one srcu_struct from some other
+ * srcu_struct's read-side critical section.
+ */
+void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+	__synchronize_srcu(sp, synchronize_sched_expedited);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
+
+/**
  * srcu_batches_completed - return batches completed.
  * @sp: srcu_struct on which to report batch completion.
  *
@@ -248,10 +282,4 @@ long srcu_batches_completed(struct srcu_struct *sp)
 {
 	return sp->completed;
 }
-
-EXPORT_SYMBOL_GPL(init_srcu_struct);
-EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
-EXPORT_SYMBOL_GPL(srcu_read_lock);
-EXPORT_SYMBOL_GPL(srcu_read_unlock);
-EXPORT_SYMBOL_GPL(synchronize_srcu);
 EXPORT_SYMBOL_GPL(srcu_batches_completed);