Diffstat (limited to 'kernel/srcu.c')
 kernel/srcu.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 2980da3fd509..73ce23feaea9 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -31,6 +31,7 @@
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 #include <linux/srcu.h>
 
 static int init_srcu_struct_fields(struct srcu_struct *sp)
@@ -46,11 +47,9 @@ static int init_srcu_struct_fields(struct srcu_struct *sp)
 int __init_srcu_struct(struct srcu_struct *sp, const char *name,
 		       struct lock_class_key *key)
 {
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	/* Don't re-initialize a lock while it is held. */
 	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
 	lockdep_init_map(&sp->dep_map, name, key, 0);
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 	return init_srcu_struct_fields(sp);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
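Why the #ifdef can go: __init_srcu_struct() is only reachable through the init_srcu_struct() wrapper, which include/linux/srcu.h defines only when CONFIG_DEBUG_LOCK_ALLOC is enabled, so this function body never compiles in non-lockdep builds anyway. Roughly, the header side looks like the following (an approximate from-memory sketch, not part of this patch):

/* include/linux/srcu.h, approximate sketch -- not part of this patch */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key);

#define init_srcu_struct(sp) \
({ \
	static struct lock_class_key __srcu_key; \
	__init_srcu_struct((sp), #sp, &__srcu_key); \
})

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *sp);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */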
@@ -157,6 +156,16 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited().  We spin for a fixed time period
+ * (defined below) to allow SRCU readers to exit their read-side critical
+ * sections.  If there are still some readers after 10 microseconds,
+ * we repeatedly block for 1-millisecond time periods.  This approach
+ * has done well in testing, so there is no need for a config parameter.
+ */
+#define SYNCHRONIZE_SRCU_READER_DELAY 10
+
+/*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
 static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
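The "readers" this comment refers to are callers sitting between srcu_read_lock() and srcu_read_unlock(). A minimal usage sketch of the reader/updater pairing being waited on (struct foo, shared, and do_something() are made-up names for illustration; this is not code from the patch):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at setup */
static struct foo __rcu *shared;	/* hypothetical shared object */

static void reader(void)
{
	/* This window is what synchronize_srcu() must wait out. */
	int idx = srcu_read_lock(&my_srcu);
	struct foo *p = srcu_dereference(shared, &my_srcu);

	if (p)
		do_something(p);	/* hypothetical */
	srcu_read_unlock(&my_srcu, idx);
}

static void updater(struct foo *newp)
{
	struct foo *oldp = rcu_dereference_protected(shared, 1);

	rcu_assign_pointer(shared, newp);
	/* Spins ~10us for fast readers, then sleeps a jiffy at a time. */
	synchronize_srcu(&my_srcu);
	kfree(oldp);	/* no reader can still hold the old pointer */
}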
@@ -205,9 +214,15 @@ static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
 	 * all srcu_read_lock() calls using the old counters have completed.
 	 * Their corresponding critical sections might well be still
 	 * executing, but the srcu_read_lock() primitives themselves
-	 * will have finished executing.
+	 * will have finished executing.  We initially give readers
+	 * an arbitrarily chosen 10 microseconds to get out of their
+	 * SRCU read-side critical sections, then loop waiting 1/HZ
+	 * seconds per iteration.  The 10-microsecond value has done
+	 * very well in testing.
 	 */
 
+	if (srcu_readers_active_idx(sp, idx))
+		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
 	while (srcu_readers_active_idx(sp, idx))
 		schedule_timeout_interruptible(1);
 
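Two details worth noting about the hunk above. First, the new udelay() only runs if readers are still present, so an uncontended grace period pays nothing for the spin. Second, schedule_timeout_interruptible(1) sleeps for one jiffy, i.e. 1/HZ seconds; that equals 1 millisecond only when HZ=1000, which is why the two new comments describe the same loop slightly differently. Pieced together from the hunks, the wait sequence at the end of __synchronize_srcu() now reads (a reconstruction, surrounding code elided):

	/* Common case: give readers 10us to drain before sleeping. */
	if (srcu_readers_active_idx(sp, idx))
		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
	/* Slow path: recheck once per jiffy (1/HZ seconds). */
	while (srcu_readers_active_idx(sp, idx))
		schedule_timeout_interruptible(1);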