path: root/kernel/srcu.c
Diffstat (limited to 'kernel/srcu.c')
 kernel/srcu.c | 120
 1 file changed, 82 insertions(+), 38 deletions(-)
diff --git a/kernel/srcu.c b/kernel/srcu.c
index b0aeeaf22ce4..bde4295774c8 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -34,6 +34,30 @@
 #include <linux/smp.h>
 #include <linux/srcu.h>
 
+static int init_srcu_struct_fields(struct srcu_struct *sp)
+{
+	sp->completed = 0;
+	mutex_init(&sp->mutex);
+	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
+	return sp->per_cpu_ref ? 0 : -ENOMEM;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+		       struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/* Don't re-initialize a lock while it is held. */
+	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
+	lockdep_init_map(&sp->dep_map, name, key, 0);
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+	return init_srcu_struct_fields(sp);
+}
+EXPORT_SYMBOL_GPL(__init_srcu_struct);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 /**
  * init_srcu_struct - initialize a sleep-RCU structure
  * @sp: structure to initialize.
@@ -44,11 +68,11 @@
  */
 int init_srcu_struct(struct srcu_struct *sp)
 {
-	sp->completed = 0;
-	mutex_init(&sp->mutex);
-	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
-	return (sp->per_cpu_ref ? 0 : -ENOMEM);
+	return init_srcu_struct_fields(sp);
 }
+EXPORT_SYMBOL_GPL(init_srcu_struct);
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 /*
  * srcu_readers_active_idx -- returns approximate number of readers
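The point of the new __init_srcu_struct() entry is that lockdep wants one static lock_class_key per srcu_struct declaration site, which a plain function cannot manufacture on its own. The companion header change is not part of this diff; a minimal sketch of what the CONFIG_DEBUG_LOCK_ALLOC wrapper macro could look like, with __srcu_key as an assumed illustrative name:

#define init_srcu_struct(sp)					\
({								\
	static struct lock_class_key __srcu_key;		\
								\
	/* #sp reuses the variable name as the lockdep class name */ \
	__init_srcu_struct((sp), #sp, &__srcu_key);		\
})

When CONFIG_DEBUG_LOCK_ALLOC is off, the plain init_srcu_struct() in the #else branch below is built instead and no key is needed.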
@@ -97,16 +121,14 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 	free_percpu(sp->per_cpu_ref);
 	sp->per_cpu_ref = NULL;
 }
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
-/**
- * srcu_read_lock - register a new reader for an SRCU-protected structure.
- * @sp: srcu_struct in which to register the new reader.
- *
+/*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.  Must be called from process context.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
-int srcu_read_lock(struct srcu_struct *sp)
+int __srcu_read_lock(struct srcu_struct *sp)
 {
 	int idx;
 
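Renaming the public function to __srcu_read_lock() suggests the exported name becomes a static inline in the header that wraps the raw counter work in lockdep annotations. A hedged sketch of that wrapper, assuming a srcu_read_acquire() helper that maps onto lock_acquire(&sp->dep_map, ...) under lockdep and compiles away otherwise:

static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
	int retval = __srcu_read_lock(sp);	/* the real counter increment */

	srcu_read_acquire(sp);			/* assumed lockdep hook */
	return retval;
}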
@@ -118,40 +140,27 @@ int srcu_read_lock(struct srcu_struct *sp)
 	preempt_enable();
 	return idx;
 }
+EXPORT_SYMBOL_GPL(__srcu_read_lock);
 
-/**
- * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
- * @sp: srcu_struct in which to unregister the old reader.
- * @idx: return value from corresponding srcu_read_lock().
- *
+/*
  * Removes the count for the old reader from the appropriate per-CPU
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
  * Must be called from process context.
  */
-void srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
 	preempt_disable();
 	srcu_barrier();  /* ensure compiler won't misorder critical section. */
 	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
 	preempt_enable();
 }
+EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
-/**
- * synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
- *
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates.  Can block; must be called from
- * process context.
- *
- * Note that it is illegal to call synchornize_srcu() from the corresponding
- * SRCU read-side critical section; doing so will result in deadlock.
- * However, it is perfectly legal to call synchronize_srcu() on one
- * srcu_struct from some other srcu_struct's read-side critical section.
- */
-void synchronize_srcu(struct srcu_struct *sp)
+/*
+ * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
+ */
+static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
 {
 	int idx;
 
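Seen from a caller, nothing changes: the lock side still returns an index that the unlock side must hand back, even if the reader migrated to another CPU in between. A minimal reader sketch, where struct foo, foo_srcu, and global_foo are hypothetical and assumed to be initialized and published elsewhere:

struct foo {
	int a;
};

static struct srcu_struct foo_srcu;	/* assumed: init_srcu_struct() already ran */
static struct foo *global_foo;		/* assumed: set via rcu_assign_pointer() */

static int read_foo_a(void)
{
	int idx, a;

	idx = srcu_read_lock(&foo_srcu);	/* reader may block while inside */
	a = rcu_dereference(global_foo)->a;
	srcu_read_unlock(&foo_srcu, idx);	/* must pass back the same idx */
	return a;
}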
@@ -173,7 +182,7 @@ void synchronize_srcu(struct srcu_struct *sp)
 		return;
 	}
 
-	synchronize_sched();  /* Force memory barrier on all CPUs. */
+	sync_func();  /* Force memory barrier on all CPUs. */
 
 	/*
 	 * The preceding synchronize_sched() ensures that any CPU that
@@ -190,7 +199,7 @@ void synchronize_srcu(struct srcu_struct *sp)
 	idx = sp->completed & 0x1;
 	sp->completed++;
 
-	synchronize_sched();  /* Force memory barrier on all CPUs. */
+	sync_func();  /* Force memory barrier on all CPUs. */
 
 	/*
 	 * At this point, because of the preceding synchronize_sched(),
@@ -203,7 +212,7 @@ void synchronize_srcu(struct srcu_struct *sp)
 	while (srcu_readers_active_idx(sp, idx))
 		schedule_timeout_interruptible(1);
 
-	synchronize_sched();  /* Force memory barrier on all CPUs. */
+	sync_func();  /* Force memory barrier on all CPUs. */
 
 	/*
 	 * The preceding synchronize_sched() forces all srcu_read_unlock()
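That wait loop is the heart of the grace period: it sleeps a tick at a time until the per-CPU counters for the retiring index sum to zero. The census function sits earlier in this file and is untouched by this patch; a sketch of roughly what it amounts to, for reference only:

static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	int sum = 0;

	for_each_possible_cpu(cpu)	/* an approximate, racy snapshot */
		sum += per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx];
	return sum;
}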
@@ -237,6 +246,47 @@ void synchronize_srcu(struct srcu_struct *sp)
 }
 
 /**
+ * synchronize_srcu - wait for prior SRCU read-side critical-section completion
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Flip the completed counter, and wait for the old count to drain to zero.
+ * As with classic RCU, the updater must use some separate means of
+ * synchronizing concurrent updates.  Can block; must be called from
+ * process context.
+ *
+ * Note that it is illegal to call synchronize_srcu() from the corresponding
+ * SRCU read-side critical section; doing so will result in deadlock.
+ * However, it is perfectly legal to call synchronize_srcu() on one
+ * srcu_struct from some other srcu_struct's read-side critical section.
+ */
+void synchronize_srcu(struct srcu_struct *sp)
+{
+	__synchronize_srcu(sp, synchronize_sched);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu);
+
+/**
+ * synchronize_srcu_expedited - like synchronize_srcu, but less patient
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Flip the completed counter, and wait for the old count to drain to zero.
+ * As with classic RCU, the updater must use some separate means of
+ * synchronizing concurrent updates.  Can block; must be called from
+ * process context.
+ *
+ * Note that it is illegal to call synchronize_srcu_expedited()
+ * from the corresponding SRCU read-side critical section; doing so
+ * will result in deadlock.  However, it is perfectly legal to call
+ * synchronize_srcu_expedited() on one srcu_struct from some other
+ * srcu_struct's read-side critical section.
+ */
+void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+	__synchronize_srcu(sp, synchronize_sched_expedited);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
+
+/**
  * srcu_batches_completed - return batches completed.
  * @sp: srcu_struct on which to report batch completion.
  *
@@ -248,10 +298,4 @@ long srcu_batches_completed(struct srcu_struct *sp)
 {
 	return sp->completed;
 }
-
-EXPORT_SYMBOL_GPL(init_srcu_struct);
-EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
-EXPORT_SYMBOL_GPL(srcu_read_lock);
-EXPORT_SYMBOL_GPL(srcu_read_unlock);
-EXPORT_SYMBOL_GPL(synchronize_srcu);
 EXPORT_SYMBOL_GPL(srcu_batches_completed);
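On the update side, synchronize_srcu_expedited() drops in wherever grace-period latency matters more than the system-wide cost of expediting. A hedged updater sketch, reusing the hypothetical foo_srcu and global_foo from the reader example above:

static DEFINE_MUTEX(foo_update_mutex);	/* updaters still serialize themselves */

static void replace_foo(struct foo *new_foo)
{
	struct foo *old_foo;

	mutex_lock(&foo_update_mutex);
	old_foo = global_foo;
	rcu_assign_pointer(global_foo, new_foo);	/* publish the new version */
	mutex_unlock(&foo_update_mutex);

	synchronize_srcu_expedited(&foo_srcu);	/* wait out all pre-existing readers */
	kfree(old_foo);				/* no reader can still hold a reference */
}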