author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750
tree      644b88f8a71896307d71438e9b3af49126ffb22b /kernel/srcu.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5

Merge branch 'wip-2.6.34' into old-private-master
Diffstat (limited to 'kernel/srcu.c')
 kernel/srcu.c | 121 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 82 insertions(+), 39 deletions(-)
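This merge brings the 2.6.34 SRCU rework into kernel/srcu.c: initialization is
factored into init_srcu_struct_fields(), a lockdep-aware __init_srcu_struct()
is added under CONFIG_DEBUG_LOCK_ALLOC, srcu_read_lock()/srcu_read_unlock()
become the internal __srcu_read_lock()/__srcu_read_unlock(), and
synchronize_srcu_expedited() is added on top of a shared __synchronize_srcu()
helper. The wrappers that re-expose srcu_read_lock()/srcu_read_unlock() live
in include/linux/srcu.h and are not part of this diff; presumably they look
roughly like the sketch below, where srcu_read_acquire() and
srcu_read_release() are assumed names for the lockdep hooks:

/* Sketch of the assumed header-side wrappers (not in this diff). */
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
	int retval = __srcu_read_lock(sp);

	srcu_read_acquire(sp);	/* lockdep: entering SRCU read side */
	return retval;
}

static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
	__releases(sp)
{
	srcu_read_release(sp);	/* lockdep: leaving SRCU read side */
	__srcu_read_unlock(sp, idx);
}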
diff --git a/kernel/srcu.c b/kernel/srcu.c
index b0aeeaf22ce4..2980da3fd509 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -30,10 +30,33 @@
 #include <linux/preempt.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/srcu.h>
 
+static int init_srcu_struct_fields(struct srcu_struct *sp)
+{
+	sp->completed = 0;
+	mutex_init(&sp->mutex);
+	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
+	return sp->per_cpu_ref ? 0 : -ENOMEM;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+		       struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/* Don't re-initialize a lock while it is held. */
+	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
+	lockdep_init_map(&sp->dep_map, name, key, 0);
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+	return init_srcu_struct_fields(sp);
+}
+EXPORT_SYMBOL_GPL(__init_srcu_struct);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 /**
  * init_srcu_struct - initialize a sleep-RCU structure
  * @sp: structure to initialize.
@@ -44,11 +67,11 @@
  */
 int init_srcu_struct(struct srcu_struct *sp)
 {
-	sp->completed = 0;
-	mutex_init(&sp->mutex);
-	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
-	return (sp->per_cpu_ref ? 0 : -ENOMEM);
+	return init_srcu_struct_fields(sp);
 }
+EXPORT_SYMBOL_GPL(init_srcu_struct);
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 /*
  * srcu_readers_active_idx -- returns approximate number of readers
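With the plain init_srcu_struct() definition compiled out under
CONFIG_DEBUG_LOCK_ALLOC, call sites presumably reach __init_srcu_struct()
through a header macro that supplies a static lock class per call site, in
the same style as mutex_init(). A sketch of the assumed include/linux/srcu.h
side (not part of this diff):

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* One lock_class_key per call site lets lockdep tell srcu_structs apart. */
# define init_srcu_struct(sp)				\
({							\
	static struct lock_class_key __srcu_key;	\
							\
	__init_srcu_struct((sp), #sp, &__srcu_key);	\
})
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */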
@@ -97,16 +120,14 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 	free_percpu(sp->per_cpu_ref);
 	sp->per_cpu_ref = NULL;
 }
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
-/**
- * srcu_read_lock - register a new reader for an SRCU-protected structure.
- * @sp: srcu_struct in which to register the new reader.
- *
+/*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.  Must be called from process context.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
-int srcu_read_lock(struct srcu_struct *sp)
+int __srcu_read_lock(struct srcu_struct *sp)
 {
 	int idx;
 
@@ -118,40 +139,27 @@ int srcu_read_lock(struct srcu_struct *sp)
 	preempt_enable();
 	return idx;
 }
+EXPORT_SYMBOL_GPL(__srcu_read_lock);
 
-/**
- * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
- * @sp: srcu_struct in which to unregister the old reader.
- * @idx: return value from corresponding srcu_read_lock().
- *
+/*
  * Removes the count for the old reader from the appropriate per-CPU
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
  * Must be called from process context.
  */
-void srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
 	preempt_disable();
 	srcu_barrier();  /* ensure compiler won't misorder critical section. */
 	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
 	preempt_enable();
 }
+EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
-/**
- * synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
- *
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates.  Can block; must be called from
- * process context.
- *
- * Note that it is illegal to call synchronize_srcu() from the corresponding
- * SRCU read-side critical section; doing so will result in deadlock.
- * However, it is perfectly legal to call synchronize_srcu() on one
- * srcu_struct from some other srcu_struct's read-side critical section.
+/*
+ * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
-void synchronize_srcu(struct srcu_struct *sp)
+static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
 {
 	int idx;
 
@@ -173,7 +181,7 @@ void synchronize_srcu(struct srcu_struct *sp)
 		return;
 	}
 
-	synchronize_sched();  /* Force memory barrier on all CPUs. */
+	sync_func();  /* Force memory barrier on all CPUs. */
 
 	/*
 	 * The preceding synchronize_sched() ensures that any CPU that
@@ -190,7 +198,7 @@ void synchronize_srcu(struct srcu_struct *sp)
 	idx = sp->completed & 0x1;
 	sp->completed++;
 
-	synchronize_sched();  /* Force memory barrier on all CPUs. */
+	sync_func();  /* Force memory barrier on all CPUs. */
 
 	/*
 	 * At this point, because of the preceding synchronize_sched(),
@@ -203,7 +211,7 @@ void synchronize_srcu(struct srcu_struct *sp)
 	while (srcu_readers_active_idx(sp, idx))
 		schedule_timeout_interruptible(1);
 
-	synchronize_sched();  /* Force memory barrier on all CPUs. */
+	sync_func();  /* Force memory barrier on all CPUs. */
 
 	/*
 	 * The preceding synchronize_sched() forces all srcu_read_unlock()
@@ -237,6 +245,47 @@ void synchronize_srcu(struct srcu_struct *sp)
 }
 
 /**
+ * synchronize_srcu - wait for prior SRCU read-side critical-section completion
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Flip the completed counter, and wait for the old count to drain to zero.
+ * As with classic RCU, the updater must use some separate means of
+ * synchronizing concurrent updates.  Can block; must be called from
+ * process context.
+ *
+ * Note that it is illegal to call synchronize_srcu() from the corresponding
+ * SRCU read-side critical section; doing so will result in deadlock.
+ * However, it is perfectly legal to call synchronize_srcu() on one
+ * srcu_struct from some other srcu_struct's read-side critical section.
+ */
+void synchronize_srcu(struct srcu_struct *sp)
+{
+	__synchronize_srcu(sp, synchronize_sched);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu);
+
+/**
+ * synchronize_srcu_expedited - like synchronize_srcu, but less patient
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Flip the completed counter, and wait for the old count to drain to zero.
+ * As with classic RCU, the updater must use some separate means of
+ * synchronizing concurrent updates.  Can block; must be called from
+ * process context.
+ *
+ * Note that it is illegal to call synchronize_srcu_expedited()
+ * from the corresponding SRCU read-side critical section; doing so
+ * will result in deadlock.  However, it is perfectly legal to call
+ * synchronize_srcu_expedited() on one srcu_struct from some other
+ * srcu_struct's read-side critical section.
+ */
+void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+	__synchronize_srcu(sp, synchronize_sched_expedited);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
+
+/**
  * srcu_batches_completed - return batches completed.
  * @sp: srcu_struct on which to report batch completion.
  *
@@ -248,10 +297,4 @@ long srcu_batches_completed(struct srcu_struct *sp)
 {
 	return sp->completed;
 }
-
-EXPORT_SYMBOL_GPL(init_srcu_struct);
-EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
-EXPORT_SYMBOL_GPL(srcu_read_lock);
-EXPORT_SYMBOL_GPL(srcu_read_unlock);
-EXPORT_SYMBOL_GPL(synchronize_srcu);
 EXPORT_SYMBOL_GPL(srcu_batches_completed);
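
For reference, a minimal usage sketch of the API surface as it stands after
this merge. The srcu_struct, the protected pointer, and the callers are
hypothetical names; srcu_dereference() is assumed to be provided by
include/linux/srcu.h, and update-side serialization (e.g. a mutex around
foo_update()) is assumed but not shown:

#include <linux/srcu.h>
#include <linux/slab.h>

struct foo {
	int val;
};

static struct srcu_struct foo_srcu;	/* hypothetical */
static struct foo *global_foo;		/* hypothetical, SRCU-protected */

static int __init foo_setup(void)
{
	return init_srcu_struct(&foo_srcu);	/* -ENOMEM on failure */
}

/* Read side: unlike classic RCU, the critical section may block. */
static int foo_read(void)
{
	int idx, val;

	idx = srcu_read_lock(&foo_srcu);
	val = srcu_dereference(global_foo, &foo_srcu)->val;
	srcu_read_unlock(&foo_srcu, idx);
	return val;
}

/* Update side: publish a new version, wait out old readers, then free. */
static void foo_update(struct foo *newp)
{
	struct foo *oldp = global_foo;

	rcu_assign_pointer(global_foo, newp);
	synchronize_srcu(&foo_srcu);	/* or synchronize_srcu_expedited() */
	kfree(oldp);
}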