author		Lai Jiangshan <laijs@cn.fujitsu.com>	2012-02-22 16:06:51 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-04-30 13:48:20 -0400
commit		4b7a3e9e32114a09c61995048f055615b5d4c26d (patch)
tree		562ff1829abb99e830b5e3b3719834b059376b03 /kernel/srcu.c
parent		cef50120b61c2af4ce34bc165e19cad66296f93d (diff)
rcu: Remove fast check path from __synchronize_srcu()
The fastpath in __synchronize_srcu() is designed to handle cases where
there are a large number of concurrent calls for the same srcu_struct
structure.  However, the Linux kernel currently does not use SRCU in
this manner, so remove the fastpath checks for simplicity.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
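For context, SRCU lets readers sleep inside their read-side critical
sections, and synchronize_srcu() waits for all readers that started
before it was called.  The code below is a minimal sketch of typical
SRCU usage, not part of this patch; struct foo, global_foo, my_srcu,
read_foo(), and update_foo() are hypothetical names, while the SRCU
primitives themselves are the real kernel API.  The removed fastpath
would only have paid off if many such updaters invoked
synchronize_srcu() concurrently on the same srcu_struct.

	#include <linux/srcu.h>
	#include <linux/slab.h>
	#include <linux/printk.h>

	struct foo {
		int a;
	};

	static struct foo __rcu *global_foo;	/* hypothetical shared pointer */
	static struct srcu_struct my_srcu;	/* set up with init_srcu_struct() */

	/* Reader: may block while holding the SRCU read lock. */
	static void read_foo(void)
	{
		struct foo *p;
		int idx;

		idx = srcu_read_lock(&my_srcu);
		p = srcu_dereference(global_foo, &my_srcu);
		if (p)
			pr_info("foo: %d\n", p->a);
		srcu_read_unlock(&my_srcu, idx);
	}

	/* Updater: publish the new version, wait out old readers, free. */
	static void update_foo(struct foo *newp)
	{
		struct foo *oldp;

		oldp = rcu_dereference_protected(global_foo, 1);
		rcu_assign_pointer(global_foo, newp);
		synchronize_srcu(&my_srcu);	/* pre-existing readers are done */
		kfree(oldp);
	}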
Diffstat (limited to 'kernel/srcu.c')
-rw-r--r--	kernel/srcu.c	25
1 file changed, 1 insertion(+), 24 deletions(-)
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 84c9b97dc3d9..17e95bcc901c 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -308,7 +308,7 @@ static void flip_idx_and_wait(struct srcu_struct *sp, bool expedited)
  */
 static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
 {
-	int idx;
+	int idx = 0;
 
 	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
 			   !lock_is_held(&rcu_bh_lock_map) &&
@@ -316,32 +316,9 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
 		   !lock_is_held(&rcu_sched_lock_map),
 		   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
 
-	smp_mb(); /* Ensure prior action happens before grace period. */
-	idx = ACCESS_ONCE(sp->completed);
-	smp_mb(); /* Access to ->completed before lock acquisition. */
 	mutex_lock(&sp->mutex);
 
 	/*
-	 * Check to see if someone else did the work for us while we were
-	 * waiting to acquire the lock.  We need -three- advances of
-	 * the counter, not just one.  If there was but one, we might have
-	 * shown up -after- our helper's first synchronize_sched(), thus
-	 * having failed to prevent CPU-reordering races with concurrent
-	 * srcu_read_unlock()s on other CPUs (see comment below).  If there
-	 * was only two, we are guaranteed to have waited through only one
-	 * full index-flip phase.  So we either (1) wait for three or
-	 * (2) supply the additional ones we need.
-	 */
-
-	if (sp->completed == idx + 2)
-		idx = 1;
-	else if (sp->completed == idx + 3) {
-		mutex_unlock(&sp->mutex);
-		return;
-	} else
-		idx = 0;
-
-	/*
 	 * If there were no helpers, then we need to do two flips of
 	 * the index.  The first flip is required if there are any
 	 * outstanding SRCU readers even if there are no new readers
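The retained comment above carries the reasoning for the slow path:
with the fastpath gone, every caller serializes on ->mutex and drives
the grace period itself with two flips of the counter index.  Below is
a simplified sketch of that shape, reusing the flip_idx_and_wait()
helper named in the hunk header; the body is illustrative only, not
the literal post-patch kernel code.

	/*
	 * Illustrative sketch, not the exact post-patch code: every
	 * caller now takes ->mutex and performs both index flips.
	 */
	static void __synchronize_srcu_sketch(struct srcu_struct *sp, bool expedited)
	{
		mutex_lock(&sp->mutex);

		/*
		 * First flip: waits out readers still using the current
		 * index, even if no new readers arrive.
		 */
		flip_idx_and_wait(sp, expedited);

		/*
		 * Second flip: catches a reader that sampled the old
		 * index just before the first flip but had not yet
		 * incremented its counter when the counters were summed.
		 */
		flip_idx_and_wait(sp, expedited);

		mutex_unlock(&sp->mutex);
	}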