author    Linus Torvalds <torvalds@linux-foundation.org>  2014-01-20 13:23:08 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-01-20 13:23:08 -0500
commit    6ffbe7d1fabddc768724656f159759cae7818cd9 (patch)
tree      ece184db0c35bcd9606968303984b430c24b847f  /kernel/rcu/tree.c
parent    897aea303fec0c24b2a21b8e29f45dc73a234555 (diff)
parent    63b1a81699c2a45c9f737419b1ec1da0ecf92812 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking changes from Ingo Molnar:

 - futex performance increases: larger hashes, smarter wakeups

 - mutex debugging improvements

 - lots of SMP ordering documentation updates

 - introduce the smp_load_acquire(), smp_store_release() primitives.
   (There are WIP patches that make use of them - not yet merged)

 - lockdep micro-optimizations

 - lockdep improvement: better cover IRQ contexts

 - liblockdep at last.  We'll continue to monitor how useful this is

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
  futexes: Fix futex_hashsize initialization
  arch: Re-sort some Kbuild files to hopefully help avoid some conflicts
  futexes: Avoid taking the hb->lock if there's nothing to wake up
  futexes: Document multiprocessor ordering guarantees
  futexes: Increase hash table size for better performance
  futexes: Clean up various details
  arch: Introduce smp_load_acquire(), smp_store_release()
  arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h
  arch: Move smp_mb__{before,after}_atomic_{inc,dec}.h into asm/atomic.h
  locking/doc: Rename LOCK/UNLOCK to ACQUIRE/RELEASE
  mutexes: Give more informative mutex warning in the !lock->owner case
  powerpc: Full barrier for smp_mb__after_unlock_lock()
  rcu: Apply smp_mb__after_unlock_lock() to preserve grace periods
  Documentation/memory-barriers.txt: Downgrade UNLOCK+BLOCK
  locking: Add an smp_mb__after_unlock_lock() for UNLOCK+BLOCK barrier
  Documentation/memory-barriers.txt: Document ACCESS_ONCE()
  Documentation/memory-barriers.txt: Prohibit speculative writes
  Documentation/memory-barriers.txt: Add long atomic examples to memory-barriers.txt
  Documentation/memory-barriers.txt: Add needed ACCESS_ONCE() calls to memory-barriers.txt
  Revert "smp/cpumask: Make CONFIG_CPUMASK_OFFSTACK=y usable without debug dependency"
  ...
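As a quick illustration of the new primitives mentioned above (a minimal
sketch, not code from this merge; producer()/consumer() and the flag/data
variables are ours): smp_store_release() publishes a store such that any
CPU whose paired smp_load_acquire() observes the flag is also guaranteed
to observe every store made before the release.

	static int data;
	static int flag;

	static void producer(void)
	{
		data = 42;			/* ordered before the release below */
		smp_store_release(&flag, 1);	/* no prior store may pass this */
	}

	static int consumer(void)
	{
		if (smp_load_acquire(&flag))	/* no later load may pass this */
			return data;		/* guaranteed to see data == 42 */
		return -1;			/* flag not yet published */
	}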
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index dd081987a8ec..a6205a05b5e4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1133,8 +1133,10 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
 	 * hold it, acquire the root rcu_node structure's lock in order to
 	 * start one (if needed).
 	 */
-	if (rnp != rnp_root)
+	if (rnp != rnp_root) {
 		raw_spin_lock(&rnp_root->lock);
+		smp_mb__after_unlock_lock();
+	}
 
 	/*
 	 * Get a new grace-period number.  If there really is no grace
@@ -1354,6 +1356,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 		local_irq_restore(flags);
 		return;
 	}
+	smp_mb__after_unlock_lock();
 	__note_gp_changes(rsp, rnp, rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -1368,6 +1371,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 
 	rcu_bind_gp_kthread();
 	raw_spin_lock_irq(&rnp->lock);
+	smp_mb__after_unlock_lock();
 	if (rsp->gp_flags == 0) {
 		/* Spurious wakeup, tell caller to go back to sleep. */
 		raw_spin_unlock_irq(&rnp->lock);
@@ -1409,6 +1413,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq(&rnp->lock);
+		smp_mb__after_unlock_lock();
 		rdp = this_cpu_ptr(rsp->rda);
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
@@ -1463,6 +1468,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 	/* Clear flag to prevent immediate re-entry. */
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		raw_spin_lock_irq(&rnp->lock);
+		smp_mb__after_unlock_lock();
 		rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
 		raw_spin_unlock_irq(&rnp->lock);
 	}
@@ -1480,6 +1486,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	raw_spin_lock_irq(&rnp->lock);
+	smp_mb__after_unlock_lock();
 	gp_duration = jiffies - rsp->gp_start;
 	if (gp_duration > rsp->gp_max)
 		rsp->gp_max = gp_duration;
@@ -1505,6 +1512,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq(&rnp->lock);
+		smp_mb__after_unlock_lock();
 		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
@@ -1515,6 +1523,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq(&rnp->lock);
+	smp_mb__after_unlock_lock();
 	rcu_nocb_gp_set(rnp, nocb);
 
 	rsp->completed = rsp->gpnum; /* Declare grace period done. */
@@ -1749,6 +1758,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 		rnp_c = rnp;
 		rnp = rnp->parent;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		WARN_ON_ONCE(rnp_c->qsmask);
 	}
 
@@ -1778,6 +1788,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 
 	rnp = rdp->mynode;
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
 	    rnp->completed == rnp->gpnum) {
 
@@ -1992,6 +2003,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
 	do {
 		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
+		smp_mb__after_unlock_lock();
 		rnp->qsmaskinit &= ~mask;
 		if (rnp->qsmaskinit != 0) {
 			if (rnp != rdp->mynode)
@@ -2202,6 +2214,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 		cond_resched();
 		mask = 0;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		if (!rcu_gp_in_progress(rsp)) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			return;
@@ -2231,6 +2244,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 	rnp = rcu_get_root(rsp);
 	if (rnp->qsmask == 0) {
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		rcu_initiate_boost(rnp, flags);	/* releases rnp->lock. */
 	}
 }
@@ -2263,6 +2277,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 
 	/* Reached the root of the rcu_node tree, acquire lock. */
 	raw_spin_lock_irqsave(&rnp_old->lock, flags);
+	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
@@ -2378,6 +2393,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 		struct rcu_node *rnp_root = rcu_get_root(rsp);
 
 		raw_spin_lock(&rnp_root->lock);
+		smp_mb__after_unlock_lock();
 		rcu_start_gp(rsp);
 		raw_spin_unlock(&rnp_root->lock);
 	} else {
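For context, every hunk above comes from the "rcu: Apply
smp_mb__after_unlock_lock() to preserve grace periods" commit listed in
the merge. On architectures such as powerpc (see the "powerpc: Full
barrier for smp_mb__after_unlock_lock()" commit), an UNLOCK of one lock
followed by a LOCK of another is not a full memory barrier, so RCU's
critical sections chained across different rcu_node locks could be
reordered against one another. Placing smp_mb__after_unlock_lock()
immediately after each acquisition upgrades the UNLOCK+LOCK sequence to
a full barrier. A sketch of the pattern (illustrative only; the
chained_sections() helper is hypothetical, not part of this diff):

	static void chained_sections(struct rcu_node *rnp_leaf,
				     struct rcu_node *rnp_root)
	{
		raw_spin_lock(&rnp_leaf->lock);
		/* ... leaf-level critical section ... */
		raw_spin_unlock(&rnp_leaf->lock);

		raw_spin_lock(&rnp_root->lock);
		smp_mb__after_unlock_lock();	/* UNLOCK+LOCK -> full barrier */
		/* ... accesses here are globally ordered after the leaf's ... */
		raw_spin_unlock(&rnp_root->lock);
	}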