author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-01-24 01:29:37 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-03-12 18:19:37 -0400
commit		c199068913c9c5cbb5498e289bb387703e087ea8 (patch)
tree		a2d6fa7368bd2b0ef71a600586db5a761d6e533d
parent		0aa04b055e71bd3b8040dd71a126126c66b6f01e (diff)
rcu: Eliminate ->onoff_mutex from rcu_state structure

Because RCU grace-period initialization need no longer exclude
CPU-hotplug operations, this commit eliminates the ->onoff_mutex and
its uses.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--	kernel/rcu/tree.c	15
-rw-r--r--	kernel/rcu/tree.h	2
2 files changed, 0 insertions, 17 deletions
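The commit message above relies on the parent commit's scheme: online and
offline transitions are buffered in per-leaf ->qsmaskinitnext masks under the
leaf rcu_node lock, and grace-period initialization folds that buffer into the
active mask under the same lock, so a global mutex across initialization is
redundant. What follows is a minimal user-space sketch of that pattern, not
kernel code: the pthread mutex stands in for the leaf's rnp->lock, the single
leaf and its field names merely mirror their kernel counterparts, and the
helper names are invented for illustration. Build with cc -pthread.

/* gp_onoff_sketch.c: illustrative user-space sketch (NOT kernel code).
 * Hotplug events update a per-leaf buffered mask under the leaf's own
 * lock; grace-period initialization applies the buffer under that same
 * lock, so no global onoff_mutex is needed.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct leaf_node {
	pthread_mutex_t lock;	/* stands in for rnp->lock */
	uint64_t qsmaskinit;	/* CPUs the current GP waits on */
	uint64_t qsmaskinitnext;/* buffered online/offline updates */
};

static struct leaf_node leaf = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Hotplug side: record a CPU coming online, leaf lock only. */
static void cpu_online_event(int cpu)
{
	pthread_mutex_lock(&leaf.lock);
	leaf.qsmaskinitnext |= 1ULL << cpu;
	pthread_mutex_unlock(&leaf.lock);
}

/* Hotplug side: record a CPU going offline, leaf lock only. */
static void cpu_offline_event(int cpu)
{
	pthread_mutex_lock(&leaf.lock);
	leaf.qsmaskinitnext &= ~(1ULL << cpu);
	pthread_mutex_unlock(&leaf.lock);
}

/* Grace-period init: fold the buffered updates into the active mask
 * under the same leaf lock.  Events arriving after this point are
 * simply picked up by the *next* grace period, which is why no
 * global exclusion of hotplug is required.
 */
static void gp_init_apply_buffered(void)
{
	pthread_mutex_lock(&leaf.lock);
	leaf.qsmaskinit = leaf.qsmaskinitnext;
	pthread_mutex_unlock(&leaf.lock);
}

int main(void)
{
	cpu_online_event(0);
	cpu_online_event(1);
	gp_init_apply_buffered();	/* GP now waits on CPUs 0 and 1. */
	cpu_offline_event(1);		/* Takes effect at the next GP. */
	printf("qsmaskinit=%#llx qsmaskinitnext=%#llx\n",
	       (unsigned long long)leaf.qsmaskinit,
	       (unsigned long long)leaf.qsmaskinitnext);
	return 0;
}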
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f0f4d3510d24..79d53399247e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -101,7 +101,6 @@ struct rcu_state sname##_state = { \
 	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
 	.orphan_donetail = &sname##_state.orphan_donelist, \
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
-	.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
 	.name = RCU_STATE_NAME(sname), \
 	.abbr = sabbr, \
 }; \
@@ -1754,10 +1753,6 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
 	raw_spin_unlock_irq(&rnp->lock);
 
-	/* Exclude any concurrent CPU-hotplug operations. */
-	mutex_lock(&rsp->onoff_mutex);
-	smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
-
 	/*
 	 * Apply per-leaf buffered online and offline operations to the
 	 * rcu_node tree. Note that this new grace period need not wait
@@ -1844,7 +1839,6 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		schedule_timeout_uninterruptible(gp_init_delay);
 	}
 
-	mutex_unlock(&rsp->onoff_mutex);
 	return 1;
 }
 
@@ -2498,9 +2492,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	/* Adjust any no-longer-needed kthreads. */
 	rcu_boost_kthread_setaffinity(rnp, -1);
 
-	/* Exclude any attempts to start a new grace period. */
-	mutex_lock(&rsp->onoff_mutex);
-
 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
 	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
@@ -2517,7 +2508,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
 		  cpu, rdp->qlen, rdp->nxtlist);
-	mutex_unlock(&rsp->onoff_mutex);
 }
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -3700,9 +3690,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	/* Exclude new grace periods. */
-	mutex_lock(&rsp->onoff_mutex);
-
 	/* Set up local state, ensuring consistent view of global state. */
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rdp->beenonline = 1;	/* We have now been online. */
@@ -3733,8 +3720,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->qs_pending = false;
 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
-	mutex_unlock(&rsp->onoff_mutex);
 }
 
 static void rcu_prepare_cpu(int cpu)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index aa42562ff5b2..a69d3dab2ec4 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -456,8 +456,6 @@ struct rcu_state {
 	long qlen;			/* Total number of callbacks. */
 	/* End of fields guarded by orphan_lock. */
 
-	struct mutex onoff_mutex;	/* Coordinate hotplug & GPs. */
-
 	struct mutex barrier_mutex;	/* Guards barrier fields. */
 	atomic_t barrier_cpu_count;	/* # CPUs waiting on. */
 	struct completion barrier_completion; /* Wake at barrier end. */