Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree_plugin.h | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index cb5879386a02..b4e8eb24a5f1 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -800,33 +800,47 @@ void synchronize_rcu_expedited(void)
 	smp_mb(); /* Above access cannot bleed into critical section. */
 
 	/*
+	 * Block CPU-hotplug operations. This means that any CPU-hotplug
+	 * operation that finds an rcu_node structure with tasks in the
+	 * process of being boosted will know that all tasks blocking
+	 * this expedited grace period will already be in the process of
+	 * being boosted. This simplifies the process of moving tasks
+	 * from leaf to root rcu_node structures.
+	 */
+	get_online_cpus();
+
+	/*
 	 * Acquire lock, falling back to synchronize_rcu() if too many
 	 * lock-acquisition failures. Of course, if someone does the
 	 * expedited grace period for us, just leave.
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
+		if (ULONG_CMP_LT(snap,
+				 ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+			put_online_cpus();
+			goto mb_ret; /* Others did our work for us. */
+		}
 		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
 		} else {
+			put_online_cpus();
 			synchronize_rcu();
 			return;
 		}
-		if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
-			goto mb_ret; /* Others did our work for us. */
 	}
-	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
+	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+		put_online_cpus();
 		goto unlock_mb_ret; /* Others did our work for us. */
+	}
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
 	synchronize_sched_expedited();
 
-	raw_spin_lock_irqsave(&rsp->onofflock, flags);
-
 	/* Initialize ->expmask for all non-leaf rcu_node structures. */
 	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
-		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+		raw_spin_lock_irqsave(&rnp->lock, flags);
 		rnp->expmask = rnp->qsmaskinit;
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 
 	/* Snapshot current state of ->blkd_tasks lists. */
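The trylock loop above pairs bounded backoff with a wrap-safe snapshot of sync_rcu_preempt_exp_count, so a caller can bail out as soon as other callers have completed expedited grace periods covering its request. Below is a minimal userspace sketch of that pattern, not the kernel implementation: the names exp_lock, exp_count, and do_expedited are hypothetical, pthreads and usleep() stand in for the kernel primitives, and volatile stands in for ACCESS_ONCE().

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Wrap-safe "a < b" for free-running unsigned long counters,
 * equivalent to the kernel's ULONG_CMP_LT(). */
#define ULONG_CMP_LT(a, b) ((unsigned long)((a) - (b)) > (~0UL >> 1))

static pthread_mutex_t exp_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned long exp_count; /* bumped once per completed pass */

static void do_expedited(void)
{
	/* Two more completed passes guarantee one began after us. */
	unsigned long snap = exp_count + 1;
	int trycount = 0;

	while (pthread_mutex_trylock(&exp_lock) != 0) {
		if (ULONG_CMP_LT(snap, exp_count))
			return; /* others did our work for us */
		if (trycount++ < 10)
			usleep(10 * trycount); /* bounded backoff */
		else
			return; /* the kernel falls back to synchronize_rcu() here */
	}
	if (ULONG_CMP_LT(snap, exp_count)) { /* re-check now that we hold the lock */
		pthread_mutex_unlock(&exp_lock);
		return;
	}
	/* ... the expedited grace-period machinery would run here ... */
	exp_count++;
	pthread_mutex_unlock(&exp_lock);
}

int main(void)
{
	do_expedited();
	printf("completed passes: %lu\n", exp_count);
	return 0;
}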
@@ -835,7 +849,7 @@ void synchronize_rcu_expedited(void)
 	if (NUM_RCU_NODES > 1)
 		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
 
-	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+	put_online_cpus();
 
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
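The structural point of the patch: the global ->onofflock critical section, which kept irqs disabled across the whole rcu_node traversal, is replaced by get_online_cpus()/put_online_cpus() to hold off CPU-hotplug operations, letting each rcu_node lock be taken with its own irqsave. As a loose userspace model of that reader/writer exclusion, here is a sketch built on a pthread rwlock; hotplug_lock and the *_model names are hypothetical, and the real get_online_cpus() of this era uses a refcount plus mutex rather than an rwlock.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Readers such as synchronize_rcu_expedited() take a shared reference. */
static void get_online_cpus_model(void) { pthread_rwlock_rdlock(&hotplug_lock); }
static void put_online_cpus_model(void) { pthread_rwlock_unlock(&hotplug_lock); }

/* A CPU-hotplug operation excludes all such readers for its duration. */
static void cpu_hotplug_op_model(void)
{
	pthread_rwlock_wrlock(&hotplug_lock);
	/* ... move tasks between rcu_node structures, no expedited GP running ... */
	pthread_rwlock_unlock(&hotplug_lock);
}

int main(void)
{
	get_online_cpus_model();
	/* ... expedited grace-period work runs here, hotplug held off ... */
	put_online_cpus_model();
	cpu_hotplug_op_model();
	puts("done");
	return 0;
}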