author		Ingo Molnar <mingo@elte.hu>	2009-11-09 22:10:31 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-11-09 22:10:35 -0500
commit		7e1a2766e67a529f62c8cfba0a47d63fc4f7fa8a
tree		197950369a773afdf04af9bdfc4a2ce1d2b5d3af	/kernel/rcutree.c
parent		c5e0cb3ddc5f14cedcfc50c0fb3b5fc6b56576da
parent		83f5b01ffbbaea6f97c9a79d21e240dbfb69f2f1
Merge branch 'core/urgent' into core/rcu
Merge reason: Pick up RCU fixlet to base further commits on.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	31
1 file changed, 25 insertions(+), 6 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 055f1a941a9e..69f4efec3449 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -61,7 +61,7 @@ static struct lock_class_key rcu_root_class;
 		NUM_RCU_LVL_2, \
 		NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_SIGNAL_INIT, \
+	.signaled = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
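[Context for the RCU_SIGNAL_INIT -> RCU_GP_IDLE change above: ->signaled is a small state machine, and in kernel/rcutree.h of this era RCU_SIGNAL_INIT is an alias for RCU_SAVE_DYNTICK, so the old initializer started the machine as though a grace period were already scanning dyntick state. A sketch of those definitions, with exact values assumed from 2.6.32-era sources rather than taken from this diff:]

#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent states. */
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK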
@@ -659,14 +659,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * irqs disabled.
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		spin_lock(&rnp->lock);		/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
-		spin_unlock(&rnp->lock);	/* irqs already disabled. */
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	}
 
+	rnp = rcu_get_root(rsp);
+	spin_lock(&rnp->lock);		/* irqs already disabled. */
 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
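[The three added lines wrap the ->signaled transition in the root rcu_node lock, because force_quiescent_state() samples ->signaled under that same lock; publishing the transition under the lock means no sampler can act on RCU_SIGNAL_INIT before the per-node setup in the loop above is visible. A minimal userspace analogue of this publish-under-lock pattern, with hypothetical names and a pthread mutex standing in for the kernel spinlock:]

#include <pthread.h>
#include <stdio.h>

enum gp_state { IDLE, READY };

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static enum gp_state state = IDLE;
static int nodes_initialized;

static void start_gp(void)
{
	nodes_initialized = 1;		/* stands in for the rnp loop */
	pthread_mutex_lock(&root_lock);
	state = READY;			/* published only after setup */
	pthread_mutex_unlock(&root_lock);
}

static void *reader(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&root_lock);
	if (state == READY)		/* setup guaranteed visible here */
		printf("nodes_initialized=%d\n", nodes_initialized);
	pthread_mutex_unlock(&root_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	start_gp();
	pthread_join(t, NULL);
	return 0;
}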
@@ -708,6 +711,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
+	rsp->signaled = RCU_GP_IDLE;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -915,7 +919,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+		/*
+		 * If there was a task blocking the current grace period,
+		 * and if all CPUs have checked in, we need to propagate
+		 * the quiescent state up the rcu_node hierarchy.  But that
+		 * is inconvenient at the moment due to deadlock issues if
+		 * this should end the current grace period.  So set the
+		 * offlined CPU's bit in ->qsmask in order to force the
+		 * next force_quiescent_state() invocation to clean up this
+		 * mess in a deadlock-free manner.
+		 */
+		if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+			rnp->qsmask |= mask;
+
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
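[The new block's comment carries the key reasoning: ending the grace period from this context would deadlock on lock ordering, so the offlined CPU's ->qsmask bit is re-armed and the next force_quiescent_state() scan completes the grace period from a safe context instead of the grace period stalling forever. A standalone sketch of that defer-by-pending-bit idea, hypothetical names, not kernel code:]

#include <stdbool.h>
#include <stdint.h>

struct gp_node {
	uint64_t qsmask;	/* CPUs still owing a quiescent-state report */
};

/*
 * Offline path: called with the node locked, in a context that must
 * not end the grace period itself.  If moving this CPU's blocked
 * tasks left it the last holdout (qsmask now 0), re-arm its bit so
 * the next periodic scan notices and finishes the job safely.
 */
static void offline_defer(struct gp_node *rnp, uint64_t cpu_bit,
			  bool blocked_tasks_moved)
{
	if (blocked_tasks_moved && rnp->qsmask == 0)
		rnp->qsmask |= cpu_bit;
}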
@@ -1151,9 +1168,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	spin_unlock(&rnp->lock);
 	switch (signaled) {
+	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
-		break; /* grace period still initializing, ignore. */
+		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
 
@@ -1167,7 +1185,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
 	/* Update state, record completion counter. */
 	spin_lock(&rnp->lock);
-	if (lastcomp == rsp->completed) {
+	if (lastcomp == rsp->completed &&
+	    rsp->signaled == RCU_SAVE_DYNTICK) {
 		rsp->signaled = RCU_FORCE_QS;
 		dyntick_record_completed(rsp, lastcomp);
 	}
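[The added rsp->signaled == RCU_SAVE_DYNTICK test pairs with the other hunks: now that cpu_quiet_msk_finish() can reset ->signaled to RCU_GP_IDLE, a force_quiescent_state() caller whose sample of the state predates the end of the grace period must not blindly advance the machine to RCU_FORCE_QS. A sketch of this compare-before-advance pattern, hypothetical names, caller assumed to hold the protecting lock:]

enum fqs_state { GP_IDLE, GP_INIT, SAVE_DYNTICK, FORCE_QS };

struct gp_ctl {
	enum fqs_state state;
	long completed;		/* count of completed grace periods */
};

/*
 * Caller holds the lock protecting @ctl but sampled @lastcomp before
 * briefly dropping it.  Advance only if neither the grace period nor
 * the state machine moved on in the meantime; otherwise a stale
 * caller would clobber a newer state.
 */
static void advance_to_force_qs(struct gp_ctl *ctl, long lastcomp)
{
	if (ctl->completed == lastcomp && ctl->state == SAVE_DYNTICK)
		ctl->state = FORCE_QS;
}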