diff options
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r-- | kernel/rcutree.c | 44 |
1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 705f02ac7433..0536125b0497 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -913,7 +913,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
913 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 913 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
914 | break; | 914 | break; |
915 | } | 915 | } |
916 | rcu_preempt_offline_tasks(rsp, rnp, rdp); | 916 | |
917 | /* | ||
918 | * If there was a task blocking the current grace period, | ||
919 | * and if all CPUs have checked in, we need to propagate | ||
920 | * the quiescent state up the rcu_node hierarchy. But that | ||
921 | * is inconvenient at the moment due to deadlock issues if | ||
922 | * this should end the current grace period. So set the | ||
923 | * offlined CPU's bit in ->qsmask in order to force the | ||
924 | * next force_quiescent_state() invocation to clean up this | ||
925 | * mess in a deadlock-free manner. | ||
926 | */ | ||
927 | if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask) | ||
928 | rnp->qsmask |= mask; | ||
929 | |||
917 | mask = rnp->grpmask; | 930 | mask = rnp->grpmask; |
918 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 931 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
919 | rnp = rnp->parent; | 932 | rnp = rnp->parent; |
@@ -958,7 +971,7 @@ static void rcu_offline_cpu(int cpu) | |||
958 | * Invoke any RCU callbacks that have made it to the end of their grace | 971 | * Invoke any RCU callbacks that have made it to the end of their grace |
959 | * period. Thottle as specified by rdp->blimit. | 972 | * period. Thottle as specified by rdp->blimit. |
960 | */ | 973 | */ |
961 | static void rcu_do_batch(struct rcu_data *rdp) | 974 | static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) |
962 | { | 975 | { |
963 | unsigned long flags; | 976 | unsigned long flags; |
964 | struct rcu_head *next, *list, **tail; | 977 | struct rcu_head *next, *list, **tail; |
@@ -1011,6 +1024,13 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
1011 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) | 1024 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) |
1012 | rdp->blimit = blimit; | 1025 | rdp->blimit = blimit; |
1013 | 1026 | ||
1027 | /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ | ||
1028 | if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { | ||
1029 | rdp->qlen_last_fqs_check = 0; | ||
1030 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1031 | } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) | ||
1032 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1033 | |||
1014 | local_irq_restore(flags); | 1034 | local_irq_restore(flags); |
1015 | 1035 | ||
1016 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | 1036 | /* Re-raise the RCU softirq if there are callbacks remaining. */ |
@@ -1224,7 +1244,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1224 | } | 1244 | } |
1225 | 1245 | ||
1226 | /* If there are callbacks ready, invoke them. */ | 1246 | /* If there are callbacks ready, invoke them. */ |
1227 | rcu_do_batch(rdp); | 1247 | rcu_do_batch(rsp, rdp); |
1228 | } | 1248 | } |
1229 | 1249 | ||
1230 | /* | 1250 | /* |
@@ -1288,10 +1308,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1288 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ | 1308 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ |
1289 | } | 1309 | } |
1290 | 1310 | ||
1291 | /* Force the grace period if too many callbacks or too long waiting. */ | 1311 | /* |
1292 | if (unlikely(++rdp->qlen > qhimark)) { | 1312 | * Force the grace period if too many callbacks or too long waiting. |
1313 | * Enforce hysteresis, and don't invoke force_quiescent_state() | ||
1314 | * if some other CPU has recently done so. Also, don't bother | ||
1315 | * invoking force_quiescent_state() if the newly enqueued callback | ||
1316 | * is the only one waiting for a grace period to complete. | ||
1317 | */ | ||
1318 | if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { | ||
1293 | rdp->blimit = LONG_MAX; | 1319 | rdp->blimit = LONG_MAX; |
1294 | force_quiescent_state(rsp, 0); | 1320 | if (rsp->n_force_qs == rdp->n_force_qs_snap && |
1321 | *rdp->nxttail[RCU_DONE_TAIL] != head) | ||
1322 | force_quiescent_state(rsp, 0); | ||
1323 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1324 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1295 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) | 1325 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) |
1296 | force_quiescent_state(rsp, 1); | 1326 | force_quiescent_state(rsp, 1); |
1297 | local_irq_restore(flags); | 1327 | local_irq_restore(flags); |
@@ -1523,6 +1553,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | |||
1523 | rdp->beenonline = 1; /* We have now been online. */ | 1553 | rdp->beenonline = 1; /* We have now been online. */ |
1524 | rdp->preemptable = preemptable; | 1554 | rdp->preemptable = preemptable; |
1525 | rdp->passed_quiesc_completed = lastcomp - 1; | 1555 | rdp->passed_quiesc_completed = lastcomp - 1; |
1556 | rdp->qlen_last_fqs_check = 0; | ||
1557 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1526 | rdp->blimit = blimit; | 1558 | rdp->blimit = blimit; |
1527 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1559 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
1528 | 1560 | ||