diff options
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r-- | kernel/rcu/tree.c | 114 |
1 file changed, 86 insertions(+), 28 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 121f833acd04..9180158756d2 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -207,6 +207,19 @@ static int rcu_gp_in_progress(void) | |||
207 | return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); | 207 | return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); |
208 | } | 208 | } |
209 | 209 | ||
210 | /* | ||
211 | * Return the number of callbacks queued on the specified CPU. | ||
212 | * Handles both the nocbs and normal cases. | ||
213 | */ | ||
214 | static long rcu_get_n_cbs_cpu(int cpu) | ||
215 | { | ||
216 | struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); | ||
217 | |||
218 | if (rcu_segcblist_is_enabled(&rdp->cblist)) /* Online normal CPU? */ | ||
219 | return rcu_segcblist_n_cbs(&rdp->cblist); | ||
220 | return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */ | ||
221 | } | ||
222 | |||
210 | void rcu_softirq_qs(void) | 223 | void rcu_softirq_qs(void) |
211 | { | 224 | { |
212 | rcu_qs(); | 225 | rcu_qs(); |
@@ -500,16 +513,29 @@ void rcu_force_quiescent_state(void) | |||
500 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | 513 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); |
501 | 514 | ||
502 | /* | 515 | /* |
516 | * Convert a ->gp_state value to a character string. | ||
517 | */ | ||
518 | static const char *gp_state_getname(short gs) | ||
519 | { | ||
520 | if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names)) | ||
521 | return "???"; | ||
522 | return gp_state_names[gs]; | ||
523 | } | ||
524 | |||
525 | /* | ||
503 | * Show the state of the grace-period kthreads. | 526 | * Show the state of the grace-period kthreads. |
504 | */ | 527 | */ |
505 | void show_rcu_gp_kthreads(void) | 528 | void show_rcu_gp_kthreads(void) |
506 | { | 529 | { |
507 | int cpu; | 530 | int cpu; |
531 | unsigned long j; | ||
508 | struct rcu_data *rdp; | 532 | struct rcu_data *rdp; |
509 | struct rcu_node *rnp; | 533 | struct rcu_node *rnp; |
510 | 534 | ||
511 | pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name, | 535 | j = jiffies - READ_ONCE(rcu_state.gp_activity); |
512 | rcu_state.gp_state, rcu_state.gp_kthread->state); | 536 | pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n", |
537 | rcu_state.name, gp_state_getname(rcu_state.gp_state), | ||
538 | rcu_state.gp_state, rcu_state.gp_kthread->state, j); | ||
513 | rcu_for_each_node_breadth_first(rnp) { | 539 | rcu_for_each_node_breadth_first(rnp) { |
514 | if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) | 540 | if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) |
515 | continue; | 541 | continue; |
@@ -891,12 +917,12 @@ void rcu_irq_enter_irqson(void) | |||
891 | } | 917 | } |
892 | 918 | ||
893 | /** | 919 | /** |
894 | * rcu_is_watching - see if RCU thinks that the current CPU is idle | 920 | * rcu_is_watching - see if RCU thinks that the current CPU is not idle |
895 | * | 921 | * |
896 | * Return true if RCU is watching the running CPU, which means that this | 922 | * Return true if RCU is watching the running CPU, which means that this |
897 | * CPU can safely enter RCU read-side critical sections. In other words, | 923 | * CPU can safely enter RCU read-side critical sections. In other words, |
898 | * if the current CPU is in its idle loop and is neither in an interrupt | 924 | * if the current CPU is not in its idle loop or is in an interrupt or |
899 | * or NMI handler, return true. | 925 | * NMI handler, return true. |
900 | */ | 926 | */ |
901 | bool notrace rcu_is_watching(void) | 927 | bool notrace rcu_is_watching(void) |
902 | { | 928 | { |
@@ -1143,16 +1169,6 @@ static void record_gp_stall_check_time(void) | |||
1143 | } | 1169 | } |
1144 | 1170 | ||
1145 | /* | 1171 | /* |
1146 | * Convert a ->gp_state value to a character string. | ||
1147 | */ | ||
1148 | static const char *gp_state_getname(short gs) | ||
1149 | { | ||
1150 | if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names)) | ||
1151 | return "???"; | ||
1152 | return gp_state_names[gs]; | ||
1153 | } | ||
1154 | |||
1155 | /* | ||
1156 | * Complain about starvation of grace-period kthread. | 1172 | * Complain about starvation of grace-period kthread. |
1157 | */ | 1173 | */ |
1158 | static void rcu_check_gp_kthread_starvation(void) | 1174 | static void rcu_check_gp_kthread_starvation(void) |
@@ -1262,8 +1278,7 @@ static void print_other_cpu_stall(unsigned long gp_seq) | |||
1262 | 1278 | ||
1263 | print_cpu_stall_info_end(); | 1279 | print_cpu_stall_info_end(); |
1264 | for_each_possible_cpu(cpu) | 1280 | for_each_possible_cpu(cpu) |
1265 | totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, | 1281 | totqlen += rcu_get_n_cbs_cpu(cpu); |
1266 | cpu)->cblist); | ||
1267 | pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", | 1282 | pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", |
1268 | smp_processor_id(), (long)(jiffies - rcu_state.gp_start), | 1283 | smp_processor_id(), (long)(jiffies - rcu_state.gp_start), |
1269 | (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); | 1284 | (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); |
@@ -1323,8 +1338,7 @@ static void print_cpu_stall(void) | |||
1323 | raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); | 1338 | raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); |
1324 | print_cpu_stall_info_end(); | 1339 | print_cpu_stall_info_end(); |
1325 | for_each_possible_cpu(cpu) | 1340 | for_each_possible_cpu(cpu) |
1326 | totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, | 1341 | totqlen += rcu_get_n_cbs_cpu(cpu); |
1327 | cpu)->cblist); | ||
1328 | pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n", | 1342 | pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n", |
1329 | jiffies - rcu_state.gp_start, | 1343 | jiffies - rcu_state.gp_start, |
1330 | (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); | 1344 | (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); |
@@ -1986,7 +2000,8 @@ static void rcu_gp_cleanup(void) | |||
1986 | 2000 | ||
1987 | WRITE_ONCE(rcu_state.gp_activity, jiffies); | 2001 | WRITE_ONCE(rcu_state.gp_activity, jiffies); |
1988 | raw_spin_lock_irq_rcu_node(rnp); | 2002 | raw_spin_lock_irq_rcu_node(rnp); |
1989 | gp_duration = jiffies - rcu_state.gp_start; | 2003 | rcu_state.gp_end = jiffies; |
2004 | gp_duration = rcu_state.gp_end - rcu_state.gp_start; | ||
1990 | if (gp_duration > rcu_state.gp_max) | 2005 | if (gp_duration > rcu_state.gp_max) |
1991 | rcu_state.gp_max = gp_duration; | 2006 | rcu_state.gp_max = gp_duration; |
1992 | 2007 | ||
@@ -2032,9 +2047,9 @@ static void rcu_gp_cleanup(void) | |||
2032 | rnp = rcu_get_root(); | 2047 | rnp = rcu_get_root(); |
2033 | raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ | 2048 | raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ |
2034 | 2049 | ||
2035 | /* Declare grace period done. */ | 2050 | /* Declare grace period done, trace first to use old GP number. */ |
2036 | rcu_seq_end(&rcu_state.gp_seq); | ||
2037 | trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); | 2051 | trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); |
2052 | rcu_seq_end(&rcu_state.gp_seq); | ||
2038 | rcu_state.gp_state = RCU_GP_IDLE; | 2053 | rcu_state.gp_state = RCU_GP_IDLE; |
2039 | /* Check for GP requests since above loop. */ | 2054 | /* Check for GP requests since above loop. */ |
2040 | rdp = this_cpu_ptr(&rcu_data); | 2055 | rdp = this_cpu_ptr(&rcu_data); |
@@ -2600,10 +2615,10 @@ static void force_quiescent_state(void) | |||
2600 | * This function checks for grace-period requests that fail to motivate | 2615 | * This function checks for grace-period requests that fail to motivate |
2601 | * RCU to come out of its idle mode. | 2616 | * RCU to come out of its idle mode. |
2602 | */ | 2617 | */ |
2603 | static void | 2618 | void |
2604 | rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) | 2619 | rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp, |
2620 | const unsigned long gpssdelay) | ||
2605 | { | 2621 | { |
2606 | const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ; | ||
2607 | unsigned long flags; | 2622 | unsigned long flags; |
2608 | unsigned long j; | 2623 | unsigned long j; |
2609 | struct rcu_node *rnp_root = rcu_get_root(); | 2624 | struct rcu_node *rnp_root = rcu_get_root(); |
@@ -2655,6 +2670,48 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) | |||
2655 | } | 2670 | } |
2656 | 2671 | ||
2657 | /* | 2672 | /* |
2673 | * Do a forward-progress check for rcutorture. This is normally invoked | ||
2674 | * due to an OOM event. The argument "j" gives the time period during | ||
2675 | * which rcutorture would like progress to have been made. | ||
2676 | */ | ||
2677 | void rcu_fwd_progress_check(unsigned long j) | ||
2678 | { | ||
2679 | unsigned long cbs; | ||
2680 | int cpu; | ||
2681 | unsigned long max_cbs = 0; | ||
2682 | int max_cpu = -1; | ||
2683 | struct rcu_data *rdp; | ||
2684 | |||
2685 | if (rcu_gp_in_progress()) { | ||
2686 | pr_info("%s: GP age %lu jiffies\n", | ||
2687 | __func__, jiffies - rcu_state.gp_start); | ||
2688 | show_rcu_gp_kthreads(); | ||
2689 | } else { | ||
2690 | pr_info("%s: Last GP end %lu jiffies ago\n", | ||
2691 | __func__, jiffies - rcu_state.gp_end); | ||
2692 | preempt_disable(); | ||
2693 | rdp = this_cpu_ptr(&rcu_data); | ||
2694 | rcu_check_gp_start_stall(rdp->mynode, rdp, j); | ||
2695 | preempt_enable(); | ||
2696 | } | ||
2697 | for_each_possible_cpu(cpu) { | ||
2698 | cbs = rcu_get_n_cbs_cpu(cpu); | ||
2699 | if (!cbs) | ||
2700 | continue; | ||
2701 | if (max_cpu < 0) | ||
2702 | pr_info("%s: callbacks", __func__); | ||
2703 | pr_cont(" %d: %lu", cpu, cbs); | ||
2704 | if (cbs <= max_cbs) | ||
2705 | continue; | ||
2706 | max_cbs = cbs; | ||
2707 | max_cpu = cpu; | ||
2708 | } | ||
2709 | if (max_cpu >= 0) | ||
2710 | pr_cont("\n"); | ||
2711 | } | ||
2712 | EXPORT_SYMBOL_GPL(rcu_fwd_progress_check); | ||
2713 | |||
2714 | /* | ||
2658 | * This does the RCU core processing work for the specified rcu_data | 2715 | * This does the RCU core processing work for the specified rcu_data |
2659 | * structures. This may be called only from the CPU to whom the rdp | 2716 | * structures. This may be called only from the CPU to whom the rdp |
2660 | * belongs. | 2717 | * belongs. |
@@ -2690,7 +2747,7 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused | |||
2690 | local_irq_restore(flags); | 2747 | local_irq_restore(flags); |
2691 | } | 2748 | } |
2692 | 2749 | ||
2693 | rcu_check_gp_start_stall(rnp, rdp); | 2750 | rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); |
2694 | 2751 | ||
2695 | /* If there are callbacks ready, invoke them. */ | 2752 | /* If there are callbacks ready, invoke them. */ |
2696 | if (rcu_segcblist_ready_cbs(&rdp->cblist)) | 2753 | if (rcu_segcblist_ready_cbs(&rdp->cblist)) |
@@ -2826,7 +2883,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy) | |||
2826 | * Very early boot, before rcu_init(). Initialize if needed | 2883 | * Very early boot, before rcu_init(). Initialize if needed |
2827 | * and then drop through to queue the callback. | 2884 | * and then drop through to queue the callback. |
2828 | */ | 2885 | */ |
2829 | BUG_ON(cpu != -1); | 2886 | WARN_ON_ONCE(cpu != -1); |
2830 | WARN_ON_ONCE(!rcu_is_watching()); | 2887 | WARN_ON_ONCE(!rcu_is_watching()); |
2831 | if (rcu_segcblist_empty(&rdp->cblist)) | 2888 | if (rcu_segcblist_empty(&rdp->cblist)) |
2832 | rcu_segcblist_init(&rdp->cblist); | 2889 | rcu_segcblist_init(&rdp->cblist); |
@@ -3485,7 +3542,8 @@ static int __init rcu_spawn_gp_kthread(void) | |||
3485 | 3542 | ||
3486 | rcu_scheduler_fully_active = 1; | 3543 | rcu_scheduler_fully_active = 1; |
3487 | t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); | 3544 | t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); |
3488 | BUG_ON(IS_ERR(t)); | 3545 | if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) |
3546 | return 0; | ||
3489 | rnp = rcu_get_root(); | 3547 | rnp = rcu_get_root(); |
3490 | raw_spin_lock_irqsave_rcu_node(rnp, flags); | 3548 | raw_spin_lock_irqsave_rcu_node(rnp, flags); |
3491 | rcu_state.gp_kthread = t; | 3549 | rcu_state.gp_kthread = t; |