 kernel/rcu/tree.c        | 12 ++++++++++--
 kernel/rcu/tree.h        | 12 +++++++++---
 kernel/rcu/tree_plugin.h | 25 ++++++++++++++++++++-----
 3 files changed, 39 insertions(+), 10 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5f1a11f1f7bc..a2503ef1bbe2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1954,7 +1954,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		rcu_gp_slow(rsp, gp_init_delay);
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rdp = this_cpu_ptr(rsp->rda);
-		rcu_preempt_check_blocked_tasks(rnp);
+		rcu_preempt_check_blocked_tasks(rsp, rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
 		if (rnp == rdp->mynode)
@@ -2063,7 +2063,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-			dump_blkd_tasks(rnp, 10);
+			dump_blkd_tasks(rsp, rnp, 10);
 		WARN_ON_ONCE(rnp->qsmask);
 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
 		rdp = this_cpu_ptr(rsp->rda);
@@ -3516,6 +3516,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
+	rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
+	rdp->rcu_onl_gp_seq = rsp->gp_seq;
+	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	rcu_boot_init_nocb_percpu_data(rdp);
@@ -3711,6 +3715,8 @@ void rcu_cpu_starting(unsigned int cpu)
 		/* Allow lockless access for expedited grace periods. */
 		smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
 		rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+		rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+		rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
 		if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
 			/* Report QS -after- changing ->qsmaskinitnext! */
 			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
@@ -3738,6 +3744,8 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	mask = rdp->grpmask;
 	spin_lock(&rsp->ofl_lock);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+	rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
+	rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
 		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 795d469c6f67..f52bc059bfec 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -255,12 +255,16 @@ struct rcu_data {
 					/* Leader CPU takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-	/* 7) RCU CPU stall data. */
+	/* 7) Diagnostic data, including RCU CPU stall warnings. */
 	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
 	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
 	struct irq_work rcu_iw;		/* Check for non-irq activity. */
 	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
 	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
+	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
+	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
+	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
+	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
 
 	int cpu;
 	struct rcu_state *rsp;
@@ -431,11 +435,13 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
+					    struct rcu_node *rnp);
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
 static void __init __rcu_init_preempt(void);
-static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
+static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
+			    int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index f45ff97b0d51..613372246a07 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -699,13 +699,14 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * Also, if there are blocked tasks on the list, they automatically
  * block the newly created grace period, so set up ->gp_tasks accordingly.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 	struct task_struct *t;
 
 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
 	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-		dump_blkd_tasks(rnp, 10);
+		dump_blkd_tasks(rsp, rnp, 10);
 	if (rcu_preempt_has_tasks(rnp) &&
 	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
 		rnp->gp_tasks = rnp->blkd_tasks.next;
@@ -854,10 +855,14 @@ void exit_rcu(void)
  * Dump the blocked-tasks state, but limit the list dump to the
  * specified number of elements.
  */
-static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
 {
+	int cpu;
 	int i;
 	struct list_head *lhp;
+	bool onl;
+	struct rcu_data *rdp;
 	struct rcu_node *rnp1;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
@@ -877,6 +882,14 @@ static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 			break;
 	}
 	pr_cont("\n");
+	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
+		rdp = per_cpu_ptr(rsp->rda, cpu);
+		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
+			cpu, ".o"[onl],
+			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+	}
 }
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -949,7 +962,8 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * so there is no need to check for blocked tasks. So check only for
  * bogus qsmask values.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 	WARN_ON_ONCE(rnp->qsmask);
 }
@@ -990,7 +1004,8 @@ void exit_rcu(void)
 /*
  * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
  */
-static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
 {
 	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
 }
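
For reference, the new per-CPU loop at the end of the preemptible-RCU dump_blkd_tasks() prints one diagnostic line for each CPU in the leaf rcu_node's ->grplo..->grphi range. A hypothetical console line (the CPU number, grace-period sequence numbers, and flag values below are illustrative only, not taken from this patch) would look like:

	3: o online: 7160(0) offline: 7156(4)

The character after the CPU number comes from ".o"[onl]: 'o' means the CPU's ->grpmask bit is still set in rcu_rnp_online_cpus(rnp) (online), '.' means it is not. The first %ld(%d) pair is the ->gp_seq and ->gp_flags snapshot recorded at the CPU's most recent online transition (->rcu_onl_gp_seq and ->rcu_onl_gp_flags), and the second pair is the snapshot recorded at its most recent offline transition (->rcu_ofl_gp_seq and ->rcu_ofl_gp_flags).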
