diff options

 kernel/rcu/tree.c        | 16 ++++++++--------
 kernel/rcu/tree_plugin.h | 10 +++++-----
 2 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3e3650e94ae6..08fa586d7f8c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -734,7 +734,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
 	int *fp = &rnp->need_future_gp[idx];
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	return READ_ONCE(*fp);
 }
 
@@ -746,7 +746,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 static bool
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	if (rcu_gp_in_progress(rsp))
 		return false; /* No, a grace period is already in progress. */
 	if (rcu_future_needs_gp(rsp))
@@ -773,7 +773,7 @@ static void rcu_eqs_enter_common(bool user)
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
@@ -840,7 +840,7 @@ static void rcu_eqs_enter(bool user)
  */
 void rcu_idle_enter(void)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_idle_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rcu_eqs_enter(false);
 }
 
@@ -855,7 +855,7 @@ void rcu_idle_enter(void)
  */
 void rcu_user_enter(void)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_user_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rcu_eqs_enter(true);
 }
 #endif /* CONFIG_NO_HZ_FULL */
@@ -880,7 +880,7 @@ void rcu_irq_exit(void)
 {
 	struct rcu_dynticks *rdtp;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* Page faults can happen in NMI handlers, so check... */
@@ -947,7 +947,7 @@ static void rcu_eqs_exit(bool user)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
@@ -1018,7 +1018,7 @@ void rcu_irq_enter(void)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* Page faults can happen in NMI handlers, so check... */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e012b9be777e..df08e5c8126b 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -325,7 +325,7 @@ static void rcu_preempt_note_context_switch(bool preempt)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_preempt_note_context_switch() invoked with interrupts enabled!!!\n");
+	lockdep_assert_irqs_disabled();
 	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
 	if (t->rcu_read_lock_nesting > 0 &&
 	    !t->rcu_read_unlock_special.b.blocked) {
@@ -1421,7 +1421,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	unsigned long dj;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_needs_cpu() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 
 	/* Snapshot to detect later posting of non-lazy callback. */
 	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
@@ -1470,7 +1470,7 @@ static void rcu_prepare_for_idle(void)
 	struct rcu_state *rsp;
 	int tne;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_prepare_for_idle() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 
@@ -1525,7 +1525,7 @@ static void rcu_prepare_for_idle(void)
  */
 static void rcu_cleanup_after_idle(void)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_cleanup_after_idle() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 	if (rcu_try_advance_all_cbs())
@@ -2012,7 +2012,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
 						     struct rcu_data *rdp,
 						     unsigned long flags)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_nocb_adopt_orphan_cbs() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	if (!rcu_is_nocb_cpu(smp_processor_id()))
 		return false; /* Not NOCBs CPU, caller must migrate CBs. */
 	__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
