diff options
Diffstat (limited to 'kernel/rcu/tree.c')
 kernel/rcu/tree.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e4fe06d42385..f9c0ca2ccf0c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -734,7 +734,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
 	int *fp = &rnp->need_future_gp[idx];
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	return READ_ONCE(*fp);
 }
 
@@ -746,7 +746,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 static bool
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	if (rcu_gp_in_progress(rsp))
 		return false; /* No, a grace period is already in progress. */
 	if (rcu_future_needs_gp(rsp))
@@ -773,7 +773,7 @@ static void rcu_eqs_enter_common(bool user)
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
@@ -843,7 +843,7 @@ static void rcu_eqs_enter(bool user)
  */
 void rcu_idle_enter(void)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_idle_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rcu_eqs_enter(false);
 }
 
@@ -861,7 +861,7 @@ void rcu_idle_enter(void)
  */
 void rcu_user_enter(void)
 {
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_user_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rcu_eqs_enter(true);
 }
 #endif /* CONFIG_NO_HZ_FULL */
@@ -889,7 +889,7 @@ void rcu_irq_exit(void)
 {
 	struct rcu_dynticks *rdtp;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* Page faults can happen in NMI handlers, so check... */
@@ -959,7 +959,7 @@ static void rcu_eqs_exit(bool user)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
@@ -1039,7 +1039,7 @@ void rcu_irq_enter(void)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* Page faults can happen in NMI handlers, so check... */