aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2017-04-28 15:32:15 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2017-06-08 11:25:37 -0400
commitc0b334c5bfa98ab104bde38da330a113a6c7dd56 (patch)
tree49136e8c2b34f7881aa30f412d9d3615679ae472 /kernel/rcu/tree.c
parent0c8e0e3c37955d17cced37222a10c00ab47efd4b (diff)
rcu: Add lockdep_assert_held() teeth to tree.c
Comments can be helpful, but assertions carry more force. This commit therefore adds lockdep_assert_held() and RCU_LOCKDEP_WARN() calls to enforce lock-held and interrupt-disabled preconditions.
Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--kernel/rcu/tree.c25
1 file changed, 25 insertions, 0 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index ac8dce15fd74..121c1436a7f3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -762,6 +762,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
762 int idx = (READ_ONCE(rnp->completed) + 1) & 0x1; 762 int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
763 int *fp = &rnp->need_future_gp[idx]; 763 int *fp = &rnp->need_future_gp[idx];
764 764
765 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
765 return READ_ONCE(*fp); 766 return READ_ONCE(*fp);
766} 767}
767 768
@@ -773,6 +774,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
773static bool 774static bool
774cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) 775cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
775{ 776{
777 RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
776 if (rcu_gp_in_progress(rsp)) 778 if (rcu_gp_in_progress(rsp))
777 return false; /* No, a grace period is already in progress. */ 779 return false; /* No, a grace period is already in progress. */
778 if (rcu_future_needs_gp(rsp)) 780 if (rcu_future_needs_gp(rsp))
@@ -799,6 +801,7 @@ static void rcu_eqs_enter_common(bool user)
799 struct rcu_data *rdp; 801 struct rcu_data *rdp;
800 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 802 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
801 803
804 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
802 trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0); 805 trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
803 if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 806 if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
804 !user && !is_idle_task(current)) { 807 !user && !is_idle_task(current)) {
@@ -972,6 +975,7 @@ static void rcu_eqs_exit(bool user)
972 struct rcu_dynticks *rdtp; 975 struct rcu_dynticks *rdtp;
973 long long oldval; 976 long long oldval;
974 977
978 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
975 rdtp = this_cpu_ptr(&rcu_dynticks); 979 rdtp = this_cpu_ptr(&rcu_dynticks);
976 oldval = rdtp->dynticks_nesting; 980 oldval = rdtp->dynticks_nesting;
977 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); 981 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
@@ -1679,6 +1683,8 @@ void rcu_cpu_stall_reset(void)
1679static unsigned long rcu_cbs_completed(struct rcu_state *rsp, 1683static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1680 struct rcu_node *rnp) 1684 struct rcu_node *rnp)
1681{ 1685{
1686 lockdep_assert_held(&rnp->lock);
1687
1682 /* 1688 /*
1683 * If RCU is idle, we just wait for the next grace period. 1689 * If RCU is idle, we just wait for the next grace period.
1684 * But we can only be sure that RCU is idle if we are looking 1690 * But we can only be sure that RCU is idle if we are looking
@@ -1724,6 +1730,8 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1724 bool ret = false; 1730 bool ret = false;
1725 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp); 1731 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1726 1732
1733 lockdep_assert_held(&rnp->lock);
1734
1727 /* 1735 /*
1728 * Pick up grace-period number for new callbacks. If this 1736 * Pick up grace-period number for new callbacks. If this
1729 * grace period is already marked as needed, return to the caller. 1737 * grace period is already marked as needed, return to the caller.
@@ -1850,6 +1858,8 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1850{ 1858{
1851 bool ret = false; 1859 bool ret = false;
1852 1860
1861 lockdep_assert_held(&rnp->lock);
1862
1853 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1863 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1854 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1864 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1855 return false; 1865 return false;
@@ -1888,6 +1898,8 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1888static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, 1898static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1889 struct rcu_data *rdp) 1899 struct rcu_data *rdp)
1890{ 1900{
1901 lockdep_assert_held(&rnp->lock);
1902
1891 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1903 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1892 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1904 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1893 return false; 1905 return false;
@@ -1914,6 +1926,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1914 bool ret; 1926 bool ret;
1915 bool need_gp; 1927 bool need_gp;
1916 1928
1929 lockdep_assert_held(&rnp->lock);
1930
1917 /* Handle the ends of any preceding grace periods first. */ 1931 /* Handle the ends of any preceding grace periods first. */
1918 if (rdp->completed == rnp->completed && 1932 if (rdp->completed == rnp->completed &&
1919 !unlikely(READ_ONCE(rdp->gpwrap))) { 1933 !unlikely(READ_ONCE(rdp->gpwrap))) {
@@ -2346,6 +2360,7 @@ static bool
2346rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, 2360rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
2347 struct rcu_data *rdp) 2361 struct rcu_data *rdp)
2348{ 2362{
2363 lockdep_assert_held(&rnp->lock);
2349 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) { 2364 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
2350 /* 2365 /*
2351 * Either we have not yet spawned the grace-period 2366 * Either we have not yet spawned the grace-period
@@ -2407,6 +2422,7 @@ static bool rcu_start_gp(struct rcu_state *rsp)
2407static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) 2422static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
2408 __releases(rcu_get_root(rsp)->lock) 2423 __releases(rcu_get_root(rsp)->lock)
2409{ 2424{
2425 lockdep_assert_held(&rcu_get_root(rsp)->lock);
2410 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); 2426 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
2411 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); 2427 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2412 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags); 2428 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
@@ -2431,6 +2447,8 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
2431 unsigned long oldmask = 0; 2447 unsigned long oldmask = 0;
2432 struct rcu_node *rnp_c; 2448 struct rcu_node *rnp_c;
2433 2449
2450 lockdep_assert_held(&rnp->lock);
2451
2434 /* Walk up the rcu_node hierarchy. */ 2452 /* Walk up the rcu_node hierarchy. */
2435 for (;;) { 2453 for (;;) {
2436 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) { 2454 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
@@ -2491,6 +2509,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
2491 unsigned long mask; 2509 unsigned long mask;
2492 struct rcu_node *rnp_p; 2510 struct rcu_node *rnp_p;
2493 2511
2512 lockdep_assert_held(&rnp->lock);
2494 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p || 2513 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
2495 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2514 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2496 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2515 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2604,6 +2623,8 @@ static void
2604rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp, 2623rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
2605 struct rcu_node *rnp, struct rcu_data *rdp) 2624 struct rcu_node *rnp, struct rcu_data *rdp)
2606{ 2625{
2626 lockdep_assert_held(&rsp->orphan_lock);
2627
2607 /* No-CBs CPUs do not have orphanable callbacks. */ 2628 /* No-CBs CPUs do not have orphanable callbacks. */
2608 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu)) 2629 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
2609 return; 2630 return;
@@ -2644,6 +2665,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
2644{ 2665{
2645 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 2666 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2646 2667
2668 lockdep_assert_held(&rsp->orphan_lock);
2669
2647 /* No-CBs CPUs are handled specially. */ 2670 /* No-CBs CPUs are handled specially. */
2648 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2671 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2649 rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags)) 2672 rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
@@ -2710,6 +2733,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2710 long mask; 2733 long mask;
2711 struct rcu_node *rnp = rnp_leaf; 2734 struct rcu_node *rnp = rnp_leaf;
2712 2735
2736 lockdep_assert_held(&rnp->lock);
2713 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2737 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2714 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp)) 2738 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2715 return; 2739 return;
@@ -3703,6 +3727,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3703 long mask; 3727 long mask;
3704 struct rcu_node *rnp = rnp_leaf; 3728 struct rcu_node *rnp = rnp_leaf;
3705 3729
3730 lockdep_assert_held(&rnp->lock);
3706 for (;;) { 3731 for (;;) {
3707 mask = rnp->grpmask; 3732 mask = rnp->grpmask;
3708 rnp = rnp->parent; 3733 rnp = rnp->parent;