author	Matthew Wilcox <mawilcox@microsoft.com>	2018-01-17 09:24:30 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-02-20 19:12:26 -0500
commit	a32e01ee689794a26bdfdbaa7e8c334576cee36c (patch)
tree	7c6f36a0dd649bbbfc40a85e90fd4ba277044b88
parent	65518db86b9ed1180b013c8a34c73c6ff7275886 (diff)
rcu: Use wrapper for lockdep asserts
Commits c0b334c5bfa9 and ea9b0c8a26a2 introduced new sparse warnings by accessing rcu_node->lock directly and ignoring the __private marker. Introduce a new wrapper and use it. Also fix a similar problem in srcutree.c introduced by a3883df3935e.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--	kernel/rcu/rcu.h	5
-rw-r--r--	kernel/rcu/srcutree.c	2
-rw-r--r--	kernel/rcu/tree.c	24
-rw-r--r--	kernel/rcu/tree_plugin.h	4
4 files changed, 19 insertions, 16 deletions
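For context on why the plain lockdep_assert_held() calls upset sparse: the rcu_node ->lock field is declared with the kernel's __private marker, so taking &rnp->lock directly is flagged and the field has to be reached through ACCESS_PRIVATE(). The sketch below only approximates the kernel macros (the real ones are in include/linux/compiler.h); struct fake_node and assert_node_locked() are invented stand-ins so the example compiles on its own, but they follow the same pattern as the raw_lockdep_assert_held_rcu_node() wrapper added by this commit.

/*
 * Minimal sketch of the sparse __private / ACCESS_PRIVATE pattern.
 * Under sparse (__CHECKER__ defined), __private adds a noderef attribute,
 * so direct accesses to the member warn; ACCESS_PRIVATE() casts the
 * attribute away at the few places that are allowed to touch the field.
 */
#include <assert.h>

#ifdef __CHECKER__
# define __private	__attribute__((noderef))
# define __force	__attribute__((force))
# define ACCESS_PRIVATE(p, member) \
	(*((typeof((p)->member) __force *)&(p)->member))
#else
# define __private
# define __force
# define ACCESS_PRIVATE(p, member)	((p)->member)
#endif

struct fake_node {
	int __private lock;	/* stand-in for the rcu_node ->lock field */
};

/*
 * Wrapper in the spirit of raw_lockdep_assert_held_rcu_node(): it strips
 * the __private marker in exactly one sanctioned place, so callers never
 * need to name the private field directly.
 */
#define assert_node_locked(p)	assert(ACCESS_PRIVATE(p, lock) == 1)

int main(void)
{
	struct fake_node n;

	ACCESS_PRIVATE(&n, lock) = 1;	/* clean under sparse */
	/* n.lock = 1;  <- direct access: sparse warns about __private */
	assert_node_locked(&n);
	return 0;
}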
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index c90812673d54..5d13f651cf08 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -337,7 +337,7 @@ do { \
 } while (0)
 
 #define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
-	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \
+	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
 
 #define raw_spin_trylock_rcu_node(p) \
 ({ \
@@ -348,6 +348,9 @@ do { \
 	___locked; \
 })
 
+#define raw_lockdep_assert_held_rcu_node(p) \
+	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
+
 #endif /* #if defined(SRCU) || !defined(TINY_RCU) */
 
 #ifdef CONFIG_TINY_RCU
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index d5cea81378cc..9c6e0eaab4d5 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -439,7 +439,7 @@ static void srcu_gp_start(struct srcu_struct *sp)
 	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
 	int state;
 
-	lockdep_assert_held(&sp->lock);
+	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
 	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4d7c727020f0..99d404c6bbbb 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1161,7 +1161,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
  */
 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
 {
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
 		WRITE_ONCE(rdp->gpwrap, true);
 	if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
@@ -1629,7 +1629,7 @@ void rcu_cpu_stall_reset(void)
 static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
 				       struct rcu_node *rnp)
 {
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 
 	/*
 	 * If RCU is idle, we just wait for the next grace period.
@@ -1676,7 +1676,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	bool ret = false;
 	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 
 	/*
 	 * Pick up grace-period number for new callbacks.  If this
@@ -1804,7 +1804,7 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 {
 	bool ret = false;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 
 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
@@ -1844,7 +1844,7 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 			    struct rcu_data *rdp)
 {
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 
 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
@@ -1872,7 +1872,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 	bool ret;
 	bool need_gp;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 
 	/* Handle the ends of any preceding grace periods first. */
 	if (rdp->completed == rnp->completed &&
@@ -2297,7 +2297,7 @@ static bool
 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 		      struct rcu_data *rdp)
 {
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
 		/*
 		 * Either we have not yet spawned the grace-period
@@ -2359,7 +2359,7 @@ static bool rcu_start_gp(struct rcu_state *rsp)
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
-	lockdep_assert_held(&rcu_get_root(rsp)->lock);
+	raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
@@ -2384,7 +2384,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 	unsigned long oldmask = 0;
 	struct rcu_node *rnp_c;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 
 	/* Walk up the rcu_node hierarchy. */
 	for (;;) {
@@ -2448,7 +2448,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	unsigned long mask;
 	struct rcu_node *rnp_p;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
 	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2593,7 +2593,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 	long mask;
 	struct rcu_node *rnp = rnp_leaf;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
 	    rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
 		return;
@@ -3596,7 +3596,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 	long mask;
 	struct rcu_node *rnp = rnp_leaf;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	for (;;) {
 		mask = rnp->grpmask;
 		rnp = rnp->parent;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1c2d58a85511..84fbee4686d3 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -180,7 +180,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 			     (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
 	struct task_struct *t = current;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	WARN_ON_ONCE(rdp->mynode != rnp);
 	WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
 
@@ -1044,7 +1044,7 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
 	struct task_struct *t;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;