author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-10-10 16:52:30 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-11-28 18:52:33 -0500
commit		d633198088bd9e358566c470ed182994403acc7a (patch)
tree		41a5a7824e5e52f81c5c7a8794e80c6b5bd62834
parent		4fbd8d194f06c8a3fd2af1ce560ddb31f7ec8323 (diff)
srcu: Prohibit call_srcu() use under raw spinlocks
Invoking queue_delayed_work() while holding a raw spinlock is forbidden
in -rt kernels, which is exactly what __call_srcu() does, indirectly via
srcu_funnel_gp_start().  This commit therefore downgrades Tree SRCU's
locking from raw to non-raw spinlocks, which works because call_srcu()
is not ever called while holding a raw spinlock.

Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
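For readers unfamiliar with the -rt rule being cited, here is a minimal
illustrative sketch (not part of this patch; the names my_raw_lock, my_lock,
my_wq_fn, and my_work are made up).  On PREEMPT_RT, spinlock_t becomes a
sleeping lock while raw_spinlock_t remains a true spinning lock, and
queue_delayed_work() acquires a spinlock_t inside the workqueue code, so it
may sleep and therefore must not run under a raw spinlock:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_RAW_SPINLOCK(my_raw_lock);
static DEFINE_SPINLOCK(my_lock);

static void my_wq_fn(struct work_struct *w) { }
static DECLARE_DELAYED_WORK(my_work, my_wq_fn);

static void bad_on_rt(void)
{
	raw_spin_lock(&my_raw_lock);
	/* Workqueue internals take a spinlock_t, which sleeps on -rt: forbidden. */
	queue_delayed_work(system_power_efficient_wq, &my_work, 1);
	raw_spin_unlock(&my_raw_lock);
}

static void fine_on_rt(void)
{
	spin_lock(&my_lock);
	/* Both locks are sleeping locks on -rt, so this nesting is legal. */
	queue_delayed_work(system_power_efficient_wq, &my_work, 1);
	spin_unlock(&my_lock);
}

Downgrading the SRCU locks to spinlock_t, as the diff below does, puts the
queue_delayed_work() call reached from srcu_funnel_gp_start() into the
second, legal pattern.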
-rw-r--r--	include/linux/srcutree.h	8
-rw-r--r--	kernel/rcu/srcutree.c	109
2 files changed, 72 insertions(+), 45 deletions(-)
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index a949f4f9e4d7..4eda108abee0 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
 	unsigned long srcu_unlock_count[2];	/* Unlocks per CPU. */
 
 	/* Update-side state. */
-	raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
+	spinlock_t __private lock ____cacheline_internodealigned_in_smp;
 	struct rcu_segcblist srcu_cblist;	/* List of callbacks.*/
 	unsigned long srcu_gp_seq_needed;	/* Furthest future GP needed. */
 	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
  * Node in SRCU combining tree, similar in function to rcu_data.
  */
 struct srcu_node {
-	raw_spinlock_t __private lock;
+	spinlock_t __private lock;
 	unsigned long srcu_have_cbs[4];		/* GP seq for children */
 						/*  having CBs, but only */
 						/*  is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
 	struct srcu_node *level[RCU_NUM_LVLS + 1];
 						/* First node at each level. */
 	struct mutex srcu_cb_mutex;		/* Serialize CB preparation. */
-	raw_spinlock_t __private lock;		/* Protect counters */
+	spinlock_t __private lock;		/* Protect counters */
 	struct mutex srcu_gp_mutex;		/* Serialize GP work. */
 	unsigned int srcu_idx;			/* Current rdr array element. */
 	unsigned long srcu_gp_seq;		/* Grace-period seq #. */
@@ -107,7 +107,7 @@ struct srcu_struct {
 #define __SRCU_STRUCT_INIT(name)					\
 	{								\
 		.sda = &name##_srcu_data,				\
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
+		.lock = __SPIN_LOCK_UNLOCKED(name.lock),		\
 		.srcu_gp_seq_needed = 0 - 1,				\
 		__SRCU_DEP_MAP_INIT(name)				\
 	}
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 6d5880089ff6..d5cea81378cc 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -53,6 +53,33 @@ static void srcu_invoke_callbacks(struct work_struct *work);
 static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
 static void process_srcu(struct work_struct *work);
 
+/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
+#define spin_lock_rcu_node(p)						\
+do {									\
+	spin_lock(&ACCESS_PRIVATE(p, lock));				\
+	smp_mb__after_unlock_lock();					\
+} while (0)
+
+#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
+
+#define spin_lock_irq_rcu_node(p)					\
+do {									\
+	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
+	smp_mb__after_unlock_lock();					\
+} while (0)
+
+#define spin_unlock_irq_rcu_node(p)					\
+	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
+
+#define spin_lock_irqsave_rcu_node(p, flags)				\
+do {									\
+	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
+	smp_mb__after_unlock_lock();					\
+} while (0)
+
+#define spin_unlock_irqrestore_rcu_node(p, flags)			\
+	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)	\
+
 /*
  * Initialize SRCU combining tree.  Note that statically allocated
  * srcu_struct structures might already have srcu_read_lock() and
@@ -77,7 +104,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 
 	/* Each pass through this loop initializes one srcu_node structure. */
 	rcu_for_each_node_breadth_first(sp, snp) {
-		raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
+		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
 		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
@@ -111,7 +138,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 		snp_first = sp->level[level];
 		for_each_possible_cpu(cpu) {
 			sdp = per_cpu_ptr(sp->sda, cpu);
-			raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
+			spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
 			rcu_segcblist_init(&sdp->srcu_cblist);
 			sdp->srcu_cblist_invoking = false;
 			sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
@@ -170,7 +197,7 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
 	/* Don't re-initialize a lock while it is held. */
 	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
 	lockdep_init_map(&sp->dep_map, name, key, 0);
-	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
+	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
 	return init_srcu_struct_fields(sp, false);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -187,7 +214,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
  */
 int init_srcu_struct(struct srcu_struct *sp)
 {
-	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
+	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
 	return init_srcu_struct_fields(sp, false);
 }
 EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -210,13 +237,13 @@ static void check_init_srcu_struct(struct srcu_struct *sp)
 	/* The smp_load_acquire() pairs with the smp_store_release(). */
 	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
 		return; /* Already initialized. */
-	raw_spin_lock_irqsave_rcu_node(sp, flags);
+	spin_lock_irqsave_rcu_node(sp, flags);
 	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
-		raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+		spin_unlock_irqrestore_rcu_node(sp, flags);
 		return;
 	}
 	init_srcu_struct_fields(sp, true);
-	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -513,7 +540,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	mutex_lock(&sp->srcu_cb_mutex);
 
 	/* End the current grace period. */
-	raw_spin_lock_irq_rcu_node(sp);
+	spin_lock_irq_rcu_node(sp);
 	idx = rcu_seq_state(sp->srcu_gp_seq);
 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
 	cbdelay = srcu_get_delay(sp);
@@ -522,7 +549,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
 	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
 		sp->srcu_gp_seq_needed_exp = gpseq;
-	raw_spin_unlock_irq_rcu_node(sp);
+	spin_unlock_irq_rcu_node(sp);
 	mutex_unlock(&sp->srcu_gp_mutex);
 	/* A new grace period can start at this point.  But only one. */
 
@@ -530,7 +557,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
 	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
 	rcu_for_each_node_breadth_first(sp, snp) {
-		raw_spin_lock_irq_rcu_node(snp);
+		spin_lock_irq_rcu_node(snp);
 		cbs = false;
 		if (snp >= sp->level[rcu_num_lvls - 1])
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
@@ -540,7 +567,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 			snp->srcu_gp_seq_needed_exp = gpseq;
 		mask = snp->srcu_data_have_cbs[idx];
 		snp->srcu_data_have_cbs[idx] = 0;
-		raw_spin_unlock_irq_rcu_node(snp);
+		spin_unlock_irq_rcu_node(snp);
 		if (cbs)
 			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
 
@@ -548,11 +575,11 @@ static void srcu_gp_end(struct srcu_struct *sp)
 		if (!(gpseq & counter_wrap_check))
 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 				sdp = per_cpu_ptr(sp->sda, cpu);
-				raw_spin_lock_irqsave_rcu_node(sdp, flags);
+				spin_lock_irqsave_rcu_node(sdp, flags);
 				if (ULONG_CMP_GE(gpseq,
 						 sdp->srcu_gp_seq_needed + 100))
 					sdp->srcu_gp_seq_needed = gpseq;
-				raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
+				spin_unlock_irqrestore_rcu_node(sdp, flags);
 			}
 	}
 
@@ -560,17 +587,17 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	mutex_unlock(&sp->srcu_cb_mutex);
 
 	/* Start a new grace period if needed. */
-	raw_spin_lock_irq_rcu_node(sp);
+	spin_lock_irq_rcu_node(sp);
 	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
 	if (!rcu_seq_state(gpseq) &&
 	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
 		srcu_gp_start(sp);
-		raw_spin_unlock_irq_rcu_node(sp);
+		spin_unlock_irq_rcu_node(sp);
 		/* Throttle expedited grace periods: Should be rare! */
 		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
 				    ? 0 : SRCU_INTERVAL);
 	} else {
-		raw_spin_unlock_irq_rcu_node(sp);
+		spin_unlock_irq_rcu_node(sp);
 	}
 }
 
@@ -590,18 +617,18 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
 		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
 		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
 			return;
-		raw_spin_lock_irqsave_rcu_node(snp, flags);
+		spin_lock_irqsave_rcu_node(snp, flags);
 		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
-			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+			spin_unlock_irqrestore_rcu_node(snp, flags);
 			return;
 		}
 		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
-		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+		spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
-	raw_spin_lock_irqsave_rcu_node(sp, flags);
+	spin_lock_irqsave_rcu_node(sp, flags);
 	if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
 		sp->srcu_gp_seq_needed_exp = s;
-	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -623,12 +650,12 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 	for (; snp != NULL; snp = snp->srcu_parent) {
 		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
 			return; /* GP already done and CBs recorded. */
-		raw_spin_lock_irqsave_rcu_node(snp, flags);
+		spin_lock_irqsave_rcu_node(snp, flags);
 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
 			snp_seq = snp->srcu_have_cbs[idx];
 			if (snp == sdp->mynode && snp_seq == s)
 				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
-			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+			spin_unlock_irqrestore_rcu_node(snp, flags);
 			if (snp == sdp->mynode && snp_seq != s) {
 				srcu_schedule_cbs_sdp(sdp, do_norm
 						      ? SRCU_INTERVAL
@@ -644,11 +671,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
 			snp->srcu_gp_seq_needed_exp = s;
-		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+		spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
 
 	/* Top of tree, must ensure the grace period will be started. */
-	raw_spin_lock_irqsave_rcu_node(sp, flags);
+	spin_lock_irqsave_rcu_node(sp, flags);
 	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
 		/*
 		 * Record need for grace period s.  Pair with load
@@ -667,7 +694,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 		queue_delayed_work(system_power_efficient_wq, &sp->work,
 				   srcu_get_delay(sp));
 	}
-	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -830,7 +857,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	rhp->func = func;
 	local_irq_save(flags);
 	sdp = this_cpu_ptr(sp->sda);
-	raw_spin_lock_rcu_node(sdp);
+	spin_lock_rcu_node(sdp);
 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
@@ -844,7 +871,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 		sdp->srcu_gp_seq_needed_exp = s;
 		needexp = true;
 	}
-	raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
+	spin_unlock_irqrestore_rcu_node(sdp, flags);
 	if (needgp)
 		srcu_funnel_gp_start(sp, sdp, s, do_norm);
 	else if (needexp)
@@ -900,7 +927,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
 
 	/*
 	 * Make sure that later code is ordered after the SRCU grace
-	 * period.  This pairs with the raw_spin_lock_irq_rcu_node()
+	 * period.  This pairs with the spin_lock_irq_rcu_node()
 	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
 	 * because the current CPU might have been totally uninvolved with
 	 * (and thus unordered against) that grace period.
@@ -1024,7 +1051,7 @@ void srcu_barrier(struct srcu_struct *sp)
 	 */
 	for_each_possible_cpu(cpu) {
 		sdp = per_cpu_ptr(sp->sda, cpu);
-		raw_spin_lock_irq_rcu_node(sdp);
+		spin_lock_irq_rcu_node(sdp);
 		atomic_inc(&sp->srcu_barrier_cpu_cnt);
 		sdp->srcu_barrier_head.func = srcu_barrier_cb;
 		debug_rcu_head_queue(&sdp->srcu_barrier_head);
@@ -1033,7 +1060,7 @@ void srcu_barrier(struct srcu_struct *sp)
 			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
 			atomic_dec(&sp->srcu_barrier_cpu_cnt);
 		}
-		raw_spin_unlock_irq_rcu_node(sdp);
+		spin_unlock_irq_rcu_node(sdp);
 	}
 
 	/* Remove the initial count, at which point reaching zero can happen. */
@@ -1082,17 +1109,17 @@ static void srcu_advance_state(struct srcu_struct *sp)
 	 */
 	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
 	if (idx == SRCU_STATE_IDLE) {
-		raw_spin_lock_irq_rcu_node(sp);
+		spin_lock_irq_rcu_node(sp);
 		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
 			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
-			raw_spin_unlock_irq_rcu_node(sp);
+			spin_unlock_irq_rcu_node(sp);
 			mutex_unlock(&sp->srcu_gp_mutex);
 			return;
 		}
 		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
 		if (idx == SRCU_STATE_IDLE)
 			srcu_gp_start(sp);
-		raw_spin_unlock_irq_rcu_node(sp);
+		spin_unlock_irq_rcu_node(sp);
 		if (idx != SRCU_STATE_IDLE) {
 			mutex_unlock(&sp->srcu_gp_mutex);
 			return; /* Someone else started the grace period. */
@@ -1141,19 +1168,19 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	sdp = container_of(work, struct srcu_data, work.work);
 	sp = sdp->sp;
 	rcu_cblist_init(&ready_cbs);
-	raw_spin_lock_irq_rcu_node(sdp);
+	spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
 	if (sdp->srcu_cblist_invoking ||
 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
-		raw_spin_unlock_irq_rcu_node(sdp);
+		spin_unlock_irq_rcu_node(sdp);
 		return;  /* Someone else on the job or nothing to do. */
 	}
 
 	/* We are on the job!  Extract and invoke ready callbacks. */
 	sdp->srcu_cblist_invoking = true;
 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
-	raw_spin_unlock_irq_rcu_node(sdp);
+	spin_unlock_irq_rcu_node(sdp);
 	rhp = rcu_cblist_dequeue(&ready_cbs);
 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
 		debug_rcu_head_unqueue(rhp);
@@ -1166,13 +1193,13 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	 * Update counts, accelerate new callbacks, and if needed,
 	 * schedule another round of callback invocation.
 	 */
-	raw_spin_lock_irq_rcu_node(sdp);
+	spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
 				       rcu_seq_snap(&sp->srcu_gp_seq));
 	sdp->srcu_cblist_invoking = false;
 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
-	raw_spin_unlock_irq_rcu_node(sdp);
+	spin_unlock_irq_rcu_node(sdp);
 	if (more)
 		srcu_schedule_cbs_sdp(sdp, 0);
 }
@@ -1185,7 +1212,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
 {
 	bool pushgp = true;
 
-	raw_spin_lock_irq_rcu_node(sp);
+	spin_lock_irq_rcu_node(sp);
 	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
 		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
 			/* All requests fulfilled, time to go idle. */
@@ -1195,7 +1222,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
 		/* Outstanding request and no GP.  Start one. */
 		srcu_gp_start(sp);
 	}
-	raw_spin_unlock_irq_rcu_node(sp);
+	spin_unlock_irq_rcu_node(sp);
 
 	if (pushgp)
 		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);