-rw-r--r--	include/linux/srcutree.h	 8
-rw-r--r--	kernel/rcu/srcutree.c	91
2 files changed, 47 insertions(+), 52 deletions(-)
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 24e949bda12a..42973f787e7e 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
 	unsigned long srcu_unlock_count[2];	/* Unlocks per CPU. */
 
 	/* Update-side state. */
-	spinlock_t lock ____cacheline_internodealigned_in_smp;
+	raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
 	struct rcu_segcblist srcu_cblist;	/* List of callbacks.*/
 	unsigned long srcu_gp_seq_needed;	/* Furthest future GP needed. */
 	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
  * Node in SRCU combining tree, similar in function to rcu_data.
  */
 struct srcu_node {
-	spinlock_t lock;
+	raw_spinlock_t __private lock;
 	unsigned long srcu_have_cbs[4];		/* GP seq for children */
 						/*  having CBs, but only */
 						/*  is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
 	struct srcu_node *level[RCU_NUM_LVLS + 1];
 						/* First node at each level. */
 	struct mutex srcu_cb_mutex;		/* Serialize CB preparation. */
-	spinlock_t gp_lock;			/* protect ->srcu_cblist */
+	raw_spinlock_t __private lock;		/* Protect counters */
 	struct mutex srcu_gp_mutex;		/* Serialize GP work. */
 	unsigned int srcu_idx;			/* Current rdr array element. */
 	unsigned long srcu_gp_seq;		/* Grace-period seq #. */
@@ -109,7 +109,7 @@ void process_srcu(struct work_struct *work);
 #define __SRCU_STRUCT_INIT(name)					\
 	{								\
 		.sda = &name##_srcu_data,				\
-		.gp_lock = __SPIN_LOCK_UNLOCKED(name.gp_lock),		\
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
 		.srcu_gp_seq_needed = 0 - 1,				\
 		__SRCU_DEP_MAP_INIT(name)				\
 	}
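For reference, the __private marker and the ACCESS_PRIVATE() accessor that this patch leans on come from the kernel's sparse annotations; a rough sketch of their definitions (modeled on include/linux/compiler.h of this era, so treat the exact spelling as an assumption):

#ifdef __CHECKER__
# define __private	__attribute__((noderef))
# define ACCESS_PRIVATE(p, member) \
	(*((typeof((p)->member) __force *)&(p)->member))
#else
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif

Under sparse (__CHECKER__), directly dereferencing a __private field draws a warning, steering all users through ACCESS_PRIVATE() or the wrappers below; with a regular compiler both annotations cost nothing.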
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 66a998f9c5a7..d0ca524bf042 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -76,7 +76,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 
 	/* Each pass through this loop initializes one srcu_node structure. */
 	rcu_for_each_node_breadth_first(sp, snp) {
-		spin_lock_init(&snp->lock);
+		raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
 		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
@@ -110,7 +110,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 	snp_first = sp->level[level];
 	for_each_possible_cpu(cpu) {
 		sdp = per_cpu_ptr(sp->sda, cpu);
-		spin_lock_init(&sdp->lock);
+		raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
 		rcu_segcblist_init(&sdp->srcu_cblist);
 		sdp->srcu_cblist_invoking = false;
 		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
@@ -169,7 +169,7 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
 	/* Don't re-initialize a lock while it is held. */
 	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
 	lockdep_init_map(&sp->dep_map, name, key, 0);
-	spin_lock_init(&sp->gp_lock);
+	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
 	return init_srcu_struct_fields(sp, false);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
  */
 int init_srcu_struct(struct srcu_struct *sp)
 {
-	spin_lock_init(&sp->gp_lock);
+	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
 	return init_srcu_struct_fields(sp, false);
 }
 EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -197,7 +197,7 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
  * First-use initialization of statically allocated srcu_struct
  * structure.  Wiring up the combining tree is more than can be
  * done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive.  Use ->gp_lock, which -is-
+ * to each update-side SRCU primitive.  Use sp->lock, which -is-
  * compile-time initialized, to resolve races involving multiple
  * CPUs trying to garner first-use privileges.
  */
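The acquire half of this handshake is visible in check_init_srcu_struct() just below; the matching release is assumed to sit at the end of init_srcu_struct_fields(), once the combining tree is fully wired, roughly:

	smp_store_release(&sp->srcu_gp_seq_needed, 0);	/* Init done. */

Until that release, ->srcu_gp_seq_needed still holds the 0 - 1 value from __SRCU_STRUCT_INIT(), whose nonzero rcu_seq_state() bits are what route first users into the sp->lock-protected slow path.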
@@ -209,13 +209,13 @@ static void check_init_srcu_struct(struct srcu_struct *sp)
 	/* The smp_load_acquire() pairs with the smp_store_release(). */
 	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
 		return; /* Already initialized. */
-	spin_lock_irqsave(&sp->gp_lock, flags);
+	raw_spin_lock_irqsave_rcu_node(sp, flags);
 	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
-		spin_unlock_irqrestore(&sp->gp_lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 		return;
 	}
 	init_srcu_struct_fields(sp, true);
-	spin_unlock_irqrestore(&sp->gp_lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
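The _rcu_node() wrapper family used from here on takes the structure rather than the lock field, reaches the lock via ACCESS_PRIVATE(), and folds smp_mb__after_unlock_lock() into every acquisition. A sketch modeled on the rcu_node wrappers in kernel/rcu/tree.h (the srcu variants are assumed to follow the same pattern):

#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_rcu_node(p)					\
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

The _irq variants follow the same shape. That built-in barrier is what lets later hunks delete explicit smp_mb() calls.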
@@ -411,8 +411,7 @@ static void srcu_gp_start(struct srcu_struct *sp)
 	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
 	int state;
 
-	RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
-			 "Invoked srcu_gp_start() without ->gp_lock!");
+	lockdep_assert_held(&sp->lock);
 	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
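lockdep_assert_held() is the stock lockdep idiom replacing the open-coded RCU_LOCKDEP_WARN() here; per include/linux/lockdep.h of this era it expands to roughly:

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

so the custom warning string goes away in favor of the standard splat.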
@@ -513,7 +512,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	mutex_lock(&sp->srcu_cb_mutex);
 
 	/* End the current grace period. */
-	spin_lock_irq(&sp->gp_lock);
+	raw_spin_lock_irq_rcu_node(sp);
 	idx = rcu_seq_state(sp->srcu_gp_seq);
 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
 	cbdelay = srcu_get_delay(sp);
@@ -522,7 +521,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
 	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
 		sp->srcu_gp_seq_needed_exp = gpseq;
-	spin_unlock_irq(&sp->gp_lock);
+	raw_spin_unlock_irq_rcu_node(sp);
 	mutex_unlock(&sp->srcu_gp_mutex);
 	/* A new grace period can start at this point.  But only one. */
 
@@ -530,7 +529,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
 	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
 	rcu_for_each_node_breadth_first(sp, snp) {
-		spin_lock_irq(&snp->lock);
+		raw_spin_lock_irq_rcu_node(snp);
 		cbs = false;
 		if (snp >= sp->level[rcu_num_lvls - 1])
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
@@ -540,21 +539,19 @@ static void srcu_gp_end(struct srcu_struct *sp)
 			snp->srcu_gp_seq_needed_exp = gpseq;
 		mask = snp->srcu_data_have_cbs[idx];
 		snp->srcu_data_have_cbs[idx] = 0;
-		spin_unlock_irq(&snp->lock);
-		if (cbs) {
-			smp_mb(); /* GP end before CB invocation. */
+		raw_spin_unlock_irq_rcu_node(snp);
+		if (cbs)
 			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
-		}
 
 		/* Occasionally prevent srcu_data counter wrap. */
 		if (!(gpseq & counter_wrap_check))
 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 				sdp = per_cpu_ptr(sp->sda, cpu);
-				spin_lock_irqsave(&sdp->lock, flags);
+				raw_spin_lock_irqsave_rcu_node(sdp, flags);
 				if (ULONG_CMP_GE(gpseq,
 						 sdp->srcu_gp_seq_needed + 100))
 					sdp->srcu_gp_seq_needed = gpseq;
-				spin_unlock_irqrestore(&sdp->lock, flags);
+				raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
 			}
 	}
 
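The smp_mb() deleted above, like the ones removed below in srcu_funnel_gp_start() and srcu_invoke_callbacks(), is now supplied by the smp_mb__after_unlock_lock() baked into the _rcu_node() acquisition wrappers. On most architectures an unlock followed by a lock already acts as a full barrier, so the helper is (roughly, per the definition in kernel/rcu/tree.h of this era):

#ifdef CONFIG_PPC
#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_PPC */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif /* #else #ifdef CONFIG_PPC */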
@@ -562,17 +559,17 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	mutex_unlock(&sp->srcu_cb_mutex);
 
 	/* Start a new grace period if needed. */
-	spin_lock_irq(&sp->gp_lock);
+	raw_spin_lock_irq_rcu_node(sp);
 	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
 	if (!rcu_seq_state(gpseq) &&
 	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
 		srcu_gp_start(sp);
-		spin_unlock_irq(&sp->gp_lock);
+		raw_spin_unlock_irq_rcu_node(sp);
 		/* Throttle expedited grace periods: Should be rare! */
 		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
 				    ? 0 : SRCU_INTERVAL);
 	} else {
-		spin_unlock_irq(&sp->gp_lock);
+		raw_spin_unlock_irq_rcu_node(sp);
 	}
 }
 
@@ -592,18 +589,18 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
 		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
 		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
 			return;
-		spin_lock_irqsave(&snp->lock, flags);
+		raw_spin_lock_irqsave_rcu_node(snp, flags);
 		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
-			spin_unlock_irqrestore(&snp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
 			return;
 		}
 		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
-		spin_unlock_irqrestore(&snp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
-	spin_lock_irqsave(&sp->gp_lock, flags);
+	raw_spin_lock_irqsave_rcu_node(sp, flags);
 	if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
 		sp->srcu_gp_seq_needed_exp = s;
-	spin_unlock_irqrestore(&sp->gp_lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -625,14 +622,13 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 	for (; snp != NULL; snp = snp->srcu_parent) {
 		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
 			return; /* GP already done and CBs recorded. */
-		spin_lock_irqsave(&snp->lock, flags);
+		raw_spin_lock_irqsave_rcu_node(snp, flags);
 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
 			snp_seq = snp->srcu_have_cbs[idx];
 			if (snp == sdp->mynode && snp_seq == s)
 				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
-			spin_unlock_irqrestore(&snp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
 			if (snp == sdp->mynode && snp_seq != s) {
-				smp_mb(); /* CBs after GP! */
 				srcu_schedule_cbs_sdp(sdp, do_norm
 							   ? SRCU_INTERVAL
 							   : 0);
@@ -647,11 +643,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 		snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
 			snp->srcu_gp_seq_needed_exp = s;
-		spin_unlock_irqrestore(&snp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
 
 	/* Top of tree, must ensure the grace period will be started. */
-	spin_lock_irqsave(&sp->gp_lock, flags);
+	raw_spin_lock_irqsave_rcu_node(sp, flags);
 	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
 		/*
 		 * Record need for grace period s.  Pair with load
@@ -670,7 +666,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 		queue_delayed_work(system_power_efficient_wq, &sp->work,
 				   srcu_get_delay(sp));
 	}
-	spin_unlock_irqrestore(&sp->gp_lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -833,7 +829,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	rhp->func = func;
 	local_irq_save(flags);
 	sdp = this_cpu_ptr(sp->sda);
-	spin_lock(&sdp->lock);
+	raw_spin_lock_rcu_node(sdp);
 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
@@ -847,7 +843,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 		sdp->srcu_gp_seq_needed_exp = s;
 		needexp = true;
 	}
-	spin_unlock_irqrestore(&sdp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
 	if (needgp)
 		srcu_funnel_gp_start(sp, sdp, s, do_norm);
 	else if (needexp)
@@ -1018,7 +1014,7 @@ void srcu_barrier(struct srcu_struct *sp)
 	 */
 	for_each_possible_cpu(cpu) {
 		sdp = per_cpu_ptr(sp->sda, cpu);
-		spin_lock_irq(&sdp->lock);
+		raw_spin_lock_irq_rcu_node(sdp);
 		atomic_inc(&sp->srcu_barrier_cpu_cnt);
 		sdp->srcu_barrier_head.func = srcu_barrier_cb;
 		debug_rcu_head_queue(&sdp->srcu_barrier_head);
@@ -1027,7 +1023,7 @@ void srcu_barrier(struct srcu_struct *sp)
 			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
 			atomic_dec(&sp->srcu_barrier_cpu_cnt);
 		}
-		spin_unlock_irq(&sdp->lock);
+		raw_spin_unlock_irq_rcu_node(sdp);
 	}
 
 	/* Remove the initial count, at which point reaching zero can happen. */
@@ -1076,17 +1072,17 @@ static void srcu_advance_state(struct srcu_struct *sp)
 	 */
 	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
 	if (idx == SRCU_STATE_IDLE) {
-		spin_lock_irq(&sp->gp_lock);
+		raw_spin_lock_irq_rcu_node(sp);
 		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
 			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
-			spin_unlock_irq(&sp->gp_lock);
+			raw_spin_unlock_irq_rcu_node(sp);
 			mutex_unlock(&sp->srcu_gp_mutex);
 			return;
 		}
 		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
 		if (idx == SRCU_STATE_IDLE)
 			srcu_gp_start(sp);
-		spin_unlock_irq(&sp->gp_lock);
+		raw_spin_unlock_irq_rcu_node(sp);
 		if (idx != SRCU_STATE_IDLE) {
 			mutex_unlock(&sp->srcu_gp_mutex);
 			return; /* Someone else started the grace period. */
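For orientation, the grace-period phase that srcu_advance_state() steps through lives in the low-order state bits of ->srcu_gp_seq; the phase values are (per include/linux/srcutree.h):

#define SRCU_STATE_IDLE		0
#define SRCU_STATE_SCAN1	1
#define SRCU_STATE_SCAN2	2

rcu_seq_state() masks those bits out of the sequence counter, which is why the IDLE check above can be done locklessly with smp_load_acquire() and then rechecked under sp->lock.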
@@ -1135,20 +1131,19 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	sdp = container_of(work, struct srcu_data, work.work);
 	sp = sdp->sp;
 	rcu_cblist_init(&ready_cbs);
-	spin_lock_irq(&sdp->lock);
-	smp_mb(); /* Old grace periods before callback invocation! */
+	raw_spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
 	if (sdp->srcu_cblist_invoking ||
 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
-		spin_unlock_irq(&sdp->lock);
+		raw_spin_unlock_irq_rcu_node(sdp);
 		return;  /* Someone else on the job or nothing to do. */
 	}
 
 	/* We are on the job!  Extract and invoke ready callbacks. */
 	sdp->srcu_cblist_invoking = true;
 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
-	spin_unlock_irq(&sdp->lock);
+	raw_spin_unlock_irq_rcu_node(sdp);
 	rhp = rcu_cblist_dequeue(&ready_cbs);
 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
 		debug_rcu_head_unqueue(rhp);
@@ -1161,13 +1156,13 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	 * Update counts, accelerate new callbacks, and if needed,
 	 * schedule another round of callback invocation.
 	 */
-	spin_lock_irq(&sdp->lock);
+	raw_spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
 				       rcu_seq_snap(&sp->srcu_gp_seq));
 	sdp->srcu_cblist_invoking = false;
 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
-	spin_unlock_irq(&sdp->lock);
+	raw_spin_unlock_irq_rcu_node(sdp);
 	if (more)
 		srcu_schedule_cbs_sdp(sdp, 0);
 }
@@ -1180,7 +1175,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
 {
 	bool pushgp = true;
 
-	spin_lock_irq(&sp->gp_lock);
+	raw_spin_lock_irq_rcu_node(sp);
 	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
 		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
 			/* All requests fulfilled, time to go idle. */
@@ -1190,7 +1185,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
 		/* Outstanding request and no GP.  Start one. */
 		srcu_gp_start(sp);
 	}
-	spin_unlock_irq(&sp->gp_lock);
+	raw_spin_unlock_irq_rcu_node(sp);
 
 	if (pushgp)
 		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);