author     H. Peter Anvin <hpa@zytor.com>  2010-02-10 19:55:28 -0500
committer  H. Peter Anvin <hpa@zytor.com>  2010-02-10 19:55:28 -0500
commit     84abd88a70090cf00f9e45c3a81680874f17626e (patch)
tree       4f58b80057f6e1f5817af1dc33a5458b3dfc9a99 /kernel/rcutree.c
parent     13ca0fcaa33f6b1984c4111b6ec5df42689fea6f (diff)
parent     e28cab42f384745c8a947a9ccd51e4aae52f5d51 (diff)

Merge remote branch 'linus/master' into x86/bootmem
Diffstat (limited to 'kernel/rcutree.c')

 -rw-r--r--  kernel/rcutree.c  493
 1 file changed, 342 insertions, 151 deletions

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 705f02ac7433..53ae9598f798 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,20 +46,24 @@
46 | #include <linux/cpu.h> | 46 | #include <linux/cpu.h> |
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <linux/time.h> | 48 | #include <linux/time.h> |
49 | #include <linux/kernel_stat.h> | ||
49 | 50 | ||
50 | #include "rcutree.h" | 51 | #include "rcutree.h" |
51 | 52 | ||
52 | /* Data structures. */ | 53 | /* Data structures. */ |
53 | 54 | ||
55 | static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | ||
56 | |||
54 | #define RCU_STATE_INITIALIZER(name) { \ | 57 | #define RCU_STATE_INITIALIZER(name) { \ |
55 | .level = { &name.node[0] }, \ | 58 | .level = { &name.node[0] }, \ |
56 | .levelcnt = { \ | 59 | .levelcnt = { \ |
57 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ | 60 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ |
58 | NUM_RCU_LVL_1, \ | 61 | NUM_RCU_LVL_1, \ |
59 | NUM_RCU_LVL_2, \ | 62 | NUM_RCU_LVL_2, \ |
60 | NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ | 63 | NUM_RCU_LVL_3, \ |
64 | NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \ | ||
61 | }, \ | 65 | }, \ |
62 | .signaled = RCU_SIGNAL_INIT, \ | 66 | .signaled = RCU_GP_IDLE, \ |
63 | .gpnum = -300, \ | 67 | .gpnum = -300, \ |
64 | .completed = -300, \ | 68 | .completed = -300, \ |
65 | .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ | 69 | .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ |
@@ -77,6 +81,8 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
77 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 81 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
78 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 82 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
79 | 83 | ||
84 | static int rcu_scheduler_active __read_mostly; | ||
85 | |||
80 | 86 | ||
81 | /* | 87 | /* |
82 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | 88 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s |
@@ -98,7 +104,7 @@ void rcu_sched_qs(int cpu)
98 | struct rcu_data *rdp; | 104 | struct rcu_data *rdp; |
99 | 105 | ||
100 | rdp = &per_cpu(rcu_sched_data, cpu); | 106 | rdp = &per_cpu(rcu_sched_data, cpu); |
101 | rdp->passed_quiesc_completed = rdp->completed; | 107 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
102 | barrier(); | 108 | barrier(); |
103 | rdp->passed_quiesc = 1; | 109 | rdp->passed_quiesc = 1; |
104 | rcu_preempt_note_context_switch(cpu); | 110 | rcu_preempt_note_context_switch(cpu); |
@@ -109,7 +115,7 @@ void rcu_bh_qs(int cpu)
109 | struct rcu_data *rdp; | 115 | struct rcu_data *rdp; |
110 | 116 | ||
111 | rdp = &per_cpu(rcu_bh_data, cpu); | 117 | rdp = &per_cpu(rcu_bh_data, cpu); |
112 | rdp->passed_quiesc_completed = rdp->completed; | 118 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
113 | barrier(); | 119 | barrier(); |
114 | rdp->passed_quiesc = 1; | 120 | rdp->passed_quiesc = 1; |
115 | } | 121 | } |
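The two hunks above switch rcu_sched_qs() and rcu_bh_qs() from recording ->completed to recording ->gpnum - 1. The toy userspace model below (hypothetical names, not from the patch) sketches the relationship between the two counters: ->gpnum names the grace period now in flight, so gpnum - 1 is the value ->completed should still hold when the quiescent state is eventually reported; if ->completed has moved on, the snapshot is stale.

```c
/* Build: cc -o gpcounters gpcounters.c  (illustrative sketch only) */
#include <stdio.h>

struct gp_counters {
	long gpnum;	/* most recently started grace period */
	long completed;	/* most recently completed grace period */
};

static void gp_start(struct gp_counters *g) { g->gpnum++; }
static void gp_end(struct gp_counters *g)   { g->completed = g->gpnum; }

int main(void)
{
	struct gp_counters g = { .gpnum = -300, .completed = -300 };
	long passed_quiesc_completed;

	gp_start(&g);				/* a grace period is in flight */
	passed_quiesc_completed = g.gpnum - 1;	/* as in rcu_sched_qs() above */
	printf("snapshot usable while completed (%ld) == %ld\n",
	       g.completed, passed_quiesc_completed);
	gp_end(&g);				/* after this the snapshot is stale */
	return 0;
}
```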
@@ -335,28 +341,9 @@ void rcu_irq_exit(void)
335 | set_need_resched(); | 341 | set_need_resched(); |
336 | } | 342 | } |
337 | 343 | ||
338 | /* | ||
339 | * Record the specified "completed" value, which is later used to validate | ||
340 | * dynticks counter manipulations. Specify "rsp->completed - 1" to | ||
341 | * unconditionally invalidate any future dynticks manipulations (which is | ||
342 | * useful at the beginning of a grace period). | ||
343 | */ | ||
344 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
345 | { | ||
346 | rsp->dynticks_completed = comp; | ||
347 | } | ||
348 | |||
349 | #ifdef CONFIG_SMP | 344 | #ifdef CONFIG_SMP |
350 | 345 | ||
351 | /* | 346 | /* |
352 | * Recall the previously recorded value of the completion for dynticks. | ||
353 | */ | ||
354 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
355 | { | ||
356 | return rsp->dynticks_completed; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Snapshot the specified CPU's dynticks counter so that we can later | 347 | * Snapshot the specified CPU's dynticks counter so that we can later |
361 | * credit them with an implicit quiescent state. Return 1 if this CPU | 348 | * credit them with an implicit quiescent state. Return 1 if this CPU |
362 | * is in dynticks idle mode, which is an extended quiescent state. | 349 | * is in dynticks idle mode, which is an extended quiescent state. |
@@ -419,24 +406,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
419 | 406 | ||
420 | #else /* #ifdef CONFIG_NO_HZ */ | 407 | #else /* #ifdef CONFIG_NO_HZ */ |
421 | 408 | ||
422 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
423 | { | ||
424 | } | ||
425 | |||
426 | #ifdef CONFIG_SMP | 409 | #ifdef CONFIG_SMP |
427 | 410 | ||
428 | /* | ||
429 | * If there are no dynticks, then the only way that a CPU can passively | ||
430 | * be in a quiescent state is to be offline. Unlike dynticks idle, which | ||
431 | * is a point in time during the prior (already finished) grace period, | ||
432 | * an offline CPU is always in a quiescent state, and thus can be | ||
433 | * unconditionally applied. So just return the current value of completed. | ||
434 | */ | ||
435 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
436 | { | ||
437 | return rsp->completed; | ||
438 | } | ||
439 | |||
440 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | 411 | static int dyntick_save_progress_counter(struct rcu_data *rdp) |
441 | { | 412 | { |
442 | return 0; | 413 | return 0; |
@@ -553,13 +524,33 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
553 | /* | 524 | /* |
554 | * Update CPU-local rcu_data state to record the newly noticed grace period. | 525 | * Update CPU-local rcu_data state to record the newly noticed grace period. |
555 | * This is used both when we started the grace period and when we notice | 526 | * This is used both when we started the grace period and when we notice |
556 | * that someone else started the grace period. | 527 | * that someone else started the grace period. The caller must hold the |
528 | * ->lock of the leaf rcu_node structure corresponding to the current CPU, | ||
529 | * and must have irqs disabled. | ||
557 | */ | 530 | */ |
531 | static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
532 | { | ||
533 | if (rdp->gpnum != rnp->gpnum) { | ||
534 | rdp->qs_pending = 1; | ||
535 | rdp->passed_quiesc = 0; | ||
536 | rdp->gpnum = rnp->gpnum; | ||
537 | } | ||
538 | } | ||
539 | |||
558 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) | 540 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) |
559 | { | 541 | { |
560 | rdp->qs_pending = 1; | 542 | unsigned long flags; |
561 | rdp->passed_quiesc = 0; | 543 | struct rcu_node *rnp; |
562 | rdp->gpnum = rsp->gpnum; | 544 | |
545 | local_irq_save(flags); | ||
546 | rnp = rdp->mynode; | ||
547 | if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */ | ||
548 | !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
549 | local_irq_restore(flags); | ||
550 | return; | ||
551 | } | ||
552 | __note_new_gpnum(rsp, rnp, rdp); | ||
553 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
563 | } | 554 | } |
564 | 555 | ||
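note_new_gpnum() above (and rcu_process_gp_end() later in this patch) follows a check-outside-the-lock-then-trylock shape: sample the shared counter racily, and if it already matches the cached copy or the leaf lock is contended, give up and let a later pass retry instead of spinning with irqs disabled. A minimal userspace analogue of that shape, with made-up names and no relation to the kernel API, looks like this:

```c
/* Build: cc -pthread -o trylock trylock.c  (illustrative sketch only) */
#include <pthread.h>
#include <stdio.h>

struct shared {
	pthread_mutex_t lock;
	long gpnum;		/* advanced by some other thread */
};

struct local {
	long gpnum;		/* this thread's cached copy */
};

/* Return 1 if the cache was refreshed, 0 if we deferred to a later pass. */
static int note_new_gpnum_local(struct shared *s, struct local *l)
{
	if (l->gpnum == s->gpnum)		/* racy read outside the lock */
		return 0;			/* nothing new (probably) */
	if (pthread_mutex_trylock(&s->lock) != 0)
		return 0;			/* contended: just retry later */
	if (l->gpnum != s->gpnum)		/* re-check under the lock */
		l->gpnum = s->gpnum;
	pthread_mutex_unlock(&s->lock);
	return 1;
}

int main(void)
{
	struct shared s = { .lock = PTHREAD_MUTEX_INITIALIZER, .gpnum = 5 };
	struct local l = { .gpnum = 4 };

	printf("refreshed=%d, cached gpnum=%ld\n",
	       note_new_gpnum_local(&s, &l), l.gpnum);
	return 0;
}
```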
565 | /* | 556 | /* |
@@ -583,6 +574,79 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
583 | } | 574 | } |
584 | 575 | ||
585 | /* | 576 | /* |
577 | * Advance this CPU's callbacks, but only if the current grace period | ||
578 | * has ended. This may be called only from the CPU to whom the rdp | ||
579 | * belongs. In addition, the corresponding leaf rcu_node structure's | ||
580 | * ->lock must be held by the caller, with irqs disabled. | ||
581 | */ | ||
582 | static void | ||
583 | __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
584 | { | ||
585 | /* Did another grace period end? */ | ||
586 | if (rdp->completed != rnp->completed) { | ||
587 | |||
588 | /* Advance callbacks. No harm if list empty. */ | ||
589 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
590 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
591 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
592 | |||
593 | /* Remember that we saw this grace-period completion. */ | ||
594 | rdp->completed = rnp->completed; | ||
595 | } | ||
596 | } | ||
597 | |||
598 | /* | ||
599 | * Advance this CPU's callbacks, but only if the current grace period | ||
600 | * has ended. This may be called only from the CPU to whom the rdp | ||
601 | * belongs. | ||
602 | */ | ||
603 | static void | ||
604 | rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
605 | { | ||
606 | unsigned long flags; | ||
607 | struct rcu_node *rnp; | ||
608 | |||
609 | local_irq_save(flags); | ||
610 | rnp = rdp->mynode; | ||
611 | if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */ | ||
612 | !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
613 | local_irq_restore(flags); | ||
614 | return; | ||
615 | } | ||
616 | __rcu_process_gp_end(rsp, rnp, rdp); | ||
617 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
618 | } | ||
619 | |||
620 | /* | ||
621 | * Do per-CPU grace-period initialization for running CPU. The caller | ||
622 | * must hold the lock of the leaf rcu_node structure corresponding to | ||
623 | * this CPU. | ||
624 | */ | ||
625 | static void | ||
626 | rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
627 | { | ||
628 | /* Prior grace period ended, so advance callbacks for current CPU. */ | ||
629 | __rcu_process_gp_end(rsp, rnp, rdp); | ||
630 | |||
631 | /* | ||
632 | * Because this CPU just now started the new grace period, we know | ||
633 | * that all of its callbacks will be covered by this upcoming grace | ||
634 | * period, even the ones that were registered arbitrarily recently. | ||
635 | * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
636 | * | ||
637 | * Other CPUs cannot be sure exactly when the grace period started. | ||
638 | * Therefore, their recently registered callbacks must pass through | ||
639 | * an additional RCU_NEXT_READY stage, so that they will be handled | ||
640 | * by the next RCU grace period. | ||
641 | */ | ||
642 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
643 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
644 | |||
645 | /* Set state so that this CPU will detect the next quiescent state. */ | ||
646 | __note_new_gpnum(rsp, rnp, rdp); | ||
647 | } | ||
648 | |||
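__rcu_process_gp_end() and rcu_start_gp_per_cpu() above manipulate the per-CPU callback list through its nxttail[] segment pointers rather than by walking it. The simplified userspace model below (hypothetical names, fixed four segments, no locking) shows the core trick: each tail entry points at the ->next slot that ends a segment, so "advancing" callbacks after a grace period amounts to a few pointer-to-pointer assignments.

```c
/* Build: cc -o cblist cblist.c  (illustrative sketch only) */
#include <stdio.h>

struct cb {
	struct cb *next;
	int id;
};

enum { SEG_DONE, SEG_WAIT, SEG_NEXT_READY, SEG_NEXT, NSEGS };

struct cblist {
	struct cb *head;	 /* all queued callbacks, oldest first */
	struct cb **tail[NSEGS]; /* tail[i] points at the ->next slot ending segment i */
};

static void cblist_init(struct cblist *l)
{
	int i;

	l->head = NULL;
	for (i = 0; i < NSEGS; i++)
		l->tail[i] = &l->head;
}

/* Newly queued callbacks land in the SEG_NEXT segment. */
static void cblist_enqueue(struct cblist *l, struct cb *c)
{
	c->next = NULL;
	*l->tail[SEG_NEXT] = c;
	l->tail[SEG_NEXT] = &c->next;
}

/*
 * A grace period ended: whatever was waiting is now done, whatever was
 * "next ready" is now waiting -- the same three assignments that
 * __rcu_process_gp_end() performs on rdp->nxttail[].
 */
static void cblist_gp_ended(struct cblist *l)
{
	l->tail[SEG_DONE] = l->tail[SEG_WAIT];
	l->tail[SEG_WAIT] = l->tail[SEG_NEXT_READY];
	l->tail[SEG_NEXT_READY] = l->tail[SEG_NEXT];
}

int main(void)
{
	struct cblist l;
	struct cb a = { .id = 1 }, b = { .id = 2 };
	struct cb *c;

	cblist_init(&l);
	cblist_enqueue(&l, &a);
	cblist_enqueue(&l, &b);

	cblist_gp_ended(&l);	/* after three grace-period ends, a and b ... */
	cblist_gp_ended(&l);
	cblist_gp_ended(&l);	/* ... have migrated into the DONE segment */

	/* The DONE segment runs from head up to (excluding) *tail[SEG_DONE]. */
	for (c = l.head; c != *l.tail[SEG_DONE]; c = c->next)
		printf("callback %d is ready to invoke\n", c->id);
	return 0;
}
```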
649 | /* | ||
586 | * Start a new RCU grace period if warranted, re-initializing the hierarchy | 650 | * Start a new RCU grace period if warranted, re-initializing the hierarchy |
587 | * in preparation for detecting the next grace period. The caller must hold | 651 | * in preparation for detecting the next grace period. The caller must hold |
588 | * the root node's ->lock, which is released before return. Hard irqs must | 652 | * the root node's ->lock, which is released before return. Hard irqs must |
@@ -596,7 +660,23 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
596 | struct rcu_node *rnp = rcu_get_root(rsp); | 660 | struct rcu_node *rnp = rcu_get_root(rsp); |
597 | 661 | ||
598 | if (!cpu_needs_another_gp(rsp, rdp)) { | 662 | if (!cpu_needs_another_gp(rsp, rdp)) { |
599 | spin_unlock_irqrestore(&rnp->lock, flags); | 663 | if (rnp->completed == rsp->completed) { |
664 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
665 | return; | ||
666 | } | ||
667 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
668 | |||
669 | /* | ||
670 | * Propagate new ->completed value to rcu_node structures | ||
671 | * so that other CPUs don't have to wait until the start | ||
672 | * of the next grace period to process their callbacks. | ||
673 | */ | ||
674 | rcu_for_each_node_breadth_first(rsp, rnp) { | ||
675 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
676 | rnp->completed = rsp->completed; | ||
677 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
678 | } | ||
679 | local_irq_restore(flags); | ||
600 | return; | 680 | return; |
601 | } | 681 | } |
602 | 682 | ||
@@ -606,29 +686,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
606 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ | 686 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ |
607 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 687 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
608 | record_gp_stall_check_time(rsp); | 688 | record_gp_stall_check_time(rsp); |
609 | dyntick_record_completed(rsp, rsp->completed - 1); | ||
610 | note_new_gpnum(rsp, rdp); | ||
611 | |||
612 | /* | ||
613 | * Because this CPU just now started the new grace period, we know | ||
614 | * that all of its callbacks will be covered by this upcoming grace | ||
615 | * period, even the ones that were registered arbitrarily recently. | ||
616 | * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
617 | * | ||
618 | * Other CPUs cannot be sure exactly when the grace period started. | ||
619 | * Therefore, their recently registered callbacks must pass through | ||
620 | * an additional RCU_NEXT_READY stage, so that they will be handled | ||
621 | * by the next RCU grace period. | ||
622 | */ | ||
623 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
624 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
625 | 689 | ||
626 | /* Special-case the common single-level case. */ | 690 | /* Special-case the common single-level case. */ |
627 | if (NUM_RCU_NODES == 1) { | 691 | if (NUM_RCU_NODES == 1) { |
628 | rcu_preempt_check_blocked_tasks(rnp); | 692 | rcu_preempt_check_blocked_tasks(rnp); |
629 | rnp->qsmask = rnp->qsmaskinit; | 693 | rnp->qsmask = rnp->qsmaskinit; |
630 | rnp->gpnum = rsp->gpnum; | 694 | rnp->gpnum = rsp->gpnum; |
695 | rnp->completed = rsp->completed; | ||
631 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ | 696 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ |
697 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
632 | spin_unlock_irqrestore(&rnp->lock, flags); | 698 | spin_unlock_irqrestore(&rnp->lock, flags); |
633 | return; | 699 | return; |
634 | } | 700 | } |
@@ -657,69 +723,50 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
657 | * irqs disabled. | 723 | * irqs disabled. |
658 | */ | 724 | */ |
659 | rcu_for_each_node_breadth_first(rsp, rnp) { | 725 | rcu_for_each_node_breadth_first(rsp, rnp) { |
660 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 726 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
661 | rcu_preempt_check_blocked_tasks(rnp); | 727 | rcu_preempt_check_blocked_tasks(rnp); |
662 | rnp->qsmask = rnp->qsmaskinit; | 728 | rnp->qsmask = rnp->qsmaskinit; |
663 | rnp->gpnum = rsp->gpnum; | 729 | rnp->gpnum = rsp->gpnum; |
664 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | 730 | rnp->completed = rsp->completed; |
731 | if (rnp == rdp->mynode) | ||
732 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
733 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
665 | } | 734 | } |
666 | 735 | ||
736 | rnp = rcu_get_root(rsp); | ||
737 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
667 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ | 738 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ |
739 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
668 | spin_unlock_irqrestore(&rsp->onofflock, flags); | 740 | spin_unlock_irqrestore(&rsp->onofflock, flags); |
669 | } | 741 | } |
670 | 742 | ||
671 | /* | 743 | /* |
672 | * Advance this CPU's callbacks, but only if the current grace period | 744 | * Report a full set of quiescent states to the specified rcu_state |
673 | * has ended. This may be called only from the CPU to whom the rdp | 745 | * data structure. This involves cleaning up after the prior grace |
674 | * belongs. | 746 | * period and letting rcu_start_gp() start up the next grace period |
675 | */ | 747 | * if one is needed. Note that the caller must hold rnp->lock, as |
676 | static void | 748 | * required by rcu_start_gp(), which will release it. |
677 | rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
678 | { | ||
679 | long completed_snap; | ||
680 | unsigned long flags; | ||
681 | |||
682 | local_irq_save(flags); | ||
683 | completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */ | ||
684 | |||
685 | /* Did another grace period end? */ | ||
686 | if (rdp->completed != completed_snap) { | ||
687 | |||
688 | /* Advance callbacks. No harm if list empty. */ | ||
689 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
690 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
691 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
692 | |||
693 | /* Remember that we saw this grace-period completion. */ | ||
694 | rdp->completed = completed_snap; | ||
695 | } | ||
696 | local_irq_restore(flags); | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * Clean up after the prior grace period and let rcu_start_gp() start up | ||
701 | * the next grace period if one is needed. Note that the caller must | ||
702 | * hold rnp->lock, as required by rcu_start_gp(), which will release it. | ||
703 | */ | 749 | */ |
704 | static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) | 750 | static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) |
705 | __releases(rcu_get_root(rsp)->lock) | 751 | __releases(rcu_get_root(rsp)->lock) |
706 | { | 752 | { |
707 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); | 753 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); |
708 | rsp->completed = rsp->gpnum; | 754 | rsp->completed = rsp->gpnum; |
709 | rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); | 755 | rsp->signaled = RCU_GP_IDLE; |
710 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ | 756 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ |
711 | } | 757 | } |
712 | 758 | ||
713 | /* | 759 | /* |
714 | * Similar to cpu_quiet(), for which it is a helper function. Allows | 760 | * Similar to rcu_report_qs_rdp(), for which it is a helper function. |
715 | * a group of CPUs to be quieted at one go, though all the CPUs in the | 761 | * Allows quiescent states for a group of CPUs to be reported at one go |
716 | * group must be represented by the same leaf rcu_node structure. | 762 | * to the specified rcu_node structure, though all the CPUs in the group |
717 | * That structure's lock must be held upon entry, and it is released | 763 | * must be represented by the same rcu_node structure (which need not be |
718 | * before return. | 764 | * a leaf rcu_node structure, though it often will be). That structure's |
765 | * lock must be held upon entry, and it is released before return. | ||
719 | */ | 766 | */ |
720 | static void | 767 | static void |
721 | cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | 768 | rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, |
722 | unsigned long flags) | 769 | struct rcu_node *rnp, unsigned long flags) |
723 | __releases(rnp->lock) | 770 | __releases(rnp->lock) |
724 | { | 771 | { |
725 | struct rcu_node *rnp_c; | 772 | struct rcu_node *rnp_c; |
@@ -755,21 +802,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
755 | 802 | ||
756 | /* | 803 | /* |
757 | * Get here if we are the last CPU to pass through a quiescent | 804 | * Get here if we are the last CPU to pass through a quiescent |
758 | * state for this grace period. Invoke cpu_quiet_msk_finish() | 805 | * state for this grace period. Invoke rcu_report_qs_rsp() |
759 | * to clean up and start the next grace period if one is needed. | 806 | * to clean up and start the next grace period if one is needed. |
760 | */ | 807 | */ |
761 | cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ | 808 | rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */ |
762 | } | 809 | } |
763 | 810 | ||
764 | /* | 811 | /* |
765 | * Record a quiescent state for the specified CPU, which must either be | 812 | * Record a quiescent state for the specified CPU to that CPU's rcu_data |
766 | * the current CPU. The lastcomp argument is used to make sure we are | 813 | * structure. This must be either called from the specified CPU, or |
767 | * still in the grace period of interest. We don't want to end the current | 814 | * called when the specified CPU is known to be offline (and when it is |
768 | * grace period based on quiescent states detected in an earlier grace | 815 | * also known that no other CPU is concurrently trying to help the offline |
769 | * period! | 816 | * CPU). The lastcomp argument is used to make sure we are still in the |
817 | * grace period of interest. We don't want to end the current grace period | ||
818 | * based on quiescent states detected in an earlier grace period! | ||
770 | */ | 819 | */ |
771 | static void | 820 | static void |
772 | cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | 821 | rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) |
773 | { | 822 | { |
774 | unsigned long flags; | 823 | unsigned long flags; |
775 | unsigned long mask; | 824 | unsigned long mask; |
@@ -777,15 +826,15 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
777 | 826 | ||
778 | rnp = rdp->mynode; | 827 | rnp = rdp->mynode; |
779 | spin_lock_irqsave(&rnp->lock, flags); | 828 | spin_lock_irqsave(&rnp->lock, flags); |
780 | if (lastcomp != ACCESS_ONCE(rsp->completed)) { | 829 | if (lastcomp != rnp->completed) { |
781 | 830 | ||
782 | /* | 831 | /* |
783 | * Someone beat us to it for this grace period, so leave. | 832 | * Someone beat us to it for this grace period, so leave. |
784 | * The race with GP start is resolved by the fact that we | 833 | * The race with GP start is resolved by the fact that we |
785 | * hold the leaf rcu_node lock, so that the per-CPU bits | 834 | * hold the leaf rcu_node lock, so that the per-CPU bits |
786 | * cannot yet be initialized -- so we would simply find our | 835 | * cannot yet be initialized -- so we would simply find our |
787 | * CPU's bit already cleared in cpu_quiet_msk() if this race | 836 | * CPU's bit already cleared in rcu_report_qs_rnp() if this |
788 | * occurred. | 837 | * race occurred. |
789 | */ | 838 | */ |
790 | rdp->passed_quiesc = 0; /* try again later! */ | 839 | rdp->passed_quiesc = 0; /* try again later! */ |
791 | spin_unlock_irqrestore(&rnp->lock, flags); | 840 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -803,7 +852,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
803 | */ | 852 | */ |
804 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 853 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; |
805 | 854 | ||
806 | cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ | 855 | rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */ |
807 | } | 856 | } |
808 | } | 857 | } |
809 | 858 | ||
@@ -834,8 +883,11 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
834 | if (!rdp->passed_quiesc) | 883 | if (!rdp->passed_quiesc) |
835 | return; | 884 | return; |
836 | 885 | ||
837 | /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ | 886 | /* |
838 | cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); | 887 | * Tell RCU we are done (but rcu_report_qs_rdp() will be the |
888 | * judge of that). | ||
889 | */ | ||
890 | rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); | ||
839 | } | 891 | } |
840 | 892 | ||
841 | #ifdef CONFIG_HOTPLUG_CPU | 893 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -895,8 +947,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
895 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | 947 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) |
896 | { | 948 | { |
897 | unsigned long flags; | 949 | unsigned long flags; |
898 | long lastcomp; | ||
899 | unsigned long mask; | 950 | unsigned long mask; |
951 | int need_report = 0; | ||
900 | struct rcu_data *rdp = rsp->rda[cpu]; | 952 | struct rcu_data *rdp = rsp->rda[cpu]; |
901 | struct rcu_node *rnp; | 953 | struct rcu_node *rnp; |
902 | 954 | ||
@@ -910,17 +962,32 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
910 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 962 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
911 | rnp->qsmaskinit &= ~mask; | 963 | rnp->qsmaskinit &= ~mask; |
912 | if (rnp->qsmaskinit != 0) { | 964 | if (rnp->qsmaskinit != 0) { |
913 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 965 | if (rnp != rdp->mynode) |
966 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
914 | break; | 967 | break; |
915 | } | 968 | } |
916 | rcu_preempt_offline_tasks(rsp, rnp, rdp); | 969 | if (rnp == rdp->mynode) |
970 | need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); | ||
971 | else | ||
972 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
917 | mask = rnp->grpmask; | 973 | mask = rnp->grpmask; |
918 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
919 | rnp = rnp->parent; | 974 | rnp = rnp->parent; |
920 | } while (rnp != NULL); | 975 | } while (rnp != NULL); |
921 | lastcomp = rsp->completed; | ||
922 | 976 | ||
923 | spin_unlock_irqrestore(&rsp->onofflock, flags); | 977 | /* |
978 | * We still hold the leaf rcu_node structure lock here, and | ||
979 | * irqs are still disabled. The reason for this subterfuge is | ||
980 | * because invoking rcu_report_unblock_qs_rnp() with ->onofflock | ||
981 | * held leads to deadlock. | ||
982 | */ | ||
983 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | ||
984 | rnp = rdp->mynode; | ||
985 | if (need_report & RCU_OFL_TASKS_NORM_GP) | ||
986 | rcu_report_unblock_qs_rnp(rnp, flags); | ||
987 | else | ||
988 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
989 | if (need_report & RCU_OFL_TASKS_EXP_GP) | ||
990 | rcu_report_exp_rnp(rsp, rnp); | ||
924 | 991 | ||
925 | rcu_adopt_orphan_cbs(rsp); | 992 | rcu_adopt_orphan_cbs(rsp); |
926 | } | 993 | } |
@@ -958,7 +1025,7 @@ static void rcu_offline_cpu(int cpu)
958 | * Invoke any RCU callbacks that have made it to the end of their grace | 1025 | * Invoke any RCU callbacks that have made it to the end of their grace |
959 | * period. Thottle as specified by rdp->blimit. | 1026 | * period. Thottle as specified by rdp->blimit. |
960 | */ | 1027 | */ |
961 | static void rcu_do_batch(struct rcu_data *rdp) | 1028 | static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) |
962 | { | 1029 | { |
963 | unsigned long flags; | 1030 | unsigned long flags; |
964 | struct rcu_head *next, *list, **tail; | 1031 | struct rcu_head *next, *list, **tail; |
@@ -1011,6 +1078,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
1011 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) | 1078 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) |
1012 | rdp->blimit = blimit; | 1079 | rdp->blimit = blimit; |
1013 | 1080 | ||
1081 | /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ | ||
1082 | if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { | ||
1083 | rdp->qlen_last_fqs_check = 0; | ||
1084 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1085 | } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) | ||
1086 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1087 | |||
1014 | local_irq_restore(flags); | 1088 | local_irq_restore(flags); |
1015 | 1089 | ||
1016 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | 1090 | /* Re-raise the RCU softirq if there are callbacks remaining. */ |
@@ -1085,7 +1159,7 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1085 | rcu_for_each_leaf_node(rsp, rnp) { | 1159 | rcu_for_each_leaf_node(rsp, rnp) { |
1086 | mask = 0; | 1160 | mask = 0; |
1087 | spin_lock_irqsave(&rnp->lock, flags); | 1161 | spin_lock_irqsave(&rnp->lock, flags); |
1088 | if (rsp->completed != lastcomp) { | 1162 | if (rnp->completed != lastcomp) { |
1089 | spin_unlock_irqrestore(&rnp->lock, flags); | 1163 | spin_unlock_irqrestore(&rnp->lock, flags); |
1090 | return 1; | 1164 | return 1; |
1091 | } | 1165 | } |
@@ -1099,10 +1173,10 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1099 | if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) | 1173 | if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) |
1100 | mask |= bit; | 1174 | mask |= bit; |
1101 | } | 1175 | } |
1102 | if (mask != 0 && rsp->completed == lastcomp) { | 1176 | if (mask != 0 && rnp->completed == lastcomp) { |
1103 | 1177 | ||
1104 | /* cpu_quiet_msk() releases rnp->lock. */ | 1178 | /* rcu_report_qs_rnp() releases rnp->lock. */ |
1105 | cpu_quiet_msk(mask, rsp, rnp, flags); | 1179 | rcu_report_qs_rnp(mask, rsp, rnp, flags); |
1106 | continue; | 1180 | continue; |
1107 | } | 1181 | } |
1108 | spin_unlock_irqrestore(&rnp->lock, flags); | 1182 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -1120,6 +1194,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1120 | long lastcomp; | 1194 | long lastcomp; |
1121 | struct rcu_node *rnp = rcu_get_root(rsp); | 1195 | struct rcu_node *rnp = rcu_get_root(rsp); |
1122 | u8 signaled; | 1196 | u8 signaled; |
1197 | u8 forcenow; | ||
1123 | 1198 | ||
1124 | if (!rcu_gp_in_progress(rsp)) | 1199 | if (!rcu_gp_in_progress(rsp)) |
1125 | return; /* No grace period in progress, nothing to force. */ | 1200 | return; /* No grace period in progress, nothing to force. */ |
@@ -1132,19 +1207,20 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1132 | goto unlock_ret; /* no emergency and done recently. */ | 1207 | goto unlock_ret; /* no emergency and done recently. */ |
1133 | rsp->n_force_qs++; | 1208 | rsp->n_force_qs++; |
1134 | spin_lock(&rnp->lock); | 1209 | spin_lock(&rnp->lock); |
1135 | lastcomp = rsp->completed; | 1210 | lastcomp = rsp->gpnum - 1; |
1136 | signaled = rsp->signaled; | 1211 | signaled = rsp->signaled; |
1137 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 1212 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
1138 | if (lastcomp == rsp->gpnum) { | 1213 | if(!rcu_gp_in_progress(rsp)) { |
1139 | rsp->n_force_qs_ngp++; | 1214 | rsp->n_force_qs_ngp++; |
1140 | spin_unlock(&rnp->lock); | 1215 | spin_unlock(&rnp->lock); |
1141 | goto unlock_ret; /* no GP in progress, time updated. */ | 1216 | goto unlock_ret; /* no GP in progress, time updated. */ |
1142 | } | 1217 | } |
1143 | spin_unlock(&rnp->lock); | 1218 | spin_unlock(&rnp->lock); |
1144 | switch (signaled) { | 1219 | switch (signaled) { |
1220 | case RCU_GP_IDLE: | ||
1145 | case RCU_GP_INIT: | 1221 | case RCU_GP_INIT: |
1146 | 1222 | ||
1147 | break; /* grace period still initializing, ignore. */ | 1223 | break; /* grace period idle or initializing, ignore. */ |
1148 | 1224 | ||
1149 | case RCU_SAVE_DYNTICK: | 1225 | case RCU_SAVE_DYNTICK: |
1150 | 1226 | ||
@@ -1155,20 +1231,29 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1155 | if (rcu_process_dyntick(rsp, lastcomp, | 1231 | if (rcu_process_dyntick(rsp, lastcomp, |
1156 | dyntick_save_progress_counter)) | 1232 | dyntick_save_progress_counter)) |
1157 | goto unlock_ret; | 1233 | goto unlock_ret; |
1234 | /* fall into next case. */ | ||
1235 | |||
1236 | case RCU_SAVE_COMPLETED: | ||
1158 | 1237 | ||
1159 | /* Update state, record completion counter. */ | 1238 | /* Update state, record completion counter. */ |
1239 | forcenow = 0; | ||
1160 | spin_lock(&rnp->lock); | 1240 | spin_lock(&rnp->lock); |
1161 | if (lastcomp == rsp->completed) { | 1241 | if (lastcomp + 1 == rsp->gpnum && |
1242 | lastcomp == rsp->completed && | ||
1243 | rsp->signaled == signaled) { | ||
1162 | rsp->signaled = RCU_FORCE_QS; | 1244 | rsp->signaled = RCU_FORCE_QS; |
1163 | dyntick_record_completed(rsp, lastcomp); | 1245 | rsp->completed_fqs = lastcomp; |
1246 | forcenow = signaled == RCU_SAVE_COMPLETED; | ||
1164 | } | 1247 | } |
1165 | spin_unlock(&rnp->lock); | 1248 | spin_unlock(&rnp->lock); |
1166 | break; | 1249 | if (!forcenow) |
1250 | break; | ||
1251 | /* fall into next case. */ | ||
1167 | 1252 | ||
1168 | case RCU_FORCE_QS: | 1253 | case RCU_FORCE_QS: |
1169 | 1254 | ||
1170 | /* Check dyntick-idle state, send IPI to laggarts. */ | 1255 | /* Check dyntick-idle state, send IPI to laggarts. */ |
1171 | if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), | 1256 | if (rcu_process_dyntick(rsp, rsp->completed_fqs, |
1172 | rcu_implicit_dynticks_qs)) | 1257 | rcu_implicit_dynticks_qs)) |
1173 | goto unlock_ret; | 1258 | goto unlock_ret; |
1174 | 1259 | ||
@@ -1224,7 +1309,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1224 | } | 1309 | } |
1225 | 1310 | ||
1226 | /* If there are callbacks ready, invoke them. */ | 1311 | /* If there are callbacks ready, invoke them. */ |
1227 | rcu_do_batch(rdp); | 1312 | rcu_do_batch(rsp, rdp); |
1228 | } | 1313 | } |
1229 | 1314 | ||
1230 | /* | 1315 | /* |
@@ -1288,10 +1373,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1288 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ | 1373 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ |
1289 | } | 1374 | } |
1290 | 1375 | ||
1291 | /* Force the grace period if too many callbacks or too long waiting. */ | 1376 | /* |
1292 | if (unlikely(++rdp->qlen > qhimark)) { | 1377 | * Force the grace period if too many callbacks or too long waiting. |
1378 | * Enforce hysteresis, and don't invoke force_quiescent_state() | ||
1379 | * if some other CPU has recently done so. Also, don't bother | ||
1380 | * invoking force_quiescent_state() if the newly enqueued callback | ||
1381 | * is the only one waiting for a grace period to complete. | ||
1382 | */ | ||
1383 | if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { | ||
1293 | rdp->blimit = LONG_MAX; | 1384 | rdp->blimit = LONG_MAX; |
1294 | force_quiescent_state(rsp, 0); | 1385 | if (rsp->n_force_qs == rdp->n_force_qs_snap && |
1386 | *rdp->nxttail[RCU_DONE_TAIL] != head) | ||
1387 | force_quiescent_state(rsp, 0); | ||
1388 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1389 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1295 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) | 1390 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) |
1296 | force_quiescent_state(rsp, 1); | 1391 | force_quiescent_state(rsp, 1); |
1297 | local_irq_restore(flags); | 1392 | local_irq_restore(flags); |
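The hysteresis added in this hunk keys off ->qlen, which grows by one for every call_rcu() invocation. For context, a hypothetical caller (not part of this patch; foo names are made up) that retires an object through the callback machinery looks like the sketch below; it is the steady stream of such calls that the qlen_last_fqs_check bookkeeping throttles.

```c
/* Hypothetical kernel-side sketch, not part of this patch. */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct rcu_head rcu;
	int data;
};

/* Invoked after a full grace period has elapsed. */
static void foo_reclaim(struct rcu_head *head)
{
	struct foo *p = container_of(head, struct foo, rcu);

	kfree(p);
}

/* Retire p: readers may still hold references, so defer the free. */
static void foo_retire(struct foo *p)
{
	call_rcu(&p->rcu, foo_reclaim);	/* bumps ->qlen in __call_rcu() above */
}
```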
@@ -1315,6 +1410,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1315 | } | 1410 | } |
1316 | EXPORT_SYMBOL_GPL(call_rcu_bh); | 1411 | EXPORT_SYMBOL_GPL(call_rcu_bh); |
1317 | 1412 | ||
1413 | /** | ||
1414 | * synchronize_sched - wait until an rcu-sched grace period has elapsed. | ||
1415 | * | ||
1416 | * Control will return to the caller some time after a full rcu-sched | ||
1417 | * grace period has elapsed, in other words after all currently executing | ||
1418 | * rcu-sched read-side critical sections have completed. These read-side | ||
1419 | * critical sections are delimited by rcu_read_lock_sched() and | ||
1420 | * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), | ||
1421 | * local_irq_disable(), and so on may be used in place of | ||
1422 | * rcu_read_lock_sched(). | ||
1423 | * | ||
1424 | * This means that all preempt_disable code sequences, including NMI and | ||
1425 | * hardware-interrupt handlers, in progress on entry will have completed | ||
1426 | * before this primitive returns. However, this does not guarantee that | ||
1427 | * softirq handlers will have completed, since in some kernels, these | ||
1428 | * handlers can run in process context, and can block. | ||
1429 | * | ||
1430 | * This primitive provides the guarantees made by the (now removed) | ||
1431 | * synchronize_kernel() API. In contrast, synchronize_rcu() only | ||
1432 | * guarantees that rcu_read_lock() sections will have completed. | ||
1433 | * In "classic RCU", these two guarantees happen to be one and | ||
1434 | * the same, but can differ in realtime RCU implementations. | ||
1435 | */ | ||
1436 | void synchronize_sched(void) | ||
1437 | { | ||
1438 | struct rcu_synchronize rcu; | ||
1439 | |||
1440 | if (rcu_blocking_is_gp()) | ||
1441 | return; | ||
1442 | |||
1443 | init_completion(&rcu.completion); | ||
1444 | /* Will wake me after RCU finished. */ | ||
1445 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
1446 | /* Wait for it. */ | ||
1447 | wait_for_completion(&rcu.completion); | ||
1448 | } | ||
1449 | EXPORT_SYMBOL_GPL(synchronize_sched); | ||
1450 | |||
1451 | /** | ||
1452 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. | ||
1453 | * | ||
1454 | * Control will return to the caller some time after a full rcu_bh grace | ||
1455 | * period has elapsed, in other words after all currently executing rcu_bh | ||
1456 | * read-side critical sections have completed. RCU read-side critical | ||
1457 | * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), | ||
1458 | * and may be nested. | ||
1459 | */ | ||
1460 | void synchronize_rcu_bh(void) | ||
1461 | { | ||
1462 | struct rcu_synchronize rcu; | ||
1463 | |||
1464 | if (rcu_blocking_is_gp()) | ||
1465 | return; | ||
1466 | |||
1467 | init_completion(&rcu.completion); | ||
1468 | /* Will wake me after RCU finished. */ | ||
1469 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | ||
1470 | /* Wait for it. */ | ||
1471 | wait_for_completion(&rcu.completion); | ||
1472 | } | ||
1473 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
1474 | |||
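The new kernel-doc above spells out exactly what synchronize_sched() waits for. A minimal sketch of the classic use (hypothetical foo structure and helpers, not part of this patch): readers disable preemption around their access, so once synchronize_sched() returns the updater knows no reader can still hold a reference to the old object.

```c
/* Hypothetical kernel-side sketch, not part of this patch. */
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
};

static struct foo *global_foo;

static int foo_read(void)
{
	struct foo *p;
	int val = -1;

	preempt_disable();		/* acts as an rcu-sched read-side section */
	p = ACCESS_ONCE(global_foo);
	if (p)
		val = p->data;
	preempt_enable();
	return val;
}

static void foo_replace(struct foo *newp)
{
	struct foo *oldp = global_foo;

	rcu_assign_pointer(global_foo, newp);
	synchronize_sched();		/* all preempt-disabled readers are done */
	kfree(oldp);			/* so nobody can still be using oldp */
}
```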
1318 | /* | 1475 | /* |
1319 | * Check to see if there is any immediate RCU-related work to be done | 1476 | * Check to see if there is any immediate RCU-related work to be done |
1320 | * by the current CPU, for the specified type of RCU, returning 1 if so. | 1477 | * by the current CPU, for the specified type of RCU, returning 1 if so. |
@@ -1324,6 +1481,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
1324 | */ | 1481 | */ |
1325 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | 1482 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) |
1326 | { | 1483 | { |
1484 | struct rcu_node *rnp = rdp->mynode; | ||
1485 | |||
1327 | rdp->n_rcu_pending++; | 1486 | rdp->n_rcu_pending++; |
1328 | 1487 | ||
1329 | /* Check for CPU stalls, if enabled. */ | 1488 | /* Check for CPU stalls, if enabled. */ |
@@ -1348,13 +1507,13 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1348 | } | 1507 | } |
1349 | 1508 | ||
1350 | /* Has another RCU grace period completed? */ | 1509 | /* Has another RCU grace period completed? */ |
1351 | if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */ | 1510 | if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ |
1352 | rdp->n_rp_gp_completed++; | 1511 | rdp->n_rp_gp_completed++; |
1353 | return 1; | 1512 | return 1; |
1354 | } | 1513 | } |
1355 | 1514 | ||
1356 | /* Has a new RCU grace period started? */ | 1515 | /* Has a new RCU grace period started? */ |
1357 | if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */ | 1516 | if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ |
1358 | rdp->n_rp_gp_started++; | 1517 | rdp->n_rp_gp_started++; |
1359 | return 1; | 1518 | return 1; |
1360 | } | 1519 | } |
@@ -1397,6 +1556,21 @@ int rcu_needs_cpu(int cpu)
1397 | rcu_preempt_needs_cpu(cpu); | 1556 | rcu_preempt_needs_cpu(cpu); |
1398 | } | 1557 | } |
1399 | 1558 | ||
1559 | /* | ||
1560 | * This function is invoked towards the end of the scheduler's initialization | ||
1561 | * process. Before this is called, the idle task might contain | ||
1562 | * RCU read-side critical sections (during which time, this idle | ||
1563 | * task is booting the system). After this function is called, the | ||
1564 | * idle tasks are prohibited from containing RCU read-side critical | ||
1565 | * sections. | ||
1566 | */ | ||
1567 | void rcu_scheduler_starting(void) | ||
1568 | { | ||
1569 | WARN_ON(num_online_cpus() != 1); | ||
1570 | WARN_ON(nr_context_switches() > 0); | ||
1571 | rcu_scheduler_active = 1; | ||
1572 | } | ||
1573 | |||
1400 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | 1574 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; |
1401 | static atomic_t rcu_barrier_cpu_count; | 1575 | static atomic_t rcu_barrier_cpu_count; |
1402 | static DEFINE_MUTEX(rcu_barrier_mutex); | 1576 | static DEFINE_MUTEX(rcu_barrier_mutex); |
@@ -1508,21 +1682,18 @@ static void __cpuinit
1508 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | 1682 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) |
1509 | { | 1683 | { |
1510 | unsigned long flags; | 1684 | unsigned long flags; |
1511 | long lastcomp; | ||
1512 | unsigned long mask; | 1685 | unsigned long mask; |
1513 | struct rcu_data *rdp = rsp->rda[cpu]; | 1686 | struct rcu_data *rdp = rsp->rda[cpu]; |
1514 | struct rcu_node *rnp = rcu_get_root(rsp); | 1687 | struct rcu_node *rnp = rcu_get_root(rsp); |
1515 | 1688 | ||
1516 | /* Set up local state, ensuring consistent view of global state. */ | 1689 | /* Set up local state, ensuring consistent view of global state. */ |
1517 | spin_lock_irqsave(&rnp->lock, flags); | 1690 | spin_lock_irqsave(&rnp->lock, flags); |
1518 | lastcomp = rsp->completed; | ||
1519 | rdp->completed = lastcomp; | ||
1520 | rdp->gpnum = lastcomp; | ||
1521 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | 1691 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ |
1522 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | 1692 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ |
1523 | rdp->beenonline = 1; /* We have now been online. */ | 1693 | rdp->beenonline = 1; /* We have now been online. */ |
1524 | rdp->preemptable = preemptable; | 1694 | rdp->preemptable = preemptable; |
1525 | rdp->passed_quiesc_completed = lastcomp - 1; | 1695 | rdp->qlen_last_fqs_check = 0; |
1696 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1526 | rdp->blimit = blimit; | 1697 | rdp->blimit = blimit; |
1527 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1698 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
1528 | 1699 | ||
@@ -1542,6 +1713,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1542 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 1713 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
1543 | rnp->qsmaskinit |= mask; | 1714 | rnp->qsmaskinit |= mask; |
1544 | mask = rnp->grpmask; | 1715 | mask = rnp->grpmask; |
1716 | if (rnp == rdp->mynode) { | ||
1717 | rdp->gpnum = rnp->completed; /* if GP in progress... */ | ||
1718 | rdp->completed = rnp->completed; | ||
1719 | rdp->passed_quiesc_completed = rnp->completed - 1; | ||
1720 | } | ||
1545 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | 1721 | spin_unlock(&rnp->lock); /* irqs already disabled. */ |
1546 | rnp = rnp->parent; | 1722 | rnp = rnp->parent; |
1547 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); | 1723 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); |
@@ -1559,8 +1735,8 @@ static void __cpuinit rcu_online_cpu(int cpu)
1559 | /* | 1735 | /* |
1560 | * Handle CPU online/offline notification events. | 1736 | * Handle CPU online/offline notification events. |
1561 | */ | 1737 | */ |
1562 | int __cpuinit rcu_cpu_notify(struct notifier_block *self, | 1738 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, |
1563 | unsigned long action, void *hcpu) | 1739 | unsigned long action, void *hcpu) |
1564 | { | 1740 | { |
1565 | long cpu = (long)hcpu; | 1741 | long cpu = (long)hcpu; |
1566 | 1742 | ||
@@ -1647,8 +1823,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1647 | cpustride *= rsp->levelspread[i]; | 1823 | cpustride *= rsp->levelspread[i]; |
1648 | rnp = rsp->level[i]; | 1824 | rnp = rsp->level[i]; |
1649 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { | 1825 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { |
1650 | if (rnp != rcu_get_root(rsp)) | 1826 | spin_lock_init(&rnp->lock); |
1651 | spin_lock_init(&rnp->lock); | 1827 | lockdep_set_class(&rnp->lock, &rcu_node_class[i]); |
1652 | rnp->gpnum = 0; | 1828 | rnp->gpnum = 0; |
1653 | rnp->qsmask = 0; | 1829 | rnp->qsmask = 0; |
1654 | rnp->qsmaskinit = 0; | 1830 | rnp->qsmaskinit = 0; |
@@ -1669,9 +1845,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1669 | rnp->level = i; | 1845 | rnp->level = i; |
1670 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); | 1846 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); |
1671 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); | 1847 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); |
1848 | INIT_LIST_HEAD(&rnp->blocked_tasks[2]); | ||
1849 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); | ||
1672 | } | 1850 | } |
1673 | } | 1851 | } |
1674 | spin_lock_init(&rcu_get_root(rsp)->lock); | ||
1675 | } | 1852 | } |
1676 | 1853 | ||
1677 | /* | 1854 | /* |
@@ -1697,16 +1874,30 @@ do { \
1697 | } \ | 1874 | } \ |
1698 | } while (0) | 1875 | } while (0) |
1699 | 1876 | ||
1700 | void __init __rcu_init(void) | 1877 | void __init rcu_init(void) |
1701 | { | 1878 | { |
1879 | int i; | ||
1880 | |||
1702 | rcu_bootup_announce(); | 1881 | rcu_bootup_announce(); |
1703 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 1882 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
1704 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | 1883 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); |
1705 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 1884 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
1885 | #if NUM_RCU_LVL_4 != 0 | ||
1886 | printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n"); | ||
1887 | #endif /* #if NUM_RCU_LVL_4 != 0 */ | ||
1706 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); | 1888 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); |
1707 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); | 1889 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); |
1708 | __rcu_init_preempt(); | 1890 | __rcu_init_preempt(); |
1709 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 1891 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
1892 | |||
1893 | /* | ||
1894 | * We don't need protection against CPU-hotplug here because | ||
1895 | * this is called early in boot, before either interrupts | ||
1896 | * or the scheduler are operational. | ||
1897 | */ | ||
1898 | cpu_notifier(rcu_cpu_notify, 0); | ||
1899 | for_each_online_cpu(i) | ||
1900 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i); | ||
1710 | } | 1901 | } |
1711 | 1902 | ||
1712 | #include "rcutree_plugin.h" | 1903 | #include "rcutree_plugin.h" |