Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r-- | kernel/rcu/tree.c | 66 | +++++++++++++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 49 insertions(+), 17 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7680fc275036..4c106fcc0d54 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -759,39 +759,71 @@ void rcu_irq_enter(void)
 /**
  * rcu_nmi_enter - inform RCU of entry to NMI context
  *
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is active.
+ * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
+ * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * that the CPU is active.  This implementation permits nested NMIs, as
+ * long as the nesting level does not overflow an int.  (You will probably
+ * run out of stack space first.)
  */
 void rcu_nmi_enter(void)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	int incby = 2;
 
-	if (rdtp->dynticks_nmi_nesting == 0 &&
-	    (atomic_read(&rdtp->dynticks) & 0x1))
-		return;
-	rdtp->dynticks_nmi_nesting++;
-	smp_mb__before_atomic();  /* Force delay from prior write. */
-	atomic_inc(&rdtp->dynticks);
-	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic();  /* See above. */
-	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+	/* Complain about underflow. */
+	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
+
+	/*
+	 * If idle from RCU viewpoint, atomically increment ->dynticks
+	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
+	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
+	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
+	 * to be in the outermost NMI handler that interrupted an RCU-idle
+	 * period (observation due to Andy Lutomirski).
+	 */
+	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
+		smp_mb__before_atomic();  /* Force delay from prior write. */
+		atomic_inc(&rdtp->dynticks);
+		/* atomic_inc() before later RCU read-side crit sects */
+		smp_mb__after_atomic();  /* See above. */
+		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+		incby = 1;
+	}
+	rdtp->dynticks_nmi_nesting += incby;
+	barrier();
 }
 
 /**
  * rcu_nmi_exit - inform RCU of exit from NMI context
  *
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is no longer active.
+ * If we are returning from the outermost NMI handler that interrupted an
+ * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * to let the RCU grace-period handling know that the CPU is back to
+ * being RCU-idle.
  */
 void rcu_nmi_exit(void)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	if (rdtp->dynticks_nmi_nesting == 0 ||
-	    --rdtp->dynticks_nmi_nesting != 0)
+	/*
+	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+	 * (We are exiting an NMI handler, so RCU better be paying attention
+	 * to us!)
+	 */
+	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+
+	/*
+	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
+	 * leave it in non-RCU-idle state.
+	 */
+	if (rdtp->dynticks_nmi_nesting != 1) {
+		rdtp->dynticks_nmi_nesting -= 2;
 		return;
+	}
+
+	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
+	rdtp->dynticks_nmi_nesting = 0;
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
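
The counting scheme introduced above is subtle, so here is a minimal userspace sketch of it, not kernel code: plain ints stand in for the atomic per-CPU ->dynticks and ->dynticks_nmi_nesting, the memory barriers are omitted because a single thread needs none, and model_nmi_enter()/model_nmi_exit() are hypothetical stand-ins for rcu_nmi_enter()/rcu_nmi_exit(). It shows why counting the outermost idle-interrupting NMI by one and every other NMI by two makes dynticks_nmi_nesting == 1 identify exactly the handler that must restore RCU-idleness.

/*
 * Userspace model of the NMI-nesting scheme -- an illustration only.
 */
#include <assert.h>
#include <stdio.h>

static int dynticks;             /* even: RCU-idle, odd: RCU-non-idle */
static int dynticks_nmi_nesting; /* counted by 1 or 2, see below */

static void model_nmi_enter(void)
{
	int incby = 2;

	if (!(dynticks & 0x1)) {  /* CPU was RCU-idle... */
		dynticks++;       /* ...mark it non-idle... */
		incby = 1;        /* ...and count this level by one. */
	}
	dynticks_nmi_nesting += incby;
}

static void model_nmi_exit(void)
{
	if (dynticks_nmi_nesting != 1) {
		/* Nested NMI, or an NMI that did not interrupt idle. */
		dynticks_nmi_nesting -= 2;
		return;
	}
	/* Outermost NMI that interrupted an RCU-idle period. */
	dynticks_nmi_nesting = 0;
	dynticks++;               /* back to an even (idle) value */
}

int main(void)
{
	model_nmi_enter();        /* NMI while idle: nesting 0 -> 1 */
	model_nmi_enter();        /* nested NMI:     nesting 1 -> 3 */
	model_nmi_exit();         /* nested exit:    nesting 3 -> 1 */
	assert(dynticks_nmi_nesting == 1 && (dynticks & 0x1));
	model_nmi_exit();         /* outermost exit: nesting 1 -> 0 */
	assert(dynticks_nmi_nesting == 0 && !(dynticks & 0x1));
	printf("nesting=%d, dynticks=%d: RCU-idle again\n",
	       dynticks_nmi_nesting, dynticks);
	return 0;
}

Only the outermost idle-interrupting handler contributes an odd increment, so ->dynticks_nmi_nesting stays odd for as long as such a handler is on the stack and reaches exactly 1 when it is about to exit; NMIs that interrupt a non-idle CPU only ever move the counter between even values. That is the property, credited in the comment to Andy Lutomirski's observation, that lets rcu_nmi_exit() decide between "leave non-idle" and "restore RCU-idleness" without any per-NMI bookkeeping.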