path: root/kernel
author	Ingo Molnar <mingo@kernel.org>	2015-01-28 09:32:03 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-01-28 09:33:26 -0500
commit	772a9aca12567badb5b9caf2af249a5991f47ea8 (patch)
tree	82515ae74c4f3a0740aeec13dd671f18f58d5c96 /kernel
parent	41ca5d4e9be11ea6ae040b51d9628a189fd82896 (diff)
parent	f6f64681d9d87ded48a90b644b2991c6ee05da2d (diff)
Merge tag 'pr-20150114-x86-entry' of git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux into x86/asm
Pull x86/entry enhancements from Andy Lutomirski:

 " This is my accumulated x86 entry work, part 1, for 3.20. The meat of
   this is an IST rework. When an IST exception interrupts user space,
   we will handle it on the per-thread kernel stack instead of on the
   IST stack. This sounds messy, but it actually simplifies the IST
   entry/exit code, because it eliminates some ugly games we used to
   play in order to handle rescheduling, signal delivery, etc. on the
   way out of an IST exception.

   The IST rework introduces proper context tracking to IST exception
   handlers. I haven't seen any bug reports, but the old code could
   have incorrectly treated an IST exception handler as an RCU extended
   quiescent state.

   The memory failure change (included in this pull request with
   Borislav and Tony's permission) eliminates a bunch of code that is
   no longer needed now that user memory failure handlers are called in
   process context.

   Finally, this includes a few of Denys' uncontroversial and Obviously
   Correct (tm) cleanups.

   The IST and memory failure changes have been in -next for a while.

   LKML references:

   IST rework:
   http://lkml.kernel.org/r/cover.1416604491.git.luto@amacapital.net

   Memory failure change:
   http://lkml.kernel.org/r/54ab2ffa301102cd6e@agluck-desk.sc.intel.com

   Denys' cleanups:
   http://lkml.kernel.org/r/1420927210-19738-1-git-send-email-dvlasenk@redhat.com "

This tree semantically depends on and is based on the following RCU commit:

  734d16801349 ("rcu: Make rcu_nmi_enter() handle nesting")

... and for that reason won't be pushed upstream before the RCU bits hit Linus's tree.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tree.c	66
1 file changed, 49 insertions, 17 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7680fc275036..4c106fcc0d54 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -759,39 +759,71 @@ void rcu_irq_enter(void)
 /**
  * rcu_nmi_enter - inform RCU of entry to NMI context
  *
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is active.
+ * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
+ * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * that the CPU is active.  This implementation permits nested NMIs, as
+ * long as the nesting level does not overflow an int.  (You will probably
+ * run out of stack space first.)
  */
 void rcu_nmi_enter(void)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+	int incby = 2;
 
-	if (rdtp->dynticks_nmi_nesting == 0 &&
-	    (atomic_read(&rdtp->dynticks) & 0x1))
-		return;
-	rdtp->dynticks_nmi_nesting++;
-	smp_mb__before_atomic();  /* Force delay from prior write. */
-	atomic_inc(&rdtp->dynticks);
-	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic();  /* See above. */
-	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+	/* Complain about underflow. */
+	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
+
+	/*
+	 * If idle from RCU viewpoint, atomically increment ->dynticks
+	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
+	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
+	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
+	 * to be in the outermost NMI handler that interrupted an RCU-idle
+	 * period (observation due to Andy Lutomirski).
+	 */
+	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
+		smp_mb__before_atomic();  /* Force delay from prior write. */
+		atomic_inc(&rdtp->dynticks);
+		/* atomic_inc() before later RCU read-side crit sects */
+		smp_mb__after_atomic();  /* See above. */
+		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+		incby = 1;
+	}
+	rdtp->dynticks_nmi_nesting += incby;
+	barrier();
 }
 
 /**
  * rcu_nmi_exit - inform RCU of exit from NMI context
  *
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is no longer active.
+ * If we are returning from the outermost NMI handler that interrupted an
+ * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * to let the RCU grace-period handling know that the CPU is back to
+ * being RCU-idle.
  */
 void rcu_nmi_exit(void)
 {
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	if (rdtp->dynticks_nmi_nesting == 0 ||
-	    --rdtp->dynticks_nmi_nesting != 0)
+	/*
+	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+	 * (We are exiting an NMI handler, so RCU better be paying attention
+	 * to us!)
+	 */
+	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+
+	/*
+	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
+	 * leave it in non-RCU-idle state.
+	 */
+	if (rdtp->dynticks_nmi_nesting != 1) {
+		rdtp->dynticks_nmi_nesting -= 2;
 		return;
+	}
+
+	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
+	rdtp->dynticks_nmi_nesting = 0;
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
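
A note on the scheme the hunk above implements: ->dynticks is odd while the CPU is non-idle from RCU's point of view, and ->dynticks_nmi_nesting is bumped by one when an NMI interrupts an RCU-idle CPU and by two otherwise, so a nesting value of exactly one on exit identifies the outermost NMI that must restore RCU-idleness. Below is a minimal user-space sketch of that bookkeeping, with hypothetical names and none of the atomics or memory barriers the real code needs; it illustrates the counting rule only and is not the kernel implementation.

/* Toy model of the 1-vs-2 nesting increments used by the new rcu_nmi_enter()/exit(). */
#include <assert.h>
#include <stdio.h>

static int dynticks;             /* odd => CPU looks non-idle to RCU */
static int dynticks_nmi_nesting; /* grows by 1 or 2 per nested NMI */

static void model_nmi_enter(void)
{
	int incby = 2;

	if (!(dynticks & 0x1)) {        /* NMI interrupted an RCU-idle CPU */
		dynticks++;             /* mark the CPU non-idle */
		incby = 1;
	}
	dynticks_nmi_nesting += incby;
}

static void model_nmi_exit(void)
{
	assert(dynticks_nmi_nesting > 0);
	assert(dynticks & 0x1);

	if (dynticks_nmi_nesting != 1) {        /* not the outermost idle-interrupting NMI */
		dynticks_nmi_nesting -= 2;
		return;
	}
	dynticks_nmi_nesting = 0;               /* outermost: return to RCU-idle */
	dynticks++;
}

int main(void)
{
	model_nmi_enter();      /* outermost NMI:  nesting == 1, dynticks odd */
	model_nmi_enter();      /* nested NMI:     nesting == 3 */
	model_nmi_exit();       /* back to         nesting == 1 */
	model_nmi_exit();       /* outermost exit: nesting == 0, dynticks even */
	printf("nesting=%d dynticks=%d\n", dynticks_nmi_nesting, dynticks);
	return 0;
}

Running the model with one NMI nested inside another that interrupted an idle CPU ends with the nesting count back at zero and dynticks even again, which is the invariant the WARN_ON_ONCE() calls in the new rcu_nmi_exit() are checking.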