diff options
| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-11-21 17:45:12 -0500 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-12-30 20:40:16 -0500 |
| commit | 734d16801349fbe951d2f780191d32c5b8a892d1 (patch) | |
| tree | 4fb555bc57b31298c38b769e64caf5c0672acc07 | |
| parent | 97bf6af1f928216fd6c5a66e8a57bfa95a659672 (diff) | |
rcu: Make rcu_nmi_enter() handle nesting
The x86 architecture has multiple types of NMI-like interrupts: real
NMIs, machine checks, and, for some values of NMI-like, debugging
and breakpoint interrupts. These interrupts can nest inside each
other. Andy Lutomirski is adding RCU support to these interrupts,
so rcu_nmi_enter() and rcu_nmi_exit() must now correctly handle nesting.
This commit therefore introduces nesting, using a clever NMI-coordination
algorithm suggested by Andy. The trick is to atomically increment
->dynticks (if needed) before manipulating ->dynticks_nmi_nesting on entry
(and, accordingly, after on exit). In addition, ->dynticks_nmi_nesting
is incremented by one if ->dynticks was incremented and by two otherwise.
This means that when rcu_nmi_exit() sees ->dynticks_nmi_nesting equal
to one, it knows that ->dynticks must be atomically incremented.
This NMI-coordination algorithm has been validated by the following
Promela model:
------------------------------------------------------------------------
/*
* Promela model for Andy Lutomirski's suggested change to rcu_nmi_enter()
* that allows nesting.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2014
*
* Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
byte dynticks_nmi_nesting = 0;
byte dynticks = 0;
/*
* Promela verision of rcu_nmi_enter().
*/
inline rcu_nmi_enter()
{
byte incby;
byte tmp;
incby = BUSY_INCBY;
assert(dynticks_nmi_nesting >= 0);
if
:: (dynticks & 1) == 0 ->
atomic {
dynticks = dynticks + 1;
}
assert((dynticks & 1) == 1);
incby = 1;
:: else ->
skip;
fi;
tmp = dynticks_nmi_nesting;
tmp = tmp + incby;
dynticks_nmi_nesting = tmp;
assert(dynticks_nmi_nesting >= 1);
}
/*
* Promela verision of rcu_nmi_exit().
*/
inline rcu_nmi_exit()
{
byte tmp;
assert(dynticks_nmi_nesting > 0);
assert((dynticks & 1) != 0);
if
:: dynticks_nmi_nesting != 1 ->
tmp = dynticks_nmi_nesting;
tmp = tmp - BUSY_INCBY;
dynticks_nmi_nesting = tmp;
:: else ->
dynticks_nmi_nesting = 0;
atomic {
dynticks = dynticks + 1;
}
assert((dynticks & 1) == 0);
fi;
}
/*
* Base-level NMI runs non-atomically. Crudely emulates process-level
* dynticks-idle entry/exit.
*/
proctype base_NMI()
{
byte busy;
busy = 0;
do
:: /* Emulate base-level dynticks and not. */
if
:: 1 -> atomic {
dynticks = dynticks + 1;
}
busy = 1;
:: 1 -> skip;
fi;
/* Verify that we only sometimes have base-level dynticks. */
if
:: busy == 0 -> skip;
:: busy == 1 -> skip;
fi;
/* Model RCU's NMI entry and exit actions. */
rcu_nmi_enter();
assert((dynticks & 1) == 1);
rcu_nmi_exit();
/* Emulated re-entering base-level dynticks and not. */
if
:: !busy -> skip;
:: busy ->
atomic {
dynticks = dynticks + 1;
}
busy = 0;
fi;
/* We had better now be in dyntick-idle mode. */
assert((dynticks & 1) == 0);
od;
}
/*
* Nested NMI runs atomically to emulate interrupting base_level().
*/
proctype nested_NMI()
{
do
:: /*
* Use an atomic section to model a nested NMI. This is
* guaranteed to interleave into base_NMI() between a pair
* of base_NMI() statements, just as a nested NMI would.
*/
atomic {
/* Verify that we only sometimes are in dynticks. */
if
:: (dynticks & 1) == 0 -> skip;
:: (dynticks & 1) == 1 -> skip;
fi;
/* Model RCU's NMI entry and exit actions. */
rcu_nmi_enter();
assert((dynticks & 1) == 1);
rcu_nmi_exit();
}
od;
}
init {
run base_NMI();
run nested_NMI();
}
------------------------------------------------------------------------
The following script can be used to run this model if placed in
rcu_nmi.spin:
------------------------------------------------------------------------
if ! spin -a rcu_nmi.spin
then
echo Spin errors!!!
exit 1
fi
if ! cc -DSAFETY -o pan pan.c
then
echo Compilation errors!!!
exit 1
fi
./pan -m100000
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
| -rw-r--r-- | kernel/rcu/tree.c | 66 |
1 files changed, 49 insertions, 17 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 7680fc275036..4c106fcc0d54 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
| @@ -759,39 +759,71 @@ void rcu_irq_enter(void) | |||
| 759 | /** | 759 | /** |
| 760 | * rcu_nmi_enter - inform RCU of entry to NMI context | 760 | * rcu_nmi_enter - inform RCU of entry to NMI context |
| 761 | * | 761 | * |
| 762 | * If the CPU was idle with dynamic ticks active, and there is no | 762 | * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and |
| 763 | * irq handler running, this updates rdtp->dynticks_nmi to let the | 763 | * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know |
| 764 | * RCU grace-period handling know that the CPU is active. | 764 | * that the CPU is active. This implementation permits nested NMIs, as |
| 765 | * long as the nesting level does not overflow an int. (You will probably | ||
| 766 | * run out of stack space first.) | ||
| 765 | */ | 767 | */ |
| 766 | void rcu_nmi_enter(void) | 768 | void rcu_nmi_enter(void) |
| 767 | { | 769 | { |
| 768 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); | 770 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); |
| 771 | int incby = 2; | ||
| 769 | 772 | ||
| 770 | if (rdtp->dynticks_nmi_nesting == 0 && | 773 | /* Complain about underflow. */ |
| 771 | (atomic_read(&rdtp->dynticks) & 0x1)) | 774 | WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0); |
| 772 | return; | 775 | |
| 773 | rdtp->dynticks_nmi_nesting++; | 776 | /* |
| 774 | smp_mb__before_atomic(); /* Force delay from prior write. */ | 777 | * If idle from RCU viewpoint, atomically increment ->dynticks |
| 775 | atomic_inc(&rdtp->dynticks); | 778 | * to mark non-idle and increment ->dynticks_nmi_nesting by one. |
| 776 | /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ | 779 | * Otherwise, increment ->dynticks_nmi_nesting by two. This means |
| 777 | smp_mb__after_atomic(); /* See above. */ | 780 | * if ->dynticks_nmi_nesting is equal to one, we are guaranteed |
| 778 | WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); | 781 | * to be in the outermost NMI handler that interrupted an RCU-idle |
| 782 | * period (observation due to Andy Lutomirski). | ||
| 783 | */ | ||
| 784 | if (!(atomic_read(&rdtp->dynticks) & 0x1)) { | ||
| 785 | smp_mb__before_atomic(); /* Force delay from prior write. */ | ||
| 786 | atomic_inc(&rdtp->dynticks); | ||
| 787 | /* atomic_inc() before later RCU read-side crit sects */ | ||
| 788 | smp_mb__after_atomic(); /* See above. */ | ||
| 789 | WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); | ||
| 790 | incby = 1; | ||
| 791 | } | ||
| 792 | rdtp->dynticks_nmi_nesting += incby; | ||
| 793 | barrier(); | ||
| 779 | } | 794 | } |
| 780 | 795 | ||
| 781 | /** | 796 | /** |
| 782 | * rcu_nmi_exit - inform RCU of exit from NMI context | 797 | * rcu_nmi_exit - inform RCU of exit from NMI context |
| 783 | * | 798 | * |
| 784 | * If the CPU was idle with dynamic ticks active, and there is no | 799 | * If we are returning from the outermost NMI handler that interrupted an |
| 785 | * irq handler running, this updates rdtp->dynticks_nmi to let the | 800 | * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting |
| 786 | * RCU grace-period handling know that the CPU is no longer active. | 801 | * to let the RCU grace-period handling know that the CPU is back to |
| 802 | * being RCU-idle. | ||
| 787 | */ | 803 | */ |
| 788 | void rcu_nmi_exit(void) | 804 | void rcu_nmi_exit(void) |
| 789 | { | 805 | { |
| 790 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); | 806 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); |
| 791 | 807 | ||
| 792 | if (rdtp->dynticks_nmi_nesting == 0 || | 808 | /* |
| 793 | --rdtp->dynticks_nmi_nesting != 0) | 809 | * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. |
| 810 | * (We are exiting an NMI handler, so RCU better be paying attention | ||
| 811 | * to us!) | ||
| 812 | */ | ||
| 813 | WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0); | ||
| 814 | WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); | ||
| 815 | |||
| 816 | /* | ||
| 817 | * If the nesting level is not 1, the CPU wasn't RCU-idle, so | ||
| 818 | * leave it in non-RCU-idle state. | ||
| 819 | */ | ||
| 820 | if (rdtp->dynticks_nmi_nesting != 1) { | ||
| 821 | rdtp->dynticks_nmi_nesting -= 2; | ||
| 794 | return; | 822 | return; |
| 823 | } | ||
| 824 | |||
| 825 | /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ | ||
| 826 | rdtp->dynticks_nmi_nesting = 0; | ||
| 795 | /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ | 827 | /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ |
| 796 | smp_mb__before_atomic(); /* See above. */ | 828 | smp_mb__before_atomic(); /* See above. */ |
| 797 | atomic_inc(&rdtp->dynticks); | 829 | atomic_inc(&rdtp->dynticks); |
