diff options
author | Paul E. McKenney <paul.mckenney@linaro.org> | 2012-08-01 17:29:20 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2012-09-23 10:42:50 -0400 |
commit | a82dcc76021e22c174ba85d90b7a8c750b7362d0 (patch) | |
tree | f65ef1af700610b9e3e59545d2bf13e4cac33876 /kernel/rcutree.c | |
parent | 5cc900cf55fe58aaad93767c5a526e2a69cbcbc6 (diff) |
rcu: Make offline-CPU checking allow for indefinite delays
The rcu_implicit_offline_qs() function implicitly assumed that execution
would progress predictably when interrupts are disabled, which is of course
not guaranteed when running on a hypervisor. Furthermore, this function
is short and is called from only one place, within a function that is itself short.
This commit therefore ensures that the grace period's age is checked
before the CPU's offline state, which guarantees correct behavior even given
indefinite delays. It also inlines rcu_implicit_offline_qs() into
rcu_implicit_dynticks_qs().
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r-- | kernel/rcutree.c | 53 |
1 files changed, 21 insertions, 32 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index f7bcd9e6c054..2c4ee4cdbc0e 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -319,35 +319,6 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp) | |||
319 | } | 319 | } |
320 | 320 | ||
321 | /* | 321 | /* |
322 | * If the specified CPU is offline, tell the caller that it is in | ||
323 | * a quiescent state. Otherwise, whack it with a reschedule IPI. | ||
324 | * Grace periods can end up waiting on an offline CPU when that | ||
325 | * CPU is in the process of coming online -- it will be added to the | ||
326 | * rcu_node bitmasks before it actually makes it online. The same thing | ||
327 | can happen while a CPU is in the process of going offline. Because this | ||
328 | * race is quite rare, we check for it after detecting that the grace | ||
329 | * period has been delayed rather than checking each and every CPU | ||
330 | * each and every time we start a new grace period. | ||
331 | */ | ||
332 | static int rcu_implicit_offline_qs(struct rcu_data *rdp) | ||
333 | { | ||
334 | /* | ||
335 | * If the CPU is offline for more than a jiffy, it is in a quiescent | ||
336 | * state. We can trust its state not to change because interrupts | ||
337 | * are disabled. The reason for the jiffy's worth of slack is to | ||
338 | * handle CPUs initializing on the way up and finding their way | ||
339 | * to the idle loop on the way down. | ||
340 | */ | ||
341 | if (cpu_is_offline(rdp->cpu) && | ||
342 | ULONG_CMP_LT(rdp->rsp->gp_start + 2, jiffies)) { | ||
343 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl"); | ||
344 | rdp->offline_fqs++; | ||
345 | return 1; | ||
346 | } | ||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle | 322 | * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle |
352 | * | 323 | * |
353 | * If the new value of the ->dynticks_nesting counter now is zero, | 324 | * If the new value of the ->dynticks_nesting counter now is zero, |
@@ -675,7 +646,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) | |||
675 | * Return true if the specified CPU has passed through a quiescent | 646 | * Return true if the specified CPU has passed through a quiescent |
676 | * state by virtue of being in or having passed through an dynticks | 647 | * state by virtue of being in or having passed through an dynticks |
677 | * idle state since the last call to dyntick_save_progress_counter() | 648 | * idle state since the last call to dyntick_save_progress_counter() |
678 | * for this same CPU. | 649 | * for this same CPU, or by virtue of having been offline. |
679 | */ | 650 | */ |
680 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | 651 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) |
681 | { | 652 | { |
@@ -699,8 +670,26 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
699 | return 1; | 670 | return 1; |
700 | } | 671 | } |
701 | 672 | ||
702 | /* Go check for the CPU being offline. */ | 673 | /* |
703 | return rcu_implicit_offline_qs(rdp); | 674 | * Check for the CPU being offline, but only if the grace period |
675 | * is old enough. We don't need to worry about the CPU changing | ||
676 | * state: If we see it offline even once, it has been through a | ||
677 | * quiescent state. | ||
678 | * | ||
679 | * The reason for insisting that the grace period be at least | ||
680 | * one jiffy old is that CPUs that are not quite online and that | ||
681 | * have just gone offline can still execute RCU read-side critical | ||
682 | * sections. | ||
683 | */ | ||
684 | if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies)) | ||
685 | return 0; /* Grace period is not old enough. */ | ||
686 | barrier(); | ||
687 | if (cpu_is_offline(rdp->cpu)) { | ||
688 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl"); | ||
689 | rdp->offline_fqs++; | ||
690 | return 1; | ||
691 | } | ||
692 | return 0; | ||
704 | } | 693 | } |
705 | 694 | ||
706 | static int jiffies_till_stall_check(void) | 695 | static int jiffies_till_stall_check(void) |