diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-05-03 18:35:32 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-06-08 21:52:34 -0400 |
commit | c350c008297643dad3c395c2fd92230142da5cf6 (patch) | |
tree | 731bff2816d661ee0c1015a6e5e4423ccfd87f7f /kernel/rcu/srcutree.c | |
parent | 71c40fd0b5ceb300c6cb8753835d9d94a8bfc56f (diff) |
srcu: Prevent sdp->srcu_gp_seq_needed counter wrap
If a given CPU never happens to start an SRCU grace period, the
grace-period sequence counter might wrap. If this CPU were to decide to
finally start a grace period, the state of its sdp->srcu_gp_seq_needed
might make it appear that it has already requested this grace period,
which would prevent starting the grace period. If no other CPU ever started
a grace period again, this would look like a grace-period hang. Even
if some other CPU took pity and started the needed grace period, the
leaf rcu_node structure's ->srcu_data_have_cbs field won't have a record
of the fact that this CPU has a callback pending, which would look like
a very localized grace-period hang.
This might seem very unlikely, but SRCU grace periods can take less than
a microsecond on small systems, which means that overflow can happen
in much less than an hour on a 32-bit embedded system. And embedded
systems are especially likely to have long-term idle CPUs. Therefore,
it makes sense to prevent this scenario from happening.
This commit therefore scans each srcu_data structure occasionally,
with frequency controlled by the srcutree.counter_wrap_check kernel
boot parameter. This parameter can be set to something like 255
in order to exercise the counter-wrap-prevention code.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/srcutree.c')
-rw-r--r-- | kernel/rcu/srcutree.c | 18 |
1 file changed, 18 insertions, 0 deletions
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index c6e2a4a1628b..cc06dbfc9692 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c | |||
@@ -45,6 +45,10 @@ | |||
45 | static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF; | 45 | static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF; |
46 | module_param(exp_holdoff, ulong, 0444); | 46 | module_param(exp_holdoff, ulong, 0444); |
47 | 47 | ||
48 | /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */ | ||
49 | static ulong counter_wrap_check = (ULONG_MAX >> 2); | ||
50 | module_param(counter_wrap_check, ulong, 0444); | ||
51 | |||
48 | static void srcu_invoke_callbacks(struct work_struct *work); | 52 | static void srcu_invoke_callbacks(struct work_struct *work); |
49 | static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); | 53 | static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); |
50 | 54 | ||
@@ -496,10 +500,13 @@ static void srcu_gp_end(struct srcu_struct *sp) | |||
496 | { | 500 | { |
497 | unsigned long cbdelay; | 501 | unsigned long cbdelay; |
498 | bool cbs; | 502 | bool cbs; |
503 | int cpu; | ||
504 | unsigned long flags; | ||
499 | unsigned long gpseq; | 505 | unsigned long gpseq; |
500 | int idx; | 506 | int idx; |
501 | int idxnext; | 507 | int idxnext; |
502 | unsigned long mask; | 508 | unsigned long mask; |
509 | struct srcu_data *sdp; | ||
503 | struct srcu_node *snp; | 510 | struct srcu_node *snp; |
504 | 511 | ||
505 | /* Prevent more than one additional grace period. */ | 512 | /* Prevent more than one additional grace period. */ |
@@ -538,6 +545,17 @@ static void srcu_gp_end(struct srcu_struct *sp) | |||
538 | smp_mb(); /* GP end before CB invocation. */ | 545 | smp_mb(); /* GP end before CB invocation. */ |
539 | srcu_schedule_cbs_snp(sp, snp, mask, cbdelay); | 546 | srcu_schedule_cbs_snp(sp, snp, mask, cbdelay); |
540 | } | 547 | } |
548 | |||
549 | /* Occasionally prevent srcu_data counter wrap. */ | ||
550 | if (!(gpseq & counter_wrap_check)) | ||
551 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { | ||
552 | sdp = per_cpu_ptr(sp->sda, cpu); | ||
553 | spin_lock_irqsave(&sdp->lock, flags); | ||
554 | if (ULONG_CMP_GE(gpseq, | ||
555 | sdp->srcu_gp_seq_needed + 100)) | ||
556 | sdp->srcu_gp_seq_needed = gpseq; | ||
557 | spin_unlock_irqrestore(&sdp->lock, flags); | ||
558 | } | ||
541 | } | 559 | } |
542 | 560 | ||
543 | /* Callback initiation done, allow grace periods after next. */ | 561 | /* Callback initiation done, allow grace periods after next. */ |