 kernel/rcutree_plugin.h | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 4d2c068ba13e..d9d7a89da8bb 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -68,6 +68,7 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -147,7 +148,7 @@ static void rcu_preempt_note_context_switch(int cpu)
         struct rcu_data *rdp;
         struct rcu_node *rnp;
 
-        if (t->rcu_read_lock_nesting &&
+        if (t->rcu_read_lock_nesting > 0 &&
             (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                 /* Possibly blocking in an RCU read-side critical section. */
@@ -190,6 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
                                 rnp->gp_tasks = &t->rcu_node_entry;
                 }
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
+        } else if (t->rcu_read_lock_nesting < 0 &&
+                   t->rcu_read_unlock_special) {
+
+                /*
+                 * Complete exit from RCU read-side critical section on
+                 * behalf of preempted instance of __rcu_read_unlock().
+                 */
+                rcu_read_unlock_special(t);
         }
 
         /*
@@ -391,13 +400,22 @@ void __rcu_read_unlock(void)
         struct task_struct *t = current;
 
         barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-        if (--t->rcu_read_lock_nesting == 0) {
-                barrier();  /* decr before ->rcu_read_unlock_special load */
+        if (t->rcu_read_lock_nesting != 1)
+                --t->rcu_read_lock_nesting;
+        else {
+                t->rcu_read_lock_nesting = INT_MIN;
+                barrier();  /* assign before ->rcu_read_unlock_special load */
                 if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                         rcu_read_unlock_special(t);
+                barrier();  /* ->rcu_read_unlock_special load before assign */
+                t->rcu_read_lock_nesting = 0;
         }
 #ifdef CONFIG_PROVE_LOCKING
-        WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+        {
+                int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+        }
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -593,7 +611,8 @@ static void rcu_preempt_check_callbacks(int cpu)
                 rcu_preempt_qs(cpu);
                 return;
         }
-        if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+        if (t->rcu_read_lock_nesting > 0 &&
+            per_cpu(rcu_preempt_data, cpu).qs_pending)
                 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
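The heart of the patch is the negative-nesting handshake in __rcu_read_unlock(): the outermost unlock parks ->rcu_read_lock_nesting at INT_MIN before it examines ->rcu_read_unlock_special, so an interrupt handler that enters and exits its own RCU read-side critical section in that window sees a large negative count and takes only the plain-decrement path. With the removed code, such a handler could observe the counter already decremented to zero, bring its own nesting back down to zero, and recurse into rcu_read_unlock_special() from irq context. Below is a minimal userspace model of that handshake, a sketch only and not kernel code: the task_struct fields become globals, the IRQ is a direct function call placed at the vulnerable point, and the model_*/simulated_* names are invented for illustration.

#include <limits.h>
#include <stdio.h>

static int nesting;          /* models t->rcu_read_lock_nesting */
static int unlock_special;   /* models t->rcu_read_unlock_special */

static void model_read_unlock(void);

static void model_unlock_special(void)
{
        /* Stand-in for rcu_read_unlock_special(): report and clear. */
        printf("special processing (runs once), nesting=%d\n", nesting);
        unlock_special = 0;
}

static void model_read_lock(void)
{
        nesting++;
}

/* An "IRQ handler" that itself uses an RCU read-side critical section. */
static void simulated_irq(void)
{
        model_read_lock();      /* INT_MIN -> INT_MIN + 1 */
        model_read_unlock();    /* nesting != 1: plain decrement only, so
                                 * the handler cannot recurse into the
                                 * special-processing path */
}

static void model_read_unlock(void)
{
        if (nesting != 1) {
                --nesting;
        } else {
                nesting = INT_MIN;  /* mark "outermost exit in progress" */
                simulated_irq();    /* IRQ striking inside the unlock window */
                if (unlock_special)
                        model_unlock_special();
                nesting = 0;
        }
}

int main(void)
{
        model_read_lock();          /* enter outermost critical section */
        unlock_special = 1;         /* pretend the reader was preempted */
        model_read_unlock();        /* special processing runs exactly once */
        printf("final nesting=%d\n", nesting);
        return 0;
}

Compiled as plain C (e.g. cc model.c && ./a.out), the model prints the special-processing line exactly once, from the task rather than from the simulated IRQ. The same reasoning explains the relaxed debug check: while the outermost exit is in progress, the counter legitimately sits near INT_MIN (at most a few increments above it when handlers nest inside the window), so only small negative values, those above INT_MIN / 2, still indicate an unbalanced rcu_read_unlock(). The nesting > 0 tests added to rcu_preempt_note_context_switch() and rcu_preempt_check_callbacks() keep the grace-period machinery from treating this transient negative state as a running reader, and the new else-if branch lets the scheduler path finish the exit on behalf of a preempted __rcu_read_unlock().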