diff options
-rw-r--r-- | kernel/rcutiny_plugin.h | 43 |
1 file changed, 35 insertions(+), 8 deletions(-)
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 4b905404a5bd..432ed2bc05ad 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -132,6 +132,7 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = { | |||
132 | RCU_TRACE(.rcb.name = "rcu_preempt") | 132 | RCU_TRACE(.rcb.name = "rcu_preempt") |
133 | }; | 133 | }; |
134 | 134 | ||
135 | static void rcu_read_unlock_special(struct task_struct *t); | ||
135 | static int rcu_preempted_readers_exp(void); | 136 | static int rcu_preempted_readers_exp(void); |
136 | static void rcu_report_exp_done(void); | 137 | static void rcu_report_exp_done(void); |
137 | 138 | ||
@@ -146,6 +147,16 @@ static int rcu_cpu_blocking_cur_gp(void) | |||
146 | /* | 147 | /* |
147 | * Check for a running RCU reader. Because there is only one CPU, | 148 | * Check for a running RCU reader. Because there is only one CPU, |
148 | * there can be but one running RCU reader at a time. ;-) | 149 | * there can be but one running RCU reader at a time. ;-) |
150 | * | ||
151 | * Returns zero if there are no running readers. Returns a positive | ||
152 | * number if there is at least one reader within its RCU read-side | ||
153 | * critical section. Returns a negative number if an outermost reader | ||
154 | * is in the midst of exiting from its RCU read-side critical section. | ||
149 | */ | 160 | */ |
150 | static int rcu_preempt_running_reader(void) | 161 | static int rcu_preempt_running_reader(void) |
151 | { | 162 | { |
@@ -475,7 +486,7 @@ void rcu_preempt_note_context_switch(void) | |||
475 | unsigned long flags; | 486 | unsigned long flags; |
476 | 487 | ||
477 | local_irq_save(flags); /* must exclude scheduler_tick(). */ | 488 | local_irq_save(flags); /* must exclude scheduler_tick(). */ |
478 | if (rcu_preempt_running_reader() && | 489 | if (rcu_preempt_running_reader() > 0 && |
479 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { | 490 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { |
480 | 491 | ||
481 | /* Possibly blocking in an RCU read-side critical section. */ | 492 | /* Possibly blocking in an RCU read-side critical section. */ |
@@ -494,6 +505,13 @@ void rcu_preempt_note_context_switch(void) | |||
494 | list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks); | 505 | list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks); |
495 | if (rcu_cpu_blocking_cur_gp()) | 506 | if (rcu_cpu_blocking_cur_gp()) |
496 | rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry; | 507 | rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry; |
508 | } else if (rcu_preempt_running_reader() < 0 && | ||
509 | t->rcu_read_unlock_special) { | ||
510 | /* | ||
511 | * Complete exit from RCU read-side critical section on | ||
512 | * behalf of preempted instance of __rcu_read_unlock(). | ||
513 | */ | ||
514 | rcu_read_unlock_special(t); | ||
497 | } | 515 | } |
498 | 516 | ||
499 | /* | 517 | /* |
@@ -618,13 +636,22 @@ void __rcu_read_unlock(void) | |||
618 | struct task_struct *t = current; | 636 | struct task_struct *t = current; |
619 | 637 | ||
620 | barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */ | 638 | barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */ |
621 | --t->rcu_read_lock_nesting; | 639 | if (t->rcu_read_lock_nesting != 1) |
622 | barrier(); /* decrement before load of ->rcu_read_unlock_special */ | 640 | --t->rcu_read_lock_nesting; |
623 | if (t->rcu_read_lock_nesting == 0 && | 641 | else { |
624 | unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) | 642 | t->rcu_read_lock_nesting = INT_MIN; |
625 | rcu_read_unlock_special(t); | 643 | barrier(); /* assign before ->rcu_read_unlock_special load */ |
644 | if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) | ||
645 | rcu_read_unlock_special(t); | ||
646 | barrier(); /* ->rcu_read_unlock_special load before assign */ | ||
647 | t->rcu_read_lock_nesting = 0; | ||
648 | } | ||
626 | #ifdef CONFIG_PROVE_LOCKING | 649 | #ifdef CONFIG_PROVE_LOCKING |
627 | WARN_ON_ONCE(t->rcu_read_lock_nesting < 0); | 650 | { |
651 | int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); | ||
652 | |||
653 | WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); | ||
654 | } | ||
628 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | 655 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
629 | } | 656 | } |
630 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); | 657 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); |
@@ -649,7 +676,7 @@ static void rcu_preempt_check_callbacks(void) | |||
649 | invoke_rcu_callbacks(); | 676 | invoke_rcu_callbacks(); |
650 | if (rcu_preempt_gp_in_progress() && | 677 | if (rcu_preempt_gp_in_progress() && |
651 | rcu_cpu_blocking_cur_gp() && | 678 | rcu_cpu_blocking_cur_gp() && |
652 | rcu_preempt_running_reader()) | 679 | rcu_preempt_running_reader() > 0) |
653 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; | 680 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; |
654 | } | 681 | } |
655 | 682 | ||