commit     cba6d0d64ee53772b285d0c0c288deefbeaf7775 (patch)
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-07-02 10:08:42 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-07-02 14:39:19 -0400
tree       b2d3de62d15a77d23708c1af6f188c86f0d87fb9 /kernel
parent     6887a4131da3adaab011613776d865f4bcfb5678 (diff)
Revert "rcu: Move PREEMPT_RCU preemption to switch_to() invocation"
This reverts commit 616c310e83b872024271c915c1b9ab505b9efad9.
(Move PREEMPT_RCU preemption to switch_to() invocation).
Testing by Sasha Levin <levinsasha928@gmail.com> showed that this
can result in deadlock due to invoking the scheduler when one of
the runqueue locks is held. Because this commit was simply a
performance optimization, revert it.
Reported-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Sasha Levin <levinsasha928@gmail.com>
Diffstat (limited to 'kernel')
 kernel/rcutree.c        |  1
 kernel/rcutree.h        |  1
 kernel/rcutree_plugin.h | 14
 kernel/sched/core.c     |  1
 4 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 38ecdda3f55f..4b97bba7396e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -201,6 +201,7 @@ void rcu_note_context_switch(int cpu)
 {
 	trace_rcu_utilization("Start context switch");
 	rcu_sched_qs(cpu);
+	rcu_preempt_note_context_switch(cpu);
 	trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index ea056495783e..19b61ac1079f 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -444,6 +444,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
+static void rcu_preempt_note_context_switch(int cpu);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 5271a020887e..3e4899459f3d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(int cpu)
 {
 	struct task_struct *t = current;
 	unsigned long flags;
@@ -164,7 +164,7 @@ void rcu_preempt_note_context_switch(void)
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
-		rdp = __this_cpu_ptr(rcu_preempt_state.rda);
+		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ void rcu_preempt_note_context_switch(void)
 	 * means that we continue to block the current grace period.
 	 */
 	local_irq_save(flags);
-	rcu_preempt_qs(smp_processor_id());
+	rcu_preempt_qs(cpu);
 	local_irq_restore(flags);
 }
 
@@ -1002,6 +1002,14 @@ void rcu_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
 /*
+ * Because preemptible RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_note_context_switch(int cpu)
+{
+}
+
+/*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
  */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d5594a4268d4..eaead2df6aa8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2081,7 +2081,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
 	/* Here we just switch the register state and the stack. */
-	rcu_switch_from(prev);
 	switch_to(prev, next, prev);
 
 	barrier();
