author    Michael Ellerman <michael@ellerman.id.au>    2008-03-28 04:11:48 -0400
committer Paul Mackerras <paulus@samba.org>            2008-03-28 07:51:02 -0400
commit    a2ceff5e555e664751bc653a4d9b133efa18c742 (patch)
tree      07ec73c8b105b50f3f3363c3dd80a5b39f200d19 /arch/powerpc/kernel/process.c
parent    5c29934de29ddd7ecd913d83cad28e872f2e8c78 (diff)
[POWERPC] Fix missed hardware breakpoints across multiple threads
There is a bug in the powerpc DABR (data access breakpoint) handling,
which can result in us missing breakpoints if several threads are trying
to break on the same address.
The sequence of events is that do_page_fault() calls do_dabr(), which
clears the DABR (sets it to 0) and sets up the signal that will report
to userspace that the DABR was hit. The do_signal() code then restores
the DABR value on the way out to userspace.
If we reschedule before calling do_signal(), __switch_to() compares the
cached DABR value to the new thread's value; if they match, we don't
set the DABR in hardware.
So if two threads have the same DABR value, and we schedule from one to
the other after taking the interrupt for the first thread hitting the DABR,
the second thread will run without the DABR set in hardware.
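
To make the ordering concrete, here is a minimal, self-contained sketch
of the buggy pattern. The names (cached_dabr, hw_dabr, the 0x1000
address) are illustrative stand-ins for the kernel's per-CPU accessors
and the real register, not the actual kernel code:

/* Sketch of the buggy pattern: the cache is updated at the call site
 * in switch_to(), not inside the setter, so a set_dabr() from another
 * path leaves the cache stale.
 */
#include <stdio.h>

static unsigned long cached_dabr;	/* stands in for the per-CPU cache */
static unsigned long hw_dabr;		/* stands in for the real DABR register */

static int set_dabr(unsigned long dabr)
{
	hw_dabr = dabr;			/* buggy version: cache NOT updated here */
	return 0;
}

static void switch_to(unsigned long new_thread_dabr)
{
	if (cached_dabr != new_thread_dabr) {
		set_dabr(new_thread_dabr);
		cached_dabr = new_thread_dabr;	/* cache updated only here */
	}
}

int main(void)
{
	/* Threads A and B both want a breakpoint on the same address. */
	switch_to(0x1000);	/* A runs; DABR armed, cache = 0x1000 */
	set_dabr(0);		/* A hits it; do_dabr() clears the register,
				   but the cache still says 0x1000 */
	switch_to(0x1000);	/* reschedule to B before do_signal() runs:
				   cache matches, register never re-armed */
	printf("hw_dabr = %#lx\n", hw_dabr);	/* prints 0: B's breakpoint
						   is silently missed */
	return 0;
}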
The cleanest fix is to move the cache update into set_dabr(); that way
we can't forget to do it.
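
In the same illustrative sketch, the fix looks like this (again using
hypothetical stand-in names, mirroring the diff below):

/* The same sketch with the fix applied: set_dabr() refreshes the cache
 * itself, so every write to the register also updates the cache and
 * the stale-cache window disappears.
 */
#include <stdio.h>

static unsigned long cached_dabr;	/* stands in for the per-CPU cache */
static unsigned long hw_dabr;		/* stands in for the real DABR register */

static int set_dabr(unsigned long dabr)
{
	cached_dabr = dabr;	/* fixed: cache refreshed on every write */
	hw_dabr = dabr;
	return 0;
}

static void switch_to(unsigned long new_thread_dabr)
{
	/* The comparison stays, but the call-site cache update (and the
	 * braces around it) are gone, as in the diff below. */
	if (cached_dabr != new_thread_dabr)
		set_dabr(new_thread_dabr);
}

int main(void)
{
	switch_to(0x1000);	/* thread A runs; DABR armed */
	set_dabr(0);		/* do_dabr() clears it; cache now 0 too */
	switch_to(0x1000);	/* thread B: mismatch detected, re-armed */
	printf("hw_dabr = %#lx\n", hw_dabr);	/* prints 0x1000 */
	return 0;
}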
Reported-by: Jan Kratochvil <jan.kratochvil@redhat.com>
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/process.c')
 arch/powerpc/kernel/process.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 59311ec0d422..4ec605521504 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -241,8 +241,12 @@ void discard_lazy_cpu_state(void)
 }
 #endif /* CONFIG_SMP */
 
+static DEFINE_PER_CPU(unsigned long, current_dabr);
+
 int set_dabr(unsigned long dabr)
 {
+	__get_cpu_var(current_dabr) = dabr;
+
 #ifdef CONFIG_PPC_MERGE		/* XXX for now */
 	if (ppc_md.set_dabr)
 		return ppc_md.set_dabr(dabr);
@@ -259,8 +263,6 @@ int set_dabr(unsigned long dabr)
 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
 #endif
 
-static DEFINE_PER_CPU(unsigned long, current_dabr);
-
 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
 {
@@ -325,10 +327,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 #endif /* CONFIG_SMP */
 
-	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
+	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
 		set_dabr(new->thread.dabr);
-		__get_cpu_var(current_dabr) = new->thread.dabr;
-	}
 
 	new_thread = &new->thread;
 	old_thread = &current->thread;