author    K.Prasad <prasad@linux.vnet.ibm.com>  2009-06-01 14:14:55 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>  2009-06-02 16:46:59 -0400
commit    66cb5917295958652ff6ba36d83f98f2379c46b4 (patch)
tree      1e30c4524cb5d53ecd4980b83ead4e9548dfb8f5
parent    1e3500666f7c5daaadadb8431a2927cdbbdb7dd4 (diff)
hw-breakpoints: use the new wrapper routines to access debug registers in process/thread code
This patch enables the use of abstract debug registers in process-handling
routines, according to the new hardware breakpoint API.

[ Impact: adapt thread breakpoints handling code to the new breakpoint API ]

Original-patch-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: K.Prasad <prasad@linux.vnet.ibm.com>
Reviewed-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
-rw-r--r--  arch/x86/kernel/process.c    | 22
-rw-r--r--  arch/x86/kernel/process_32.c | 28
-rw-r--r--  arch/x86/kernel/process_64.c | 31
3 files changed, 65 insertions(+), 16 deletions(-)
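Before the hunks, here is a minimal, self-contained C sketch of the conversion this patch performs: the open-coded clearing of the per-thread fields debugreg[0]..debugreg[3], debugreg6 and debugreg7 (visible in the removed lines of flush_thread() below) is hidden behind a single wrapper call, which is the role flush_thread_hw_breakpoint() plays in the new API. The struct and helper names in the sketch are illustrative stand-ins, not the kernel's actual definitions.

#include <string.h>

/* Illustrative stand-in for the per-thread debug register fields that the
 * removed code below manipulates directly; not the kernel's thread_struct. */
struct thread_dbg_state {
	unsigned long debugreg[4];	/* DR0-DR3: breakpoint addresses */
	unsigned long debugreg6;	/* DR6: debug status             */
	unsigned long debugreg7;	/* DR7: breakpoint control       */
};

/* Old style: every call site clears the registers field by field,
 * exactly as the removed lines in flush_thread() do. */
static void flush_open_coded(struct thread_dbg_state *t)
{
	t->debugreg[0] = 0;
	t->debugreg[1] = 0;
	t->debugreg[2] = 0;
	t->debugreg[3] = 0;
	t->debugreg6 = 0;
	t->debugreg7 = 0;
}

/* New style: call sites invoke one wrapper and stay ignorant of the layout,
 * which is what flush_thread_hw_breakpoint() provides in this patch. */
static void flush_wrapper(struct thread_dbg_state *t)
{
	memset(t, 0, sizeof(*t));
}

int main(void)
{
	struct thread_dbg_state t = { .debugreg7 = 0x1 };

	flush_open_coded(&t);
	flush_wrapper(&t);
	return 0;
}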
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 291527cb438a..19a686c401b5 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -15,6 +15,8 @@
 #include <asm/uaccess.h>
 #include <asm/i387.h>
 #include <asm/ds.h>
+#include <asm/debugreg.h>
+#include <asm/hw_breakpoint.h>
 
 unsigned long idle_halt;
 EXPORT_SYMBOL(idle_halt);
@@ -46,6 +48,8 @@ void free_thread_xstate(struct task_struct *tsk)
 		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
 		tsk->thread.xstate = NULL;
 	}
+	if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG)))
+		flush_thread_hw_breakpoint(tsk);
 
 	WARN(tsk->thread.ds_ctx, "leaking DS context\n");
 }
@@ -106,12 +110,8 @@ void flush_thread(void)
 
 	clear_tsk_thread_flag(tsk, TIF_DEBUG);
 
-	tsk->thread.debugreg[0] = 0;
-	tsk->thread.debugreg[1] = 0;
-	tsk->thread.debugreg[2] = 0;
-	tsk->thread.debugreg[3] = 0;
-	tsk->thread.debugreg6 = 0;
-	tsk->thread.debugreg7 = 0;
+	if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG)))
+		flush_thread_hw_breakpoint(tsk);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 	/*
 	 * Forget coprocessor state..
@@ -193,16 +193,6 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	else if (next->debugctlmsr != prev->debugctlmsr)
 		update_debugctlmsr(next->debugctlmsr);
 
-	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
-		set_debugreg(next->debugreg[0], 0);
-		set_debugreg(next->debugreg[1], 1);
-		set_debugreg(next->debugreg[2], 2);
-		set_debugreg(next->debugreg[3], 3);
-		/* no 4 and 5 */
-		set_debugreg(next->debugreg6, 6);
-		set_debugreg(next->debugreg7, 7);
-	}
-
 	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
 	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
 		/* prev and next are different */
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index b5e4bfef4472..297ffff2ffc2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -61,6 +61,8 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/ds.h>
+#include <asm/debugreg.h>
+#include <asm/hw_breakpoint.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -265,7 +267,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	task_user_gs(p) = get_user_gs(regs);
 
+	p->thread.io_bitmap_ptr = NULL;
 	tsk = current;
+	err = -ENOMEM;
+	if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG)))
+		if (copy_thread_hw_breakpoint(tsk, p, clone_flags))
+			goto out;
+
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
 						IO_BITMAP_BYTES, GFP_KERNEL);
@@ -285,10 +293,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 		err = do_set_thread_area(p, -1,
 			(struct user_desc __user *)childregs->si, 0);
 
+out:
 	if (err && p->thread.io_bitmap_ptr) {
 		kfree(p->thread.io_bitmap_ptr);
 		p->thread.io_bitmap_max = 0;
 	}
+	if (err)
+		flush_thread_hw_breakpoint(p);
 
 	clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
 	p->thread.ds_ctx = NULL;
@@ -427,6 +438,23 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	lazy_load_gs(next->gs);
 
 	percpu_write(current_task, next_p);
+	/*
+	 * There's a problem with moving the arch_install_thread_hw_breakpoint()
+	 * call before current is updated. Suppose a kernel breakpoint is
+	 * triggered in between the two, the hw-breakpoint handler will see that
+	 * the 'current' task does not have TIF_DEBUG flag set and will think it
+	 * is leftover from an old task (lazy switching) and will erase it. Then
+	 * until the next context switch, no user-breakpoints will be installed.
+	 *
+	 * The real problem is that it's impossible to update both current and
+	 * physical debug registers at the same instant, so there will always be
+	 * a window in which they disagree and a breakpoint might get triggered.
+	 * Since we use lazy switching, we are forced to assume that a
+	 * disagreement means that current is correct and the exception is due
+	 * to lazy debug register switching.
+	 */
+	if (unlikely(test_tsk_thread_flag(next_p, TIF_DEBUG)))
+		arch_install_thread_hw_breakpoint(next_p);
 
 	return prev_p;
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 5a1a1de292ec..f7b276d4b3fb 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -55,6 +55,8 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/ds.h>
+#include <asm/debugreg.h>
+#include <asm/hw_breakpoint.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -248,6 +250,8 @@ void release_thread(struct task_struct *dead_task)
 			BUG();
 		}
 	}
+	if (unlikely(dead_task->thread.debugreg7))
+		flush_thread_hw_breakpoint(dead_task);
 }
 
 static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
@@ -303,12 +307,18 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
+	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
 	savesegment(fs, p->thread.fsindex);
 	savesegment(es, p->thread.es);
 	savesegment(ds, p->thread.ds);
 
+	err = -ENOMEM;
+	if (unlikely(test_tsk_thread_flag(me, TIF_DEBUG)))
+		if (copy_thread_hw_breakpoint(me, p, clone_flags))
+			goto out;
+
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
 		if (!p->thread.io_bitmap_ptr) {
@@ -347,6 +357,9 @@ out:
 		kfree(p->thread.io_bitmap_ptr);
 		p->thread.io_bitmap_max = 0;
 	}
+	if (err)
+		flush_thread_hw_breakpoint(p);
+
 	return err;
 }
 
@@ -492,6 +505,24 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
+	/*
+	 * There's a problem with moving the arch_install_thread_hw_breakpoint()
+	 * call before current is updated. Suppose a kernel breakpoint is
+	 * triggered in between the two, the hw-breakpoint handler will see that
+	 * the 'current' task does not have TIF_DEBUG flag set and will think it
+	 * is leftover from an old task (lazy switching) and will erase it. Then
+	 * until the next context switch, no user-breakpoints will be installed.
+	 *
+	 * The real problem is that it's impossible to update both current and
+	 * physical debug registers at the same instant, so there will always be
+	 * a window in which they disagree and a breakpoint might get triggered.
+	 * Since we use lazy switching, we are forced to assume that a
+	 * disagreement means that current is correct and the exception is due
+	 * to lazy debug register switching.
+	 */
+	if (unlikely(test_tsk_thread_flag(next_p, TIF_DEBUG)))
+		arch_install_thread_hw_breakpoint(next_p);
+
 	return prev_p;
 }
 
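The comment duplicated in both __switch_to() hunks above boils down to one policy: because 'current' and the physical debug registers can never be updated in the same instant, a breakpoint exception taken during the switch window is attributed to lazy debug register switching whenever the current task lacks TIF_DEBUG. Below is a minimal, self-contained sketch of that decision, with hypothetical names (handle_debug_exception, dbg_registers_armed) standing in for the real handler and hardware state; it is not the kernel's hw-breakpoint handler.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for TIF_DEBUG and the armed debug registers;
 * these are not the kernel's real data structures. */
struct task {
	const char *comm;	/* task name                      */
	bool tif_debug;		/* task uses hardware breakpoints */
};

static bool dbg_registers_armed;	/* stand-in for DR7 being non-zero */

/*
 * Policy from the patch's comment: when the armed registers and the
 * current task disagree, trust 'current' and treat the registers as
 * stale leftovers from lazy switching rather than a genuine hit.
 */
static void handle_debug_exception(const struct task *curr)
{
	if (!curr->tif_debug) {
		dbg_registers_armed = false;	/* disarm the stale breakpoint */
		printf("%s: stale breakpoint cleared (lazy switching)\n",
		       curr->comm);
		return;
	}
	printf("%s: genuine breakpoint hit\n", curr->comm);
}

int main(void)
{
	struct task next = { .comm = "next", .tif_debug = false };

	dbg_registers_armed = true;	/* previous task's breakpoints still set */
	handle_debug_exception(&next);	/* current updated before install, as in the patch */
	return 0;
}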