Diffstat (limited to 'arch/ia64/kernel/ptrace.c')
-rw-r--r--	arch/ia64/kernel/ptrace.c	27
1 file changed, 0 insertions(+), 27 deletions(-)
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 4265ff64219b..b7a5fffe0924 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -672,33 +672,6 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
 	read_unlock(&tasklist_lock);
 }
 
-static inline int
-thread_matches (struct task_struct *thread, unsigned long addr)
-{
-	unsigned long thread_rbs_end;
-	struct pt_regs *thread_regs;
-
-	if (ptrace_check_attach(thread, 0) < 0)
-		/*
-		 * If the thread is not in an attachable state, we'll
-		 * ignore it. The net effect is that if ADDR happens
-		 * to overlap with the portion of the thread's
-		 * register backing store that is currently residing
-		 * on the thread's kernel stack, then ptrace() may end
-		 * up accessing a stale value. But if the thread
-		 * isn't stopped, that's a problem anyhow, so we're
-		 * doing as well as we can...
-		 */
-		return 0;
-
-	thread_regs = task_pt_regs(thread);
-	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
-	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
-		return 0;
-
-	return 1;	/* looks like we've got a winner */
-}
-
 /*
  * Write f32-f127 back to task->thread.fph if it has been modified.
  */
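
For readers unfamiliar with the ia64 register backing store: the deleted
predicate matched a thread only when ADDR fell inside the span of user
backing-store addresses whose contents were, at that moment, still saved on
the thread's kernel stack, bounded below by ar.bspstore and above by the
value ia64_get_user_rbs_end() computes. The standalone C sketch below
illustrates that range test under a simplifying assumption: it treats the
on_kernel_rbs() check as a plain closed-interval test and ignores details of
the real kernel helper (such as the trailing NaT-collection slot);
in_rbs_span() and the sample addresses are hypothetical, not taken from
ptrace.c.

#include <stdio.h>

/*
 * Simplified stand-in for the range check thread_matches() relied on:
 * ADDR "matches" a thread iff it lies between that thread's ar.bspstore
 * and the computed end of the kernel-resident part of its register
 * backing store. Illustration only, not the kernel implementation.
 */
static int in_rbs_span(unsigned long addr, unsigned long bspstore,
		       unsigned long rbs_end)
{
	return addr >= bspstore && addr <= rbs_end;
}

int main(void)
{
	/* Hypothetical register values for one stopped thread. */
	unsigned long bspstore = 0x600000000000f000UL;
	unsigned long rbs_end  = 0x600000000000f1f8UL;

	printf("%d\n", in_rbs_span(bspstore + 0x40, bspstore, rbs_end)); /* 1: inside the span */
	printf("%d\n", in_rbs_span(rbs_end + 8, bspstore, rbs_end));     /* 0: past the span */
	return 0;
}

Note that the diffstat records 0 insertions: no replacement logic appears
anywhere in the file, so the helper had evidently become dead code and the
commit is a pure deletion.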