author	Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:32 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:41 -0400
commit	c34d1b4d165c67b966bca4aba026443d7ff161eb (patch)
tree	27ffca9daba2a6b16d29bd508faf3e68bda2aad1 /arch/i386/oprofile/backtrace.c
parent	c0718806cf955d5eb51ea77bffb5b21d9bba4972 (diff)
[PATCH] mm: kill check_user_page_readable
check_user_page_readable is a problematic variant of follow_page. It's used only by oprofile's i386 and arm backtrace code, at interrupt time, to establish whether a userspace stackframe is currently readable.

This is problematic, because we want to push the page_table_lock down inside follow_page, and later split it; whereas oprofile is doing a spin_trylock on it (in the i386 case, forgotten in the arm case), and needs that to pin perhaps two pages spanned by the stackframe (which might be covered by different locks when we split).

I think oprofile is going about this in the wrong way: it doesn't need to know the area is readable (neither i386 nor arm uses read protection of user pages), it doesn't need to pin the memory, it should simply __copy_from_user_inatomic, and see if that succeeds or not. Sorry, but I've not got around to devising the sparse __user annotations for this.

Then we can eliminate check_user_page_readable, and return to a single follow_page without the __follow_page variants.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
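The fix the message describes, reduced to a minimal standalone sketch (try_read_user_word is an illustrative name, not a helper from this patch; access_ok still took VERIFY_READ in this era):

#include <asm/uaccess.h>

/* Minimal sketch of the approach the message advocates: at interrupt
 * time, don't test page presence and don't take page_table_lock --
 * just attempt the copy, and treat a nonzero return (bytes left
 * uncopied, i.e. a fault) as "not readable".  Hypothetical helper,
 * not part of the patch itself. */
static int try_read_user_word(unsigned long __user *uaddr, unsigned long *val)
{
	if (!access_ok(VERIFY_READ, uaddr, sizeof(*val)))
		return 0;	/* not a plausible user address */
	if (__copy_from_user_inatomic(val, uaddr, sizeof(*val)))
		return 0;	/* faulted: page absent or unmapped */
	return 1;		/* *val now holds the user word */
}

The diff below applies the same pattern to the oprofile frame walk, copying two struct frame_heads at once so that a single access_ok/__copy_from_user_inatomic pair also covers what pages_present used to verify at head + 1.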
Diffstat (limited to 'arch/i386/oprofile/backtrace.c')
-rw-r--r--	arch/i386/oprofile/backtrace.c	38
1 file changed, 13 insertions(+), 25 deletions(-)
diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c
index 65dfd2edb671..21654be3f73f 100644
--- a/arch/i386/oprofile/backtrace.c
+++ b/arch/i386/oprofile/backtrace.c
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/ptrace.h>
+#include <asm/uaccess.h>
 
 struct frame_head {
 	struct frame_head * ebp;
@@ -21,26 +22,22 @@ struct frame_head {
 static struct frame_head *
 dump_backtrace(struct frame_head * head)
 {
-	oprofile_add_trace(head->ret);
+	struct frame_head bufhead[2];
 
-	/* frame pointers should strictly progress back up the stack
-	 * (towards higher addresses) */
-	if (head >= head->ebp)
+	/* Also check accessibility of one struct frame_head beyond */
+	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
+		return NULL;
+	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
 		return NULL;
 
-	return head->ebp;
-}
-
-/* check that the page(s) containing the frame head are present */
-static int pages_present(struct frame_head * head)
-{
-	struct mm_struct * mm = current->mm;
+	oprofile_add_trace(bufhead[0].ret);
 
-	/* FIXME: only necessary once per page */
-	if (!check_user_page_readable(mm, (unsigned long)head))
-		return 0;
+	/* frame pointers should strictly progress back up the stack
+	 * (towards higher addresses) */
+	if (head >= bufhead[0].ebp)
+		return NULL;
 
-	return check_user_page_readable(mm, (unsigned long)(head + 1));
+	return bufhead[0].ebp;
 }
 
 /*
@@ -97,15 +94,6 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 		return;
 	}
 
-#ifdef CONFIG_SMP
-	if (!spin_trylock(&current->mm->page_table_lock))
-		return;
-#endif
-
-	while (depth-- && head && pages_present(head))
+	while (depth-- && head)
 		head = dump_backtrace(head);
-
-#ifdef CONFIG_SMP
-	spin_unlock(&current->mm->page_table_lock);
-#endif
 }