author     Hugh Dickins <hugh@veritas.com>        2005-10-29 21:16:32 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-30 00:40:41 -0400
commit     c34d1b4d165c67b966bca4aba026443d7ff161eb
tree       27ffca9daba2a6b16d29bd508faf3e68bda2aad1
parent     c0718806cf955d5eb51ea77bffb5b21d9bba4972
[PATCH] mm: kill check_user_page_readable
check_user_page_readable is a problematic variant of follow_page. It's used
only by oprofile's i386 and arm backtrace code, at interrupt time, to
establish whether a userspace stackframe is currently readable.
This is problematic, because we want to push the page_table_lock down inside
follow_page, and later split it; whereas oprofile is doing a spin_trylock on
it (in the i386 case, forgotten in the arm case), and needs that to pin
perhaps two pages spanned by the stackframe (which might be covered by
different locks when we split).
I think oprofile is going about this in the wrong way: it doesn't need to know
the area is readable (neither i386 nor arm uses read protection of user
pages), it doesn't need to pin the memory, it should simply
__copy_from_user_inatomic, and see if that succeeds or not. Sorry, but I've
not got around to devising the sparse __user annotations for this.
Then we can eliminate check_user_page_readable, and return to a single
follow_page without the __follow_page variants.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
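
The pattern the patch adopts, in brief: speculatively copy the frame (plus one frame beyond it) with the atomic uaccess primitives, and treat a faulting copy as the end of the walk. Below is a minimal sketch modelled on the new i386 dump_backtrace() in the diff; the helper name walk_one_frame is illustrative, not part of the patch.

#include <linux/oprofile.h>	/* oprofile_add_trace */
#include <asm/uaccess.h>	/* access_ok, __copy_from_user_inatomic */

struct frame_head {
	struct frame_head *ebp;	/* saved frame pointer */
	unsigned long ret;	/* return address */
};

/* Illustrative helper (hypothetical name): walk one user stackframe */
static struct frame_head *walk_one_frame(struct frame_head *head)
{
	struct frame_head bufhead[2];	/* this frame + one beyond */

	/* Range-check first: the __ variant does no checking itself */
	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
		return NULL;
	/* _inatomic never sleeps, so it is safe at interrupt time;
	 * a nonzero return means the frame was not readable */
	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
		return NULL;

	oprofile_add_trace(bufhead[0].ret);

	/* frame pointers should strictly progress back up the stack */
	if (head >= bufhead[0].ebp)
		return NULL;
	return bufhead[0].ebp;
}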
-rw-r--r--  arch/arm/oprofile/backtrace.c   | 46
-rw-r--r--  arch/i386/oprofile/backtrace.c  | 38
-rw-r--r--  include/linux/mm.h              |  1
-rw-r--r--  mm/memory.c                     | 29
4 files changed, 26 insertions(+), 88 deletions(-)
diff --git a/arch/arm/oprofile/backtrace.c b/arch/arm/oprofile/backtrace.c
index df35c452a8bf..7c22c12618cc 100644
--- a/arch/arm/oprofile/backtrace.c
+++ b/arch/arm/oprofile/backtrace.c
@@ -49,42 +49,22 @@ static struct frame_tail* kernel_backtrace(struct frame_tail *tail)
 
 static struct frame_tail* user_backtrace(struct frame_tail *tail)
 {
-	struct frame_tail buftail;
+	struct frame_tail buftail[2];
 
-	/* hardware pte might not be valid due to dirty/accessed bit emulation
-	 * so we use copy_from_user and benefit from exception fixups */
-	if (copy_from_user(&buftail, tail, sizeof(struct frame_tail)))
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
 		return NULL;
 
-	oprofile_add_trace(buftail.lr);
+	oprofile_add_trace(buftail[0].lr);
 
 	/* frame pointers should strictly progress back up the stack
 	 * (towards higher addresses) */
-	if (tail >= buftail.fp)
+	if (tail >= buftail[0].fp)
 		return NULL;
 
-	return buftail.fp-1;
-}
-
-/* Compare two addresses and see if they're on the same page */
-#define CMP_ADDR_EQUAL(x,y,offset)	((((unsigned long) x) >> PAGE_SHIFT) \
-				== ((((unsigned long) y) + offset) >> PAGE_SHIFT))
-
-/* check that the page(s) containing the frame tail are present */
-static int pages_present(struct frame_tail *tail)
-{
-	struct mm_struct * mm = current->mm;
-
-	if (!check_user_page_readable(mm, (unsigned long)tail))
-		return 0;
-
-	if (CMP_ADDR_EQUAL(tail, tail, 8))
-		return 1;
-
-	if (!check_user_page_readable(mm, ((unsigned long)tail) + 8))
-		return 0;
-
-	return 1;
+	return buftail[0].fp-1;
 }
 
 /*
@@ -118,7 +98,6 @@ static int valid_kernel_stack(struct frame_tail *tail, struct pt_regs *regs)
 void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
 	struct frame_tail *tail;
-	unsigned long last_address = 0;
 
 	tail = ((struct frame_tail *) regs->ARM_fp) - 1;
 
@@ -132,13 +111,6 @@ void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 		return;
 	}
 
-	while (depth-- && tail && !((unsigned long) tail & 3)) {
-		if ((!CMP_ADDR_EQUAL(last_address, tail, 0)
-			|| !CMP_ADDR_EQUAL(last_address, tail, 8))
-				&& !pages_present(tail))
-			return;
-		last_address = (unsigned long) tail;
+	while (depth-- && tail && !((unsigned long) tail & 3))
 		tail = user_backtrace(tail);
-	}
 }
-
diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c
index 65dfd2edb671..21654be3f73f 100644
--- a/arch/i386/oprofile/backtrace.c
+++ b/arch/i386/oprofile/backtrace.c
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/ptrace.h>
+#include <asm/uaccess.h>
 
 struct frame_head {
 	struct frame_head * ebp;
@@ -21,26 +22,22 @@ struct frame_head {
 static struct frame_head *
 dump_backtrace(struct frame_head * head)
 {
-	oprofile_add_trace(head->ret);
+	struct frame_head bufhead[2];
 
-	/* frame pointers should strictly progress back up the stack
-	 * (towards higher addresses) */
-	if (head >= head->ebp)
+	/* Also check accessibility of one struct frame_head beyond */
+	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
+		return NULL;
+	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
 		return NULL;
 
-	return head->ebp;
-}
-
-/* check that the page(s) containing the frame head are present */
-static int pages_present(struct frame_head * head)
-{
-	struct mm_struct * mm = current->mm;
+	oprofile_add_trace(bufhead[0].ret);
 
-	/* FIXME: only necessary once per page */
-	if (!check_user_page_readable(mm, (unsigned long)head))
-		return 0;
+	/* frame pointers should strictly progress back up the stack
+	 * (towards higher addresses) */
+	if (head >= bufhead[0].ebp)
+		return NULL;
 
-	return check_user_page_readable(mm, (unsigned long)(head + 1));
+	return bufhead[0].ebp;
 }
 
 /*
@@ -97,15 +94,6 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 		return;
 	}
 
-#ifdef CONFIG_SMP
-	if (!spin_trylock(&current->mm->page_table_lock))
-		return;
-#endif
-
-	while (depth-- && head && pages_present(head))
+	while (depth-- && head)
 		head = dump_backtrace(head);
-
-#ifdef CONFIG_SMP
-	spin_unlock(&current->mm->page_table_lock);
-#endif
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 972e2ce8e07c..aa8de20e2e80 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -944,7 +944,6 @@ extern struct page * vmalloc_to_page(void *addr);
 extern unsigned long vmalloc_to_pfn(void *addr);
 extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
 		int write);
-extern int check_user_page_readable(struct mm_struct *mm, unsigned long address);
 int remap_pfn_range(struct vm_area_struct *, unsigned long,
 		unsigned long, unsigned long, pgprot_t);
 
diff --git a/mm/memory.c b/mm/memory.c
index 622a4ef5409f..51f7c0a220d4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -809,8 +809,7 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
  * Do a quick page-table lookup for a single page.
  * mm->page_table_lock must be held.
  */
-static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
-			int read, int write, int accessed)
+struct page *follow_page(struct mm_struct *mm, unsigned long address, int write)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -846,16 +845,12 @@ static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
 	if (pte_present(pte)) {
 		if (write && !pte_write(pte))
 			goto out;
-		if (read && !pte_read(pte))
-			goto out;
 		pfn = pte_pfn(pte);
 		if (pfn_valid(pfn)) {
 			page = pfn_to_page(pfn);
-			if (accessed) {
-				if (write && !pte_dirty(pte) &&!PageDirty(page))
-					set_page_dirty(page);
-				mark_page_accessed(page);
-			}
+			if (write && !pte_dirty(pte) &&!PageDirty(page))
+				set_page_dirty(page);
+			mark_page_accessed(page);
 			return page;
 		}
 	}
@@ -864,22 +859,6 @@ out:
 	return NULL;
 }
 
-inline struct page *
-follow_page(struct mm_struct *mm, unsigned long address, int write)
-{
-	return __follow_page(mm, address, 0, write, 1);
-}
-
-/*
- * check_user_page_readable() can be called frm niterrupt context by oprofile,
- * so we need to avoid taking any non-irq-safe locks
- */
-int check_user_page_readable(struct mm_struct *mm, unsigned long address)
-{
-	return __follow_page(mm, address, 1, 0, 0) != NULL;
-}
-EXPORT_SYMBOL(check_user_page_readable);
-
 static inline int
 untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
 		unsigned long address)
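
With the __follow_page variants gone, the single remaining follow_page() keeps its documented contract: as the comment retained in mm/memory.c says, mm->page_table_lock must be held by the caller. A hypothetical caller sketch; the function name lookup_single_page is ours, not from the tree.

#include <linux/mm.h>
#include <linux/spinlock.h>

/* Hypothetical caller (illustrative name): look up the single page
 * backing a user address, honouring the locking rule documented
 * above follow_page() in mm/memory.c. */
static struct page *lookup_single_page(struct mm_struct *mm,
				unsigned long address, int write)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	page = follow_page(mm, address, write);
	spin_unlock(&mm->page_table_lock);
	return page;
}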