diff options
author | Oleg Nesterov <oleg@redhat.com> | 2012-07-29 14:22:16 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2012-07-30 05:27:19 -0400 |
commit | c517ee744b96e441d9c731e245f83c6d08dc0a19 (patch) | |
tree | 8d7f7b86767ba2d5c62c2d421f388949f0483c85 /kernel/events | |
parent | f403072c6108e15f319a4a5036b650c77760522c (diff) |
uprobes: __replace_page() should not use page_address_in_vma()
page_address_in_vma(old_page) in __replace_page() is ugly and
wrong. The caller already knows the correct virtual address,
this page was found by get_user_pages(vaddr).
However, page_address_in_vma() can actually fail if
page->mapping was cleared by __delete_from_page_cache() after
get_user_pages() returns. But this means a race with page
reclaim; write_opcode() should not fail, it should retry and
read this page again. Probably the race with remove_mapping() is
not possible due to page_freeze_refs() logic, but afaics at
least shmem_writepage()->shmem_delete_from_page_cache() can
clear ->mapping.
We could change __replace_page() to return -EAGAIN in this case,
but it would be better to simply use the caller's vaddr and rely
on page_check_address().
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Anton Arapov <anton@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20120729182216.GA20311@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r-- | kernel/events/uprobes.c | 11 |
1 file changed, 4 insertions, 7 deletions
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index a2b32a51d0a2..6fda7996892b 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
@@ -127,22 +127,19 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset) | |||
127 | * based on replace_page in mm/ksm.c | 127 | * based on replace_page in mm/ksm.c |
128 | * | 128 | * |
129 | * @vma: vma that holds the pte pointing to page | 129 | * @vma: vma that holds the pte pointing to page |
130 | * @addr: address the old @page is mapped at | ||
130 | * @page: the cowed page we are replacing by kpage | 131 | * @page: the cowed page we are replacing by kpage |
131 | * @kpage: the modified page we replace page by | 132 | * @kpage: the modified page we replace page by |
132 | * | 133 | * |
133 | * Returns 0 on success, -EFAULT on failure. | 134 | * Returns 0 on success, -EFAULT on failure. |
134 | */ | 135 | */ |
135 | static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) | 136 | static int __replace_page(struct vm_area_struct *vma, unsigned long addr, |
137 | struct page *page, struct page *kpage) | ||
136 | { | 138 | { |
137 | struct mm_struct *mm = vma->vm_mm; | 139 | struct mm_struct *mm = vma->vm_mm; |
138 | unsigned long addr; | ||
139 | spinlock_t *ptl; | 140 | spinlock_t *ptl; |
140 | pte_t *ptep; | 141 | pte_t *ptep; |
141 | 142 | ||
142 | addr = page_address_in_vma(page, vma); | ||
143 | if (addr == -EFAULT) | ||
144 | return -EFAULT; | ||
145 | |||
146 | ptep = page_check_address(page, mm, addr, &ptl, 0); | 143 | ptep = page_check_address(page, mm, addr, &ptl, 0); |
147 | if (!ptep) | 144 | if (!ptep) |
148 | return -EAGAIN; | 145 | return -EAGAIN; |
@@ -243,7 +240,7 @@ retry: | |||
243 | goto unlock_out; | 240 | goto unlock_out; |
244 | 241 | ||
245 | lock_page(new_page); | 242 | lock_page(new_page); |
246 | ret = __replace_page(vma, old_page, new_page); | 243 | ret = __replace_page(vma, vaddr, old_page, new_page); |
247 | unlock_page(new_page); | 244 | unlock_page(new_page); |
248 | 245 | ||
249 | unlock_out: | 246 | unlock_out: |