author	David Howells <dhowells@redhat.com>	2006-09-27 04:50:15 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 11:26:14 -0400
commit	0ec76a110f432e98277e464b82ace8dd66571689 (patch)
tree	3ed8de0ea6869fe17bec7689c493a2db02f73f4a
parent	361f6ed1d00f666a1a7c33f3e9aaccb713f9b9e4 (diff)
[PATCH] NOMMU: Check that access_process_vm() has a valid target
Check that access_process_vm() is accessing a valid mapping in the target
process.

This limits ptrace() accesses and accesses through /proc/<pid>/maps to only
those regions actually mapped by a program.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	kernel/ptrace.c	54
-rw-r--r--	mm/memory.c	53
-rw-r--r--	mm/nommu.c	47
3 files changed, 100 insertions, 54 deletions
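
The practical effect is easiest to see from userspace: on a NOMMU target, a ptrace() peek at an address the traced program never mapped is now refused rather than serviced. Below is a minimal sketch of such a probe, not part of the patch; it assumes the target is already being traced and stopped, and trims error handling:

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Probe one word of another (already traced and stopped) process.
 * With this patch applied, an address outside the target's mappings
 * makes access_process_vm() copy nothing, so the peek fails. */
static long peek_word(pid_t pid, unsigned long addr)
{
	long word;

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
	if (errno)
		perror("PTRACE_PEEKDATA");	/* typically EIO for an unmapped address */
	return word;
}

The exact errno seen by the tracer depends on the calling path; the ptrace peek handlers report a short copy from access_process_vm() as EIO.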
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 9a111f70145c..8aad0331d82e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -241,60 +241,6 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 	return 0;
 }
 
-/*
- * Access another process' address space.
- * Source/target buffer must be kernel space,
- * Do not walk the page table directly, use get_user_pages
- */
-
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
-{
-	struct mm_struct *mm;
-	struct vm_area_struct *vma;
-	struct page *page;
-	void *old_buf = buf;
-
-	mm = get_task_mm(tsk);
-	if (!mm)
-		return 0;
-
-	down_read(&mm->mmap_sem);
-	/* ignore errors, just check how much was sucessfully transfered */
-	while (len) {
-		int bytes, ret, offset;
-		void *maddr;
-
-		ret = get_user_pages(tsk, mm, addr, 1,
-				write, 1, &page, &vma);
-		if (ret <= 0)
-			break;
-
-		bytes = len;
-		offset = addr & (PAGE_SIZE-1);
-		if (bytes > PAGE_SIZE-offset)
-			bytes = PAGE_SIZE-offset;
-
-		maddr = kmap(page);
-		if (write) {
-			copy_to_user_page(vma, page, addr,
-					  maddr + offset, buf, bytes);
-			set_page_dirty_lock(page);
-		} else {
-			copy_from_user_page(vma, page, addr,
-					    buf, maddr + offset, bytes);
-		}
-		kunmap(page);
-		page_cache_release(page);
-		len -= bytes;
-		buf += bytes;
-		addr += bytes;
-	}
-	up_read(&mm->mmap_sem);
-	mmput(mm);
-
-	return buf - old_buf;
-}
-
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 {
 	int copied = 0;
diff --git a/mm/memory.c b/mm/memory.c
index f2ef1dcfff77..601159a46ab6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2604,3 +2604,56 @@ int in_gate_area_no_task(unsigned long addr)
 }
 
 #endif	/* __HAVE_ARCH_GATE_AREA */
+
+/*
+ * Access another process' address space.
+ * Source/target buffer must be kernel space,
+ * Do not walk the page table directly, use get_user_pages
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	struct page *page;
+	void *old_buf = buf;
+
+	mm = get_task_mm(tsk);
+	if (!mm)
+		return 0;
+
+	down_read(&mm->mmap_sem);
+	/* ignore errors, just check how much was sucessfully transfered */
+	while (len) {
+		int bytes, ret, offset;
+		void *maddr;
+
+		ret = get_user_pages(tsk, mm, addr, 1,
+				write, 1, &page, &vma);
+		if (ret <= 0)
+			break;
+
+		bytes = len;
+		offset = addr & (PAGE_SIZE-1);
+		if (bytes > PAGE_SIZE-offset)
+			bytes = PAGE_SIZE-offset;
+
+		maddr = kmap(page);
+		if (write) {
+			copy_to_user_page(vma, page, addr,
+					  maddr + offset, buf, bytes);
+			set_page_dirty_lock(page);
+		} else {
+			copy_from_user_page(vma, page, addr,
+					    buf, maddr + offset, bytes);
+		}
+		kunmap(page);
+		page_cache_release(page);
+		len -= bytes;
+		buf += bytes;
+		addr += bytes;
+	}
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+
+	return buf - old_buf;
+}
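
For context on how the return value is consumed: access_process_vm() reports the number of bytes actually transferred, and callers such as ptrace_readdata() (whose opening lines appear in the kernel/ptrace.c hunk above) loop over a small kernel bounce buffer and stop at the first short copy. A hedged caller-side sketch of that pattern follows; the function name is illustrative and the headers approximate the 2.6.18-era tree:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

/* Illustrative caller modelled on ptrace_readdata(): copy len bytes of
 * another task's memory out to userspace via a kernel bounce buffer. */
static int read_other_task(struct task_struct *tsk, unsigned long src,
			   char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		int retval = access_process_vm(tsk, src, buf, this_len, 0);

		if (!retval)				/* nothing mapped/readable at src */
			return copied ? copied : -EIO;
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}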
diff --git a/mm/nommu.c b/mm/nommu.c
index d99dea31e443..d08acdae0036 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1206,3 +1206,50 @@ struct page *filemap_nopage(struct vm_area_struct *area,
 	BUG();
 	return NULL;
 }
+
+/*
+ * Access another process' address space.
+ * - source/target buffer must be kernel space
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+	struct vm_list_struct *vml;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+
+	if (addr + len < addr)
+		return 0;
+
+	mm = get_task_mm(tsk);
+	if (!mm)
+		return 0;
+
+	down_read(&mm->mmap_sem);
+
+	/* the access must start within one of the target process's mappings */
+	for (vml = mm->context.vmlist; vml; vml = vml->next)
+		if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
+			break;
+
+	if (vml) {
+		vma = vml->vma;
+
+		/* don't overrun this mapping */
+		if (addr + len >= vma->vm_end)
+			len = vma->vm_end - addr;
+
+		/* only read or write mappings where it is permitted */
+		if (write && vma->vm_flags & VM_WRITE)
+			len -= copy_to_user((void *) addr, buf, len);
+		else if (!write && vma->vm_flags & VM_READ)
+			len -= copy_from_user(buf, (void *) addr, len);
+		else
+			len = 0;
+	} else {
+		len = 0;
+	}
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+	return len;
+}
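
The NOMMU rules above reduce to simple arithmetic against the single mapping that contains the start address: reject wrap-around, clamp the length to the end of that mapping, and require VM_READ or VM_WRITE as appropriate. A small standalone sketch of the clamping part, with illustrative names and the permission check assumed to have passed already:

#include <stddef.h>

/* How many bytes a request at addr/len could transfer against one mapping
 * [vm_start, vm_end), mirroring the checks in the nommu.c hunk above. */
static size_t nommu_transferable(unsigned long vm_start, unsigned long vm_end,
				 unsigned long addr, size_t len)
{
	if (addr + len < addr)			/* reject wrap-around */
		return 0;
	if (addr < vm_start || addr >= vm_end)	/* access must start inside the mapping */
		return 0;
	if (addr + len >= vm_end)		/* don't overrun the mapping */
		len = vm_end - addr;
	return len;
}

A request that starts outside every mapping transfers nothing, which is why a tracer now sees zero bytes copied (and hence an error) for unmapped addresses on NOMMU.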