author     Thomas Tuttle <ttuttle@google.com>              2008-06-06 01:46:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-06-06 14:29:11 -0400
commit     aae8679b0ebcaa92f99c1c3cb0cd651594a43915 (patch)
tree       26d761d3c4cca09b58538a576948a7ba720f7d5a /fs/proc/task_mmu.c
parent     d1ee2971f5bd8a16bc5ecfe1b00e14b4fe407c4f (diff)
pagemap: fix bug in add_to_pagemap, require aligned-length reads of /proc/pid/pagemap
Fix a bug in add_to_pagemap. Previously, since pm->out was a char *,
put_user was only copying 1 byte of every PFN, resulting in the top 7
bytes of each PFN not being copied. By requiring that reads be a
multiple of 8 bytes, I can make pm->out and pm->end u64*s instead of
char*s, which makes put_user work properly, and also simplifies the
logic in add_to_pagemap a bit.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Thomas Tuttle <ttuttle@google.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
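Background note: put_user() stores sizeof(*ptr) bytes, taking the size from the
destination pointer's type, so with pm->out declared as char __user * only one
byte of each 64-bit pagemap entry ever reached userspace. The user-space sketch
below (hypothetical helper names, not kernel code) illustrates that type-driven
truncation and why switching the fields to u64 __user * makes the whole entry
land:

#include <stdint.h>
#include <stdio.h>

/*
 * User-space analogue of the bug: the number of bytes stored is taken
 * from the destination pointer's type, much like put_user() uses
 * sizeof(*ptr). These helpers are invented for illustration only.
 */
static void store_via_char_ptr(uint64_t pfn, char *dst)
{
        *dst = (char)pfn;       /* sizeof(*dst) == 1: only one byte is written
                                   (which byte of the slot depends on endianness) */
}

static void store_via_u64_ptr(uint64_t pfn, uint64_t *dst)
{
        *dst = pfn;             /* sizeof(*dst) == 8: the whole entry is written */
}

int main(void)
{
        uint64_t pfn = 0x1122334455667788ULL;
        uint64_t out = 0;

        store_via_char_ptr(pfn, (char *)&out);
        printf("char * destination: %#018llx\n", (unsigned long long)out);

        out = 0;
        store_via_u64_ptr(pfn, &out);
        printf("u64 * destination:  %#018llx\n", (unsigned long long)out);
        return 0;
}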
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--   fs/proc/task_mmu.c   28
1 file changed, 9 insertions, 19 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 88717c0f941b..17403629e330 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -496,7 +496,7 @@ const struct file_operations proc_clear_refs_operations = {
 };
 
 struct pagemapread {
-        char __user *out, *end;
+        u64 __user *out, *end;
 };
 
 #define PM_ENTRY_BYTES sizeof(u64)
@@ -519,21 +519,11 @@ struct pagemapread {
 static int add_to_pagemap(unsigned long addr, u64 pfn,
                           struct pagemapread *pm)
 {
-        /*
-         * Make sure there's room in the buffer for an
-         * entire entry. Otherwise, only copy part of
-         * the pfn.
-         */
-        if (pm->out + PM_ENTRY_BYTES >= pm->end) {
-                if (copy_to_user(pm->out, &pfn, pm->end - pm->out))
-                        return -EFAULT;
-                pm->out = pm->end;
-                return PM_END_OF_BUFFER;
-        }
-
         if (put_user(pfn, pm->out))
                 return -EFAULT;
-        pm->out += PM_ENTRY_BYTES;
+        pm->out++;
+        if (pm->out >= pm->end)
+                return PM_END_OF_BUFFER;
         return 0;
 }
 
@@ -634,7 +624,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 
         ret = -EINVAL;
         /* file position must be aligned */
-        if (*ppos % PM_ENTRY_BYTES)
+        if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
                 goto out_task;
 
         ret = 0;
@@ -664,8 +654,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
                 goto out_pages;
         }
 
-        pm.out = buf;
-        pm.end = buf + count;
+        pm.out = (u64 *)buf;
+        pm.end = (u64 *)(buf + count);
 
         if (!ptrace_may_attach(task)) {
                 ret = -EIO;
@@ -690,9 +680,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
                 if (ret == PM_END_OF_BUFFER)
                         ret = 0;
                 /* don't need mmap_sem for these, but this looks cleaner */
-                *ppos += pm.out - buf;
+                *ppos += (char *)pm.out - buf;
                 if (!ret)
-                        ret = pm.out - buf;
+                        ret = (char *)pm.out - buf;
         }
 
 out_pages:
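Usage note: with this change, both the file offset and the read length for
/proc/<pid>/pagemap must be multiples of 8, one 64-bit entry per virtual page.
A minimal sketch of a conforming reader follows (the helper name is invented
for this example; the bit layout follows Documentation/vm/pagemap.txt, and
later kernels additionally restrict PFN visibility to privileged readers):

#include <sys/types.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Invented helper: look up the pagemap entry covering 'vaddr' in 'pid'. */
static int lookup_pagemap_entry(pid_t pid, uintptr_t vaddr, uint64_t *entry)
{
        char path[64];
        long page_size = sysconf(_SC_PAGESIZE);
        /* One 8-byte entry per page, so the offset is always 8-byte aligned. */
        off_t offset = (off_t)(vaddr / page_size) * sizeof(uint64_t);
        int fd;
        ssize_t n;

        snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;

        /* Read length is sizeof(uint64_t) == 8, satisfying the new check. */
        n = pread(fd, entry, sizeof(*entry), offset);
        close(fd);
        return n == (ssize_t)sizeof(*entry) ? 0 : -1;
}

int main(void)
{
        int probe = 42;         /* any mapped address in our own address space */
        uint64_t entry;

        if (lookup_pagemap_entry(getpid(), (uintptr_t)&probe, &entry))
                return 1;

        if (entry & (1ULL << 63))       /* bit 63: page present */
                printf("PFN: %#llx\n",
                       (unsigned long long)(entry & ((1ULL << 55) - 1)));
        else
                printf("page not present\n");
        return 0;
}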