author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>      2010-11-24 15:57:13 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-11-24 16:50:46 -0500
commit     ea251c1d5c481cda1cf6b0c9e4965f04a6cf2ffc
tree       989753f439461fb333432761b8bf39d408572c62 /fs/proc
parent     5f0af70a25593a9d53b87bc8d31902fb7cc63e40
pagemap: set pagemap walk limit to PMD boundary
Currently one pagemap_read() call walks over PAGEMAP_WALK_SIZE bytes (== 512
pages). But there is a corner case where walk_pmd_range() accidentally runs
over a VMA associated with a hugetlbfs file.
For example, when a process has mappings to VMAs as shown below:
# cat /proc/<pid>/maps
...
3a58f6d000-3a58f72000 rw-p 00000000 00:00 0
7fbd51853000-7fbd51855000 rw-p 00000000 00:00 0
7fbd5186c000-7fbd5186e000 rw-p 00000000 00:00 0
7fbd51a00000-7fbd51c00000 rw-s 00000000 00:12 8614 /hugepages/test
then pagemap_read() goes down the walk_pmd_range() path and walks the range
0x7fbd51853000-0x7fbd51a53000, but the hugetlbfs VMA should be handled by
walk_hugetlb_range(). Otherwise the PMD for the hugepage is considered bad
and cleared, which causes undesirable results.
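To make the overlap concrete, here is a small user-space illustration of the
arithmetic (not kernel code; it assumes x86_64 with 4 KB base pages, so
PMD_SIZE and hence PAGEMAP_WALK_SIZE is 2 MB, and reuses the addresses from
the maps output above):

#include <stdio.h>

int main(void)
{
    /* Assumed geometry: x86_64 with 4 KB base pages, so PMD_SIZE == 2 MB. */
    unsigned long walk_size = 1UL << 21;         /* PAGEMAP_WALK_SIZE == PMD_SIZE */
    unsigned long start     = 0x7fbd51853000UL;  /* walk start, from the maps above */
    unsigned long hugetlb   = 0x7fbd51a00000UL;  /* start of the hugetlbfs VMA */
    unsigned long end       = start + walk_size; /* == 0x7fbd51a53000 */

    /* The fixed-size window runs 0x53000 bytes past the hugetlbfs VMA start. */
    printf("walk end %#lx %s the hugetlbfs VMA\n",
           end, end > hugetlb ? "overlaps" : "stays below");
    return 0;
}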
This patch fixes it by limiting each pagemap walk chunk to a single PMD.
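As an illustration only, a minimal user-space sketch of the new clamping
(the PMD_SIZE/PMD_MASK values are assumed x86_64 ones; the actual kernel
change is the one-line diff below):

#include <stdio.h>

#define PMD_SIZE          (1UL << 21)   /* assumption: 2 MB PMDs (x86_64, 4 KB pages) */
#define PMD_MASK          (~(PMD_SIZE - 1))
#define PAGEMAP_WALK_SIZE (PMD_SIZE)
#define PAGEMAP_WALK_MASK (PMD_MASK)

int main(void)
{
    unsigned long start_vaddr = 0x7fbd51853000UL;  /* from the maps above */

    /* Before the patch: a fixed-size step that can cross a PMD boundary. */
    unsigned long old_end = start_vaddr + PAGEMAP_WALK_SIZE;

    /* After the patch: round the end down to the PMD boundary. */
    unsigned long new_end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;

    printf("old end %#lx (runs into the hugetlbfs VMA)\n", old_end);
    printf("new end %#lx (stops at the PMD boundary)\n", new_end);
    return 0;
}

Because a hugetlbfs mapping is PMD-aligned, the hugetlb VMA then always begins
a fresh walk chunk, so it is handed to walk_hugetlb_range() instead of being
scanned by walk_pmd_range().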
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')

 fs/proc/task_mmu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index da6b01d70f01..c126c83b9a45 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -706,6 +706,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
  * skip over unmapped regions.
  */
 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
+#define PAGEMAP_WALK_MASK	(PMD_MASK)
 static ssize_t pagemap_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
@@ -776,7 +777,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 		unsigned long end;
 
 		pm.pos = 0;
-		end = start_vaddr + PAGEMAP_WALK_SIZE;
+		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
 		/* overflow ? */
 		if (end < start_vaddr || end > end_vaddr)
 			end = end_vaddr;