diff options
author     Michel Lespinasse <walken@google.com>          2013-02-22 19:35:55 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org> 2013-02-23 20:50:22 -0500
commit     28a35716d317980ae9bc2ff2f84c33a3cda9e884 (patch)
tree       a69307192d34334c6869cfe33b6fea4e358de718 /mm
parent     e0fb58152955142f48ed31c8c0541b53e094da6b (diff)
mm: use long type for page counts in mm_populate() and get_user_pages()
Use long type for page counts in mm_populate() so as to avoid integer
overflow when running the following test code:
int main(void) {
void *p = mmap(NULL, 0x100000000000, PROT_READ,
MAP_PRIVATE | MAP_ANON, -1, 0);
printf("p: %p\n", p);
mlockall(MCL_CURRENT);
printf("done\n");
return 0;
}
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/hugetlb.c | 12
-rw-r--r--   mm/memory.c  | 18
-rw-r--r--   mm/mlock.c   |  4
-rw-r--r--   mm/nommu.c   | 15
4 files changed, 25 insertions(+), 24 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e14a8c79a1eb..cdb64e4d238a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2920,14 +2920,14 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address, | |||
2920 | return NULL; | 2920 | return NULL; |
2921 | } | 2921 | } |
2922 | 2922 | ||
2923 | int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | 2923 | long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, |
2924 | struct page **pages, struct vm_area_struct **vmas, | 2924 | struct page **pages, struct vm_area_struct **vmas, |
2925 | unsigned long *position, int *length, int i, | 2925 | unsigned long *position, unsigned long *nr_pages, |
2926 | unsigned int flags) | 2926 | long i, unsigned int flags) |
2927 | { | 2927 | { |
2928 | unsigned long pfn_offset; | 2928 | unsigned long pfn_offset; |
2929 | unsigned long vaddr = *position; | 2929 | unsigned long vaddr = *position; |
2930 | int remainder = *length; | 2930 | unsigned long remainder = *nr_pages; |
2931 | struct hstate *h = hstate_vma(vma); | 2931 | struct hstate *h = hstate_vma(vma); |
2932 | 2932 | ||
2933 | spin_lock(&mm->page_table_lock); | 2933 | spin_lock(&mm->page_table_lock); |
@@ -2997,7 +2997,7 @@ same_page: | |||
2997 | } | 2997 | } |
2998 | } | 2998 | } |
2999 | spin_unlock(&mm->page_table_lock); | 2999 | spin_unlock(&mm->page_table_lock); |
3000 | *length = remainder; | 3000 | *nr_pages = remainder; |
3001 | *position = vaddr; | 3001 | *position = vaddr; |
3002 | 3002 | ||
3003 | return i ? i : -EFAULT; | 3003 | return i ? i : -EFAULT; |
diff --git a/mm/memory.c b/mm/memory.c
index 7bd22a621817..bc929dbad215 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1677,15 +1677,15 @@ static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long add | |||
1677 | * instead of __get_user_pages. __get_user_pages should be used only if | 1677 | * instead of __get_user_pages. __get_user_pages should be used only if |
1678 | * you need some special @gup_flags. | 1678 | * you need some special @gup_flags. |
1679 | */ | 1679 | */ |
1680 | int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | 1680 | long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, |
1681 | unsigned long start, int nr_pages, unsigned int gup_flags, | 1681 | unsigned long start, unsigned long nr_pages, |
1682 | struct page **pages, struct vm_area_struct **vmas, | 1682 | unsigned int gup_flags, struct page **pages, |
1683 | int *nonblocking) | 1683 | struct vm_area_struct **vmas, int *nonblocking) |
1684 | { | 1684 | { |
1685 | int i; | 1685 | long i; |
1686 | unsigned long vm_flags; | 1686 | unsigned long vm_flags; |
1687 | 1687 | ||
1688 | if (nr_pages <= 0) | 1688 | if (!nr_pages) |
1689 | return 0; | 1689 | return 0; |
1690 | 1690 | ||
1691 | VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); | 1691 | VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); |
@@ -1981,9 +1981,9 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, | |||
1981 | * | 1981 | * |
1982 | * See also get_user_pages_fast, for performance critical applications. | 1982 | * See also get_user_pages_fast, for performance critical applications. |
1983 | */ | 1983 | */ |
1984 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | 1984 | long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, |
1985 | unsigned long start, int nr_pages, int write, int force, | 1985 | unsigned long start, unsigned long nr_pages, int write, |
1986 | struct page **pages, struct vm_area_struct **vmas) | 1986 | int force, struct page **pages, struct vm_area_struct **vmas) |
1987 | { | 1987 | { |
1988 | int flags = FOLL_TOUCH; | 1988 | int flags = FOLL_TOUCH; |
1989 | 1989 | ||
diff --git a/mm/mlock.c b/mm/mlock.c
index 38db3b094105..e6638f565d42 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -160,7 +160,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma, | |||
160 | { | 160 | { |
161 | struct mm_struct *mm = vma->vm_mm; | 161 | struct mm_struct *mm = vma->vm_mm; |
162 | unsigned long addr = start; | 162 | unsigned long addr = start; |
163 | int nr_pages = (end - start) / PAGE_SIZE; | 163 | unsigned long nr_pages = (end - start) / PAGE_SIZE; |
164 | int gup_flags; | 164 | int gup_flags; |
165 | 165 | ||
166 | VM_BUG_ON(start & ~PAGE_MASK); | 166 | VM_BUG_ON(start & ~PAGE_MASK); |
@@ -382,7 +382,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) | |||
382 | unsigned long end, nstart, nend; | 382 | unsigned long end, nstart, nend; |
383 | struct vm_area_struct *vma = NULL; | 383 | struct vm_area_struct *vma = NULL; |
384 | int locked = 0; | 384 | int locked = 0; |
385 | int ret = 0; | 385 | long ret = 0; |
386 | 386 | ||
387 | VM_BUG_ON(start & ~PAGE_MASK); | 387 | VM_BUG_ON(start & ~PAGE_MASK); |
388 | VM_BUG_ON(len != PAGE_ALIGN(len)); | 388 | VM_BUG_ON(len != PAGE_ALIGN(len)); |
diff --git a/mm/nommu.c b/mm/nommu.c
index 87854a55829d..6ab706608492 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -140,10 +140,10 @@ unsigned int kobjsize(const void *objp) | |||
140 | return PAGE_SIZE << compound_order(page); | 140 | return PAGE_SIZE << compound_order(page); |
141 | } | 141 | } |
142 | 142 | ||
143 | int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | 143 | long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, |
144 | unsigned long start, int nr_pages, unsigned int foll_flags, | 144 | unsigned long start, unsigned long nr_pages, |
145 | struct page **pages, struct vm_area_struct **vmas, | 145 | unsigned int foll_flags, struct page **pages, |
146 | int *retry) | 146 | struct vm_area_struct **vmas, int *nonblocking) |
147 | { | 147 | { |
148 | struct vm_area_struct *vma; | 148 | struct vm_area_struct *vma; |
149 | unsigned long vm_flags; | 149 | unsigned long vm_flags; |
@@ -190,9 +190,10 @@ finish_or_fault: | |||
190 | * slab page or a secondary page from a compound page | 190 | * slab page or a secondary page from a compound page |
191 | * - don't permit access to VMAs that don't support it, such as I/O mappings | 191 | * - don't permit access to VMAs that don't support it, such as I/O mappings |
192 | */ | 192 | */ |
193 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | 193 | long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, |
194 | unsigned long start, int nr_pages, int write, int force, | 194 | unsigned long start, unsigned long nr_pages, |
195 | struct page **pages, struct vm_area_struct **vmas) | 195 | int write, int force, struct page **pages, |
196 | struct vm_area_struct **vmas) | ||
196 | { | 197 | { |
197 | int flags = 0; | 198 | int flags = 0; |
198 | 199 | ||