 include/linux/hugetlb.h |  6 +++---
 include/linux/mm.h      | 15 ++++++++-------
 mm/hugetlb.c            | 12 ++++++------
 mm/memory.c             | 18 +++++++++---------
 mm/mlock.c              |  4 ++--
 mm/nommu.c              | 15 ++++++++-------
 6 files changed, 36 insertions(+), 34 deletions(-)
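
The common thread across all six files: page counts move from int to unsigned long and return values from int to long, because an int count overflows once a range spans 2^31 pages, i.e. 8 TiB with 4 KiB pages. A minimal userspace sketch of the failure mode (illustration only, assuming an LP64 system; the 8 TiB range is hypothetical):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical 8 TiB range with 4 KiB pages: exactly 2^31 pages */
            unsigned long start = 0;
            unsigned long end = 8UL << 40;

            int nr_int = (end - start) / 4096;             /* overflows int */
            unsigned long nr_ulong = (end - start) / 4096; /* 2147483648 */

            /* on common LP64 systems the int wraps to -2147483648 */
            printf("int:           %d\n", nr_int);
            printf("unsigned long: %lu\n", nr_ulong);
            return 0;
    }

This is exactly the computation mlock.c performs for nr_pages, fixed further down.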
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0c80d3f57a5b..eedc334fb6f5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -43,9 +43,9 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
 #endif
 
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
-			struct page **, struct vm_area_struct **,
-			unsigned long *, int *, int, unsigned int flags);
+long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+			 struct page **, struct vm_area_struct **,
+			 unsigned long *, unsigned long *, long, unsigned int);
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *);
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97da0302cf51..87b0ef253607 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1013,13 +1013,14 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, int write);
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *nonblocking);
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long start, int nr_pages, int write, int force,
-			struct page **pages, struct vm_area_struct **vmas);
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		      unsigned long start, unsigned long nr_pages,
+		      unsigned int foll_flags, struct page **pages,
+		      struct vm_area_struct **vmas, int *nonblocking);
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		    unsigned long start, unsigned long nr_pages,
+		    int write, int force, struct page **pages,
+		    struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 struct kvec;
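
A hypothetical caller sketch of the new prototypes (pin_user_range() and its chunking loop are invented for illustration, not part of this patch): the running total stays unsigned long, the per-call result stays long so negative errors survive, and the caller is expected to hold mm->mmap_sem as before:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* invented helper: pin a large range in chunks via the new API */
    static long pin_user_range(struct task_struct *tsk, struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
                               struct page **pages)
    {
            unsigned long done = 0;

            while (done < nr_pages) {
                    long ret = get_user_pages(tsk, mm,
                                              start + done * PAGE_SIZE,
                                              nr_pages - done, 1, 0,
                                              pages + done, NULL);
                    if (ret < 0)
                            /* report partial progress if any, else the error */
                            return done ? (long)done : ret;
                    done += ret;
            }
            return done;
    }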
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e14a8c79a1eb..cdb64e4d238a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2920,14 +2920,14 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
-int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-			struct page **pages, struct vm_area_struct **vmas,
-			unsigned long *position, int *length, int i,
-			unsigned int flags)
+long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			 struct page **pages, struct vm_area_struct **vmas,
+			 unsigned long *position, unsigned long *nr_pages,
+			 long i, unsigned int flags)
 {
 	unsigned long pfn_offset;
 	unsigned long vaddr = *position;
-	int remainder = *length;
+	unsigned long remainder = *nr_pages;
 	struct hstate *h = hstate_vma(vma);
 
 	spin_lock(&mm->page_table_lock);
@@ -2997,7 +2997,7 @@ same_page:
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	*length = remainder;
+	*nr_pages = remainder;
 	*position = vaddr;
 
 	return i ? i : -EFAULT;
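
An aside on the convention the hugetlb hunks preserve: *position and *nr_pages are in/out parameters advanced past whatever was consumed, and the function returns the running total i, or -EFAULT only when nothing was processed. A simplified userspace sketch of that contract (consume() and its step limit are invented):

    #include <errno.h>
    #include <stdio.h>

    /* invented stand-in for follow_hugetlb_page()'s in/out contract */
    static long consume(unsigned long *position, unsigned long *nr_pages,
                        long i, unsigned long step)
    {
            while (*nr_pages && step--) {
                    *position += 4096;      /* one page's worth of progress */
                    (*nr_pages)--;
                    i++;                    /* running total, now a long */
            }
            return i ? i : -EFAULT;         /* progress wins over the error */
    }

    int main(void)
    {
            unsigned long pos = 0, remaining = 8;
            long done = consume(&pos, &remaining, 0, 3);

            /* prints: done=3 pos=12288 remaining=5 */
            printf("done=%ld pos=%lu remaining=%lu\n", done, pos, remaining);
            return 0;
    }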
diff --git a/mm/memory.c b/mm/memory.c
index 7bd22a621817..bc929dbad215 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1677,15 +1677,15 @@ static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long add
  * instead of __get_user_pages. __get_user_pages should be used only if
  * you need some special @gup_flags.
  */
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int nr_pages, unsigned int gup_flags,
-		struct page **pages, struct vm_area_struct **vmas,
-		int *nonblocking)
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, unsigned long nr_pages,
+		unsigned int gup_flags, struct page **pages,
+		struct vm_area_struct **vmas, int *nonblocking)
 {
-	int i;
+	long i;
 	unsigned long vm_flags;
 
-	if (nr_pages <= 0)
+	if (!nr_pages)
 		return 0;
 
 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
@@ -1981,9 +1981,9 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
  *
  * See also get_user_pages_fast, for performance critical applications.
  */
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int nr_pages, int write, int force,
-		struct page **pages, struct vm_area_struct **vmas)
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, unsigned long nr_pages, int write,
+		int force, struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = FOLL_TOUCH;
 
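
Why the guard changes from "nr_pages <= 0" to "!nr_pages": once the count is unsigned, zero is the only degenerate input, and a negative value can no longer reach the function, it can only wrap at the call site. A small userspace sketch of the signedness difference (illustration only):

    #include <stdio.h>

    int main(void)
    {
            int old_count = -1;     /* conceivable under the old int API */
            unsigned long nr_pages;

            /* old check: "<= 0" rejected both zero and negative counts */
            if (old_count <= 0)
                    printf("old check rejects %d\n", old_count);

            /* new check: only zero remains; a stray negative int would
               wrap on conversion, so callers must pass a sane count */
            nr_pages = old_count;
            if (!nr_pages)
                    printf("nothing to do\n");
            else
                    printf("-1 wrapped to %lu\n", nr_pages);
            return 0;
    }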
diff --git a/mm/mlock.c b/mm/mlock.c
index 38db3b094105..e6638f565d42 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -160,7 +160,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long addr = start;
-	int nr_pages = (end - start) / PAGE_SIZE;
+	unsigned long nr_pages = (end - start) / PAGE_SIZE;
 	int gup_flags;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
@@ -382,7 +382,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 	unsigned long end, nstart, nend;
 	struct vm_area_struct *vma = NULL;
 	int locked = 0;
-	int ret = 0;
+	long ret = 0;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
 	VM_BUG_ON(len != PAGE_ALIGN(len));
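
The mlock.c side shows why __mm_populate's accumulator must be a signed long rather than unsigned: ret has to carry both a negative errno from the GUP path and a success count that may exceed INT_MAX. A small sketch (the values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
            long ret;

            ret = -14;              /* an -EFAULT style result from GUP */
            printf("%s\n", ret < 0 ? "error path" : "ok");

            ret = 1L << 31;         /* 2^31 pages actually populated */
            /* an int accumulator would typically see this as negative */
            printf("populated %ld pages (as int: %d)\n", ret, (int)ret);
            return 0;
    }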
diff --git a/mm/nommu.c b/mm/nommu.c
index 87854a55829d..6ab706608492 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -140,10 +140,10 @@ unsigned int kobjsize(const void *objp)
 	return PAGE_SIZE << compound_order(page);
 }
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *retry)
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		      unsigned long start, unsigned long nr_pages,
+		      unsigned int foll_flags, struct page **pages,
+		      struct vm_area_struct **vmas, int *nonblocking)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -190,9 +190,10 @@ finish_or_fault:
  *   slab page or a secondary page from a compound page
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-	unsigned long start, int nr_pages, int write, int force,
-	struct page **pages, struct vm_area_struct **vmas)
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		    unsigned long start, unsigned long nr_pages,
+		    int write, int force, struct page **pages,
+		    struct vm_area_struct **vmas)
 {
 	int flags = 0;
 
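
One closing note on portability (not from the patch): besides renaming nommu's last parameter from retry to nonblocking to match the MMU version, the wider types only buy headroom where long is 64-bit. On ILP32 targets long stays 32 bits, but there the 4 GiB address space bounds page counts far below overflow anyway. A trivial check:

    #include <stdio.h>

    int main(void)
    {
            printf("sizeof(int)  = %zu\n", sizeof(int));   /* 4 on LP64 */
            printf("sizeof(long) = %zu\n", sizeof(long));  /* 8 on LP64 */
            return 0;
    }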