 include/linux/mm.h |  2
 mm/memory.c        | 26
 mm/nommu.c         | 12
 3 files changed, 18 insertions(+), 22 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d006e93d5c93..ba3a7cb1eaa0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -826,7 +826,7 @@ extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long start, int len, int write, int force,
+			unsigned long start, int nr_pages, int write, int force,
 			struct page **pages, struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
diff --git a/mm/memory.c b/mm/memory.c
index f46ac18ba231..65216194eb8d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1207,8 +1207,8 @@ static inline int use_zero_page(struct vm_area_struct *vma)
 
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int flags,
+		unsigned long start, int nr_pages, int flags,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
 	unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 	int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
-	if (len <= 0)
+	if (nr_pages <= 0)
 		return 0;
 	/*
 	 * Require read or write permissions.
@@ -1269,7 +1269,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				vmas[i] = gate_vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
+			nr_pages--;
 			continue;
 		}
 
@@ -1280,7 +1280,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 		if (is_vm_hugetlb_page(vma)) {
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
-					&start, &len, i, write);
+					&start, &nr_pages, i, write);
 			continue;
 		}
 
@@ -1357,9 +1357,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				vmas[i] = vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
-		} while (len && start < vma->vm_end);
-	} while (len);
+			nr_pages--;
+		} while (nr_pages && start < vma->vm_end);
+	} while (nr_pages);
 	return i;
 }
 
@@ -1368,7 +1368,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * @tsk:	task_struct of target task
  * @mm:		mm_struct of target mm
  * @start:	starting user address
- * @len:	number of pages from start to pin
+ * @nr_pages:	number of pages from start to pin
  * @write:	whether pages will be written to by the caller
  * @force:	whether to force write access even if user mapping is
  *		readonly. This will result in the page being COWed even
@@ -1380,7 +1380,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  *		Or NULL if the caller does not require them.
  *
  * Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno. Each page returned must be released
  * with a put_page() call when it is finished with. vmas will only
  * remain valid while mmap_sem is held.
@@ -1414,7 +1414,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * See also get_user_pages_fast, for performance critical applications.
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int write, int force,
+		unsigned long start, int nr_pages, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -1424,9 +1424,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
 
-	return __get_user_pages(tsk, mm,
-			start, len, flags,
-			pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 
 EXPORT_SYMBOL(get_user_pages);
diff --git a/mm/nommu.c b/mm/nommu.c
index 2fd2ad5da98e..bf0cc762a7d2 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -173,8 +173,8 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int flags,
+		unsigned long start, int nr_pages, int flags,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -189,7 +189,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
-	for (i = 0; i < len; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
 		if (!vma)
 			goto finish_or_fault;
@@ -224,7 +224,7 @@ finish_or_fault:
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int write, int force,
+		unsigned long start, int nr_pages, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -234,9 +234,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
 
-	return __get_user_pages(tsk, mm,
-			start, len, flags,
-			pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 EXPORT_SYMBOL(get_user_pages);
 