Diffstat (limited to 'mm/nommu.c')
-rw-r--r--  mm/nommu.c  33
1 files changed, 26 insertions, 7 deletions
diff --git a/mm/nommu.c b/mm/nommu.c
index 2fd2ad5da98e..53cab10fece4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -173,8 +173,8 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
+		     unsigned long start, int nr_pages, int flags,
 		     struct page **pages, struct vm_area_struct **vmas)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -189,7 +189,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
-	for (i = 0; i < len; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
 		if (!vma)
 			goto finish_or_fault;
@@ -224,7 +224,7 @@ finish_or_fault:
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-	unsigned long start, int len, int write, int force,
+	unsigned long start, int nr_pages, int write, int force,
 	struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -234,12 +234,31 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
 
-	return __get_user_pages(tsk, mm,
-				start, len, flags,
-				pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 EXPORT_SYMBOL(get_user_pages);
 
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * Returns zero and the pfn at @pfn on success, -ve otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+	unsigned long *pfn)
+{
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		return -EINVAL;
+
+	*pfn = address >> PAGE_SHIFT;
+	return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
+
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
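For illustration only (not part of this commit): since follow_pfn() is exported, a module on a nommu kernel could use it to translate a user virtual address inside a VM_IO or VM_PFNMAP mapping into a physical address. The sketch below uses era-appropriate locking (mm->mmap_sem); the function name and error handling are hypothetical.

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* Hypothetical helper: resolve a user address to a physical address. */
static int example_user_addr_to_phys(struct mm_struct *mm,
				     unsigned long uaddr,
				     phys_addr_t *phys)
{
	struct vm_area_struct *vma;
	unsigned long pfn;
	int ret;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, uaddr);
	if (!vma || uaddr < vma->vm_start) {
		ret = -EFAULT;
		goto out;
	}

	/* follow_pfn() fails with -EINVAL unless the VMA is VM_IO or VM_PFNMAP. */
	ret = follow_pfn(vma, uaddr, &pfn);
	if (ret)
		goto out;

	/* Combine the page frame number with the offset within the page. */
	*phys = ((phys_addr_t)pfn << PAGE_SHIFT) | (uaddr & ~PAGE_MASK);
out:
	up_read(&mm->mmap_sem);
	return ret;
}

On nommu the implementation is trivial (user addresses are physical, so the PFN is just address >> PAGE_SHIFT), but exporting it lets callers such as V4L/videobuf-style drivers use the same follow_pfn() interface on both MMU and nommu kernels.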
