Diffstat (limited to 'mm/gup.c'):
 -rw-r--r--  mm/gup.c  662
 1 file changed, 662 insertions(+), 0 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
new file mode 100644
index 000000000000..cc5a9e7adea7
--- /dev/null
+++ b/mm/gup.c
@@ -0,0 +1,662 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte) || pte_file(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_numa(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptl);
	return page;
bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, set to one less than the number of base pages
 *	covered by the returned page (so 0 for an ordinary page)
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		if (flags & FOLL_GET)
			return NULL;
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		if (flags & FOLL_GET) {
			/*
			 * Refcounts on tail pages are not well-defined and
			 * shouldn't be taken. The caller should handle a NULL
			 * return when trying to follow tail pages.
			 */
			if (PageHead(page))
				get_page(page);
			else
				page = NULL;
		}
		return page;
	}
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}
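
/*
 * Editorial note (not in the original source): a caller of
 * follow_page_mask() must be prepared for three outcomes, as
 * __get_user_pages() below demonstrates:
 *
 *	page = follow_page_mask(vma, address, flags, &page_mask);
 *	if (IS_ERR(page))	-> hard error, e.g. ERR_PTR(-EFAULT)
 *	else if (!page)		-> nothing mapped; fault it in and retry
 *	else			-> success; page_mask describes the extent
 *				   of a huge page, 0 for an ordinary one
 */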

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* For mlock, just skip the stack guard page. */
	if ((*flags & FOLL_MLOCK) &&
	    (stack_guard_page_start(vma, address) ||
	     stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags)) {
				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
				return -EFAULT;
			}
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0, returns 0. If no pages were pinned,
 * returns -errno. Each page returned must be released with a put_page()
 * call when it is finished with. vmas will only remain valid while
 * mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently refaulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration, or we have crossed a vma boundary */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		}
		if (IS_ERR(page))
			return i ? i : PTR_ERR(page);
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
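		/*
		 * Editorial note (not in the original source): page_increm
		 * steps to the end of the mapping just returned. For a
		 * normal page, page_mask is 0 and page_increm is 1. For a
		 * THP with page_mask = 511 and a start in the middle of the
		 * huge page, say at subpage 509, this gives
		 * 1 + (~509 & 511) = 1 + 2 = 3, covering subpages 509-511
		 * and landing exactly on the next huge page boundary.
		 */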
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);
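
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * one plausible caller pattern for __get_user_pages(). The helper name is
 * hypothetical; only the gup call and its FOLL_ flags come from the code
 * above. Pages are looked up with mmap_sem held for read, and each page
 * returned must later be released with put_page().
 */
static long example_pin_user_range(unsigned long start, unsigned long nr_pages,
				   struct page **pages)
{
	struct mm_struct *mm = current->mm;
	long pinned, i;

	down_read(&mm->mmap_sem);
	/* FOLL_GET is mandatory whenever a pages array is supplied */
	pinned = __get_user_pages(current, mm, start, nr_pages,
				  FOLL_TOUCH | FOLL_GET, pages, NULL, NULL);
	up_read(&mm->mmap_sem);

	if (pinned <= 0)
		return pinned;

	/* ... use pages[0..pinned-1]; the refs keep them from being freed ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned;
}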

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where for locking
 * reasons we try to access user memory in atomic context (within a
 * pagefault_disable() section), the access fails with -EFAULT, and we want
 * to resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference from get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This should be called with the mmap_sem held for read.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
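
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the futex-style pattern fixup_user_fault() is designed for, following the
 * comment above. The helper name is hypothetical, and __put_user() /
 * pagefault_disable() are assumed available via <linux/uaccess.h>: attempt
 * the access with page faults disabled, and on -EFAULT resolve the fault
 * under mmap_sem so the caller can retry.
 */
static int example_futex_style_write(u32 __user *uaddr, u32 val)
{
	int ret;

	pagefault_disable();
	ret = __put_user(val, uaddr);	/* -EFAULT if it would fault */
	pagefault_enable();

	if (ret) {
		down_read(&current->mm->mmap_sem);
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr,
				       FAULT_FLAG_WRITE);
		up_read(&current->mm->mmap_sem);
		/* on success (0), the caller would retry the fast path */
	}
	return ret;
}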

/**
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0, returns 0. If no pages were pinned,
 * returns -errno. Each page returned must be released with a put_page()
 * call when it is finished with. vmas will only remain valid while
 * mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently refaulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
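
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the dirty/release protocol documented above for a write=1 caller. The
 * helper name is hypothetical; calling set_page_dirty_lock() before
 * put_page() is the rule stated in the comment above.
 */
static void example_release_written_pages(struct page **pages, long nr)
{
	long i;

	/* ... after DMA into, or kmap-based writes to, the pages ... */
	for (i = 0; i < nr; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
}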

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
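
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a core dumper might consume get_dump_page(), modeled on the dump loop
 * in fs/binfmt_elf.c. The helper name is hypothetical, and dump_emit()/
 * dump_skip() are assumed available via <linux/coredump.h> (kmap/kunmap via
 * <linux/highmem.h>). A NULL return becomes a hole (dump_skip) rather than
 * a failure, matching the comment above.
 */
#ifdef CONFIG_ELF_CORE
static int example_dump_page(struct coredump_params *cprm, unsigned long addr)
{
	struct page *page = get_dump_page(addr);
	int ok;

	if (page) {
		void *kaddr = kmap(page);
		ok = dump_emit(cprm, kaddr, PAGE_SIZE);
		kunmap(page);
		page_cache_release(page);
	} else {
		ok = dump_skip(cprm, PAGE_SIZE);
	}
	return ok;
}
#endif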