author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>    2014-06-04 19:08:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>          2014-06-04 19:54:04 -0400
commit     4bbd4c776a63a063546552de42f6a535395f6d9e
tree       2a722c3bde3f3dabf85030b391b44c2cb3972df2 /mm/gup.c
parent     f4527c90868d8fa175c68ccf216cf9b67a7d8a1a
mm: move get_user_pages()-related code to separate file
mm/memory.c is overloaded: over 4k lines. The get_user_pages() code is
pretty much self-contained; let's move it to a separate file.

No other changes made.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/gup.c')

 -rw-r--r--   mm/gup.c   649
 1 file changed, 649 insertions, 0 deletions

diff --git a/mm/gup.c b/mm/gup.c
new file mode 100644
index 000000000000..ea88b65f264d
--- /dev/null
+++ b/mm/gup.c
@@ -0,0 +1,649 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int flags,
                              unsigned int *page_mask)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        *page_mask = 0;

        page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
        if (!IS_ERR(page)) {
                BUG_ON(flags & FOLL_GET);
                goto out;
        }

        page = NULL;
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                goto no_page_table;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                goto no_page_table;
        if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
                if (flags & FOLL_GET)
                        goto out;
                page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
                goto out;
        }
        if (unlikely(pud_bad(*pud)))
                goto no_page_table;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                goto no_page_table;
        if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
                page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
                if (flags & FOLL_GET) {
                        /*
                         * Refcount on tail pages are not well-defined and
                         * shouldn't be taken. The caller should handle a NULL
                         * return when trying to follow tail pages.
                         */
                        if (PageHead(page))
                                get_page(page);
                        else {
                                page = NULL;
                                goto out;
                        }
                }
                goto out;
        }
        if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
                goto no_page_table;
        if (pmd_trans_huge(*pmd)) {
                if (flags & FOLL_SPLIT) {
                        split_huge_page_pmd(vma, address, pmd);
                        goto split_fallthrough;
                }
                ptl = pmd_lock(mm, pmd);
                if (likely(pmd_trans_huge(*pmd))) {
                        if (unlikely(pmd_trans_splitting(*pmd))) {
                                spin_unlock(ptl);
                                wait_split_huge_page(vma->anon_vma, pmd);
                        } else {
                                page = follow_trans_huge_pmd(vma, address,
                                                             pmd, flags);
                                spin_unlock(ptl);
                                *page_mask = HPAGE_PMD_NR - 1;
                                goto out;
                        }
                } else
                        spin_unlock(ptl);
                /* fall through */
        }
split_fallthrough:
        if (unlikely(pmd_bad(*pmd)))
                goto no_page_table;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

        pte = *ptep;
        if (!pte_present(pte)) {
                swp_entry_t entry;
                /*
                 * KSM's break_ksm() relies upon recognizing a ksm page
                 * even while it is being migrated, so for that case we
                 * need migration_entry_wait().
                 */
                if (likely(!(flags & FOLL_MIGRATION)))
                        goto no_page;
                if (pte_none(pte) || pte_file(pte))
                        goto no_page;
                entry = pte_to_swp_entry(pte);
                if (!is_migration_entry(entry))
                        goto no_page;
                pte_unmap_unlock(ptep, ptl);
                migration_entry_wait(mm, pmd, address);
                goto split_fallthrough;
        }
        if ((flags & FOLL_NUMA) && pte_numa(pte))
                goto no_page;
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;

        page = vm_normal_page(vma, address, pte);
        if (unlikely(!page)) {
                if ((flags & FOLL_DUMP) ||
                    !is_zero_pfn(pte_pfn(pte)))
                        goto bad_page;
                page = pte_page(pte);
        }

        if (flags & FOLL_GET)
                get_page_foll(page);
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
                        set_page_dirty(page);
                /*
                 * pte_mkyoung() would be more correct here, but atomic care
                 * is needed to avoid losing the dirty bit: it is easier to use
                 * mark_page_accessed().
                 */
                mark_page_accessed(page);
        }
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                /*
                 * The preliminary mapping check is mainly to avoid the
                 * pointless overhead of lock_page on the ZERO_PAGE
                 * which might bounce very badly if there is contention.
                 *
                 * If the page is already locked, we don't need to
                 * handle it now - vmscan will handle it later if and
                 * when it attempts to reclaim the page.
                 */
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();        /* push cached pages to LRU */
                        /*
                         * Because we lock page here, and migration is
                         * blocked by the pte's page reference, and we
                         * know the page is still mapped, we don't even
                         * need to check for file-cache page truncation.
                         */
                        mlock_vma_page(page);
                        unlock_page(page);
                }
        }
unlock:
        pte_unmap_unlock(ptep, ptl);
out:
        return page;

bad_page:
        pte_unmap_unlock(ptep, ptl);
        return ERR_PTR(-EFAULT);

no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
                return page;

no_page_table:
        /*
         * When core dumping an enormous anonymous area that nobody
         * has touched so far, we don't want to allocate unnecessary pages or
         * page tables.  Return error instead of NULL to skip handle_mm_fault,
         * then get_dump_page() will return NULL to leave a hole in the dump.
         * But we can only make this optimization where a hole would surely
         * be zero-filled if handle_mm_fault() actually did handle it.
         */
        if ((flags & FOLL_DUMP) &&
            (!vma->vm_ops || !vma->vm_ops->fault))
                return ERR_PTR(-EFAULT);
        return page;
}
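
/*
 * Editor's illustrative sketch, not part of this commit: how a caller is
 * expected to consume follow_page_mask()'s three-way result -- a page
 * pointer, NULL (nothing mapped yet, so fault and retry), or an ERR_PTR().
 * It condenses the loop __get_user_pages() runs below; the function name
 * and its parameters are hypothetical, and retry/error handling is
 * deliberately minimal.
 */
#if 0   /* example only */
static struct page *example_lookup_one_page(struct vm_area_struct *vma,
                                            unsigned long addr,
                                            unsigned int foll_flags,
                                            unsigned int fault_flags)
{
        unsigned int page_mask;
        struct page *page;

        while (!(page = follow_page_mask(vma, addr, foll_flags, &page_mask))) {
                /* NULL: nothing usable mapped here yet -- fault and retry */
                int ret = handle_mm_fault(vma->vm_mm, vma, addr, fault_flags);

                if (ret & VM_FAULT_ERROR)
                        return ERR_PTR(-EFAULT);
        }
        if (IS_ERR(page))
                return page;    /* e.g. -EFAULT for a bad mapping */
        /*
         * For a transparent huge page, *page_mask is HPAGE_PMD_NR - 1 and
         * the neighbouring addresses map into the same compound page.
         */
        return page;
}
#endif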

static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
        return stack_guard_page_start(vma, addr) ||
               stack_guard_page_end(vma, addr+PAGE_SIZE);
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk: task_struct of target task
 * @mm: mm_struct of target mm
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @gup_flags: flags modifying pin behaviour
 * @pages: array that receives pointers to the pages pinned.
 *         Should be at least nr_pages long. Or NULL, if caller
 *         only intends to ensure the pages are faulted in.
 * @vmas: array of pointers to vmas corresponding to each page.
 *        Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *nonblocking)
{
        long i;
        unsigned long vm_flags;
        unsigned int page_mask;

        if (!nr_pages)
                return 0;

        VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

        /*
         * If FOLL_FORCE is set then do not force a full fault as the hinting
         * fault information is unrelated to the reference behaviour of a task
         * using the address space
         */
        if (!(gup_flags & FOLL_FORCE))
                gup_flags |= FOLL_NUMA;

        i = 0;

        do {
                struct vm_area_struct *vma;

                vma = find_extend_vma(mm, start);
                if (!vma && in_gate_area(mm, start)) {
                        unsigned long pg = start & PAGE_MASK;
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;
                        pte_t *pte;

                        /* user gate pages are read-only */
                        if (gup_flags & FOLL_WRITE)
                                goto efault;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
                        else
                                pgd = pgd_offset_gate(mm, pg);
                        BUG_ON(pgd_none(*pgd));
                        pud = pud_offset(pgd, pg);
                        BUG_ON(pud_none(*pud));
                        pmd = pmd_offset(pud, pg);
                        if (pmd_none(*pmd))
                                goto efault;
                        VM_BUG_ON(pmd_trans_huge(*pmd));
                        pte = pte_offset_map(pmd, pg);
                        if (pte_none(*pte)) {
                                pte_unmap(pte);
                                goto efault;
                        }
                        vma = get_gate_vma(mm);
                        if (pages) {
                                struct page *page;

                                page = vm_normal_page(vma, start, *pte);
                                if (!page) {
                                        if (!(gup_flags & FOLL_DUMP) &&
                                             is_zero_pfn(pte_pfn(*pte)))
                                                page = pte_page(*pte);
                                        else {
                                                pte_unmap(pte);
                                                goto efault;
                                        }
                                }
                                pages[i] = page;
                                get_page(page);
                        }
                        pte_unmap(pte);
                        page_mask = 0;
                        goto next_page;
                }

                if (!vma)
                        goto efault;
                vm_flags = vma->vm_flags;
                if (vm_flags & (VM_IO | VM_PFNMAP))
                        goto efault;

                if (gup_flags & FOLL_WRITE) {
                        if (!(vm_flags & VM_WRITE)) {
                                if (!(gup_flags & FOLL_FORCE))
                                        goto efault;
                                /*
                                 * We used to let the write,force case do COW
                                 * in a VM_MAYWRITE VM_SHARED !VM_WRITE vma, so
                                 * ptrace could set a breakpoint in a read-only
                                 * mapping of an executable, without corrupting
                                 * the file (yet only when that file had been
                                 * opened for writing!). Anon pages in shared
                                 * mappings are surprising: now just reject it.
                                 */
                                if (!is_cow_mapping(vm_flags)) {
                                        WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
                                        goto efault;
                                }
                        }
                } else {
                        if (!(vm_flags & VM_READ)) {
                                if (!(gup_flags & FOLL_FORCE))
                                        goto efault;
                                /*
                                 * Is there actually any vma we can reach here
                                 * which does not have VM_MAYREAD set?
                                 */
                                if (!(vm_flags & VM_MAYREAD))
                                        goto efault;
                        }
                }

                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
                                        &start, &nr_pages, i, gup_flags);
                        continue;
                }

                do {
                        struct page *page;
                        unsigned int foll_flags = gup_flags;
                        unsigned int page_increm;

                        /*
                         * If we have a pending SIGKILL, don't keep faulting
                         * pages and potentially allocating memory.
                         */
                        if (unlikely(fatal_signal_pending(current)))
                                return i ? i : -ERESTARTSYS;

                        cond_resched();
                        while (!(page = follow_page_mask(vma, start,
                                                foll_flags, &page_mask))) {
                                int ret;
                                unsigned int fault_flags = 0;

                                /* For mlock, just skip the stack guard page. */
                                if (foll_flags & FOLL_MLOCK) {
                                        if (stack_guard_page(vma, start))
                                                goto next_page;
                                }
                                if (foll_flags & FOLL_WRITE)
                                        fault_flags |= FAULT_FLAG_WRITE;
                                if (nonblocking)
                                        fault_flags |= FAULT_FLAG_ALLOW_RETRY;
                                if (foll_flags & FOLL_NOWAIT)
                                        fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);

                                ret = handle_mm_fault(mm, vma, start,
                                                        fault_flags);

                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
                                        if (ret & (VM_FAULT_HWPOISON |
                                                   VM_FAULT_HWPOISON_LARGE)) {
                                                if (i)
                                                        return i;
                                                else if (gup_flags & FOLL_HWPOISON)
                                                        return -EHWPOISON;
                                                else
                                                        return -EFAULT;
                                        }
                                        if (ret & VM_FAULT_SIGBUS)
                                                goto efault;
                                        BUG();
                                }

                                if (tsk) {
                                        if (ret & VM_FAULT_MAJOR)
                                                tsk->maj_flt++;
                                        else
                                                tsk->min_flt++;
                                }

                                if (ret & VM_FAULT_RETRY) {
                                        if (nonblocking)
                                                *nonblocking = 0;
                                        return i;
                                }

                                /*
                                 * The VM_FAULT_WRITE bit tells us that
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
                                 * pte_write. We can thus safely do subsequent
                                 * page lookups as if they were reads. But only
                                 * do so when looping for pte_write is futile:
                                 * in some cases userspace may also be wanting
                                 * to write to the gotten user page, which a
                                 * read fault here might prevent (a readonly
                                 * page might get reCOWed by userspace write).
                                 */
                                if ((ret & VM_FAULT_WRITE) &&
                                    !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;

                                cond_resched();
                        }
                        if (IS_ERR(page))
                                return i ? i : PTR_ERR(page);
                        if (pages) {
                                pages[i] = page;

                                flush_anon_page(vma, page, start);
                                flush_dcache_page(page);
                                page_mask = 0;
                        }
next_page:
                        if (vmas) {
                                vmas[i] = vma;
                                page_mask = 0;
                        }
                        page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
                        if (page_increm > nr_pages)
                                page_increm = nr_pages;
                        i += page_increm;
                        start += page_increm * PAGE_SIZE;
                        nr_pages -= page_increm;
                } while (nr_pages && start < vma->vm_end);
        } while (nr_pages);
        return i;
efault:
        return i ? : -EFAULT;
}
EXPORT_SYMBOL(__get_user_pages);
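
/*
 * Editor's illustrative sketch, not part of this commit: pinning a range
 * with __get_user_pages() using explicit FOLL_ flags and the @nonblocking
 * protocol documented above.  It assumes that a cleared "locked" means the
 * FAULT_FLAG_ALLOW_RETRY fault path already dropped mmap_sem; the function
 * name and its parameters are hypothetical, and the step that actually uses
 * the pages is omitted.
 */
#if 0   /* example only */
static long example_pin_range(struct mm_struct *mm, unsigned long start,
                              unsigned long nr, struct page **pages)
{
        int locked = 1;
        long ret, i;

        down_read(&mm->mmap_sem);
        ret = __get_user_pages(current, mm, start, nr,
                               FOLL_TOUCH | FOLL_GET | FOLL_WRITE,
                               pages, NULL, &locked);
        if (locked)
                up_read(&mm->mmap_sem);

        /* ... use pages[0..ret-1] here ... */

        /* the caller owns one reference per returned page */
        for (i = 0; i < ret; i++)
                put_page(pages[i]);
        return ret;
}
#endif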

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk: the task_struct to use for page fault accounting, or
 *       NULL if faults are not to be recorded.
 * @mm: mm_struct of target mm
 * @address: user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * handle_mm_fault() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This should be called with the mm_sem held for read.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags)
{
        struct vm_area_struct *vma;
        vm_flags_t vm_flags;
        int ret;

        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                return -EFAULT;

        vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
        if (!(vm_flags & vma->vm_flags))
                return -EFAULT;

        ret = handle_mm_fault(mm, vma, address, fault_flags);
        if (ret & VM_FAULT_ERROR) {
                if (ret & VM_FAULT_OOM)
                        return -ENOMEM;
                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                        return -EHWPOISON;
                if (ret & VM_FAULT_SIGBUS)
                        return -EFAULT;
                BUG();
        }
        if (tsk) {
                if (ret & VM_FAULT_MAJOR)
                        tsk->maj_flt++;
                else
                        tsk->min_flt++;
        }
        return 0;
}
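
/*
 * Editor's illustrative sketch, not part of this commit: the futex-style
 * pattern described above -- probe user memory with page faults disabled,
 * and on failure resolve the fault with fixup_user_fault() before retrying.
 * Loosely modelled on kernel/futex.c (which passes FAULT_FLAG_WRITE because
 * it will write the word later); the function name is hypothetical.
 */
#if 0   /* example only */
static int example_read_user_u32(u32 __user *uaddr, u32 *val)
{
        struct mm_struct *mm = current->mm;
        int ret;

        for (;;) {
                pagefault_disable();
                ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
                pagefault_enable();
                if (!ret)
                        return 0;

                /* the atomic access faulted: fix it up the slow way, retry */
                down_read(&mm->mmap_sem);
                ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
                                       0 /* read fault */);
                up_read(&mm->mmap_sem);
                if (ret)
                        return ret;     /* -EFAULT, -ENOMEM, -EHWPOISON */
        }
}
#endif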

/*
 * get_user_pages() - pin user pages in memory
 * @tsk: the task_struct to use for page fault accounting, or
 *       NULL if faults are not to be recorded.
 * @mm: mm_struct of target mm
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to by the caller
 * @force: whether to force access even when user mapping is currently
 *         protected (but never forces write access to shared mapping).
 * @pages: array that receives pointers to the pages pinned.
 *         Should be at least nr_pages long. Or NULL, if caller
 *         only intends to ensure the pages are faulted in.
 * @vmas: array of pointers to vmas corresponding to each page.
 *        Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages, int write,
                int force, struct page **pages, struct vm_area_struct **vmas)
{
        int flags = FOLL_TOUCH;

        if (pages)
                flags |= FOLL_GET;
        if (write)
                flags |= FOLL_WRITE;
        if (force)
                flags |= FOLL_FORCE;

        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
                                NULL);
}
EXPORT_SYMBOL(get_user_pages);
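
/*
 * Editor's illustrative sketch, not part of this commit: the typical
 * driver-side pinning pattern the comment above describes -- pin a user
 * buffer under mmap_sem, let the device (or kmap access) touch the pages,
 * then dirty and release them.  The function name, "npages" and the omitted
 * DMA step are hypothetical.
 */
#if 0   /* example only */
static long example_pin_user_buffer(unsigned long uaddr, long npages,
                                    struct page **pages)
{
        struct mm_struct *mm = current->mm;
        long got, i;

        down_read(&mm->mmap_sem);
        got = get_user_pages(current, mm, uaddr & PAGE_MASK, npages,
                             1 /* write */, 0 /* force */, pages, NULL);
        up_read(&mm->mmap_sem);
        if (got <= 0)
                return got;

        /* ... DMA into pages[0..got-1], or access them via kmap() ... */

        for (i = 0; i < got; i++) {
                set_page_dirty_lock(pages[i]);  /* the pages were written */
                put_page(pages[i]);
        }
        return got;
}
#endif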

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
        struct vm_area_struct *vma;
        struct page *page;

        if (__get_user_pages(current, current->mm, addr, 1,
                             FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
                             NULL) < 1)
                return NULL;
        flush_cache_page(vma, addr, page_to_pfn(page));
        return page;
}
#endif /* CONFIG_ELF_CORE */
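
/*
 * Editor's illustrative sketch, not part of this commit: how a core-dump
 * writer consumes get_dump_page(), emitting the page when one is returned
 * and a zero-filled hole when it gets NULL, as the comment above describes.
 * Loosely modelled on the writing loop in fs/binfmt_elf.c; the function
 * name is hypothetical and "cprm" is the usual struct coredump_params.
 */
#if 0   /* example only */
static int example_dump_vma(struct coredump_params *cprm,
                            struct vm_area_struct *vma)
{
        unsigned long addr;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
                struct page *page = get_dump_page(addr);
                int stop;

                if (page) {
                        void *kaddr = kmap(page);

                        stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
                        kunmap(page);
                        page_cache_release(page);
                } else {
                        stop = !dump_skip(cprm, PAGE_SIZE);     /* hole */
                }
                if (stop)
                        return -EIO;
        }
        return 0;
}
#endif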