Diffstat (limited to 'include/linux/mm.h')

-rw-r--r--   include/linux/mm.h | 46 +++++++++++++++++-----------------------------
 1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0b5b2e4df14e..4424784ac374 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -292,36 +292,23 @@ extern pgprot_t protection_map[16];
  * pgoff should be used in favour of virtual_address, if possible.
  */
 struct vm_fault {
+	struct vm_area_struct *vma;	/* Target VMA */
 	unsigned int flags;		/* FAULT_FLAG_xxx flags */
 	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
 	pgoff_t pgoff;			/* Logical page offset based on vma */
-	void __user *virtual_address;	/* Faulting virtual address */
+	unsigned long address;		/* Faulting virtual address */
+	pmd_t *pmd;			/* Pointer to pmd entry matching
+					 * the 'address' */
+	pte_t orig_pte;			/* Value of PTE at the time of fault */
 
-	struct page *cow_page;		/* Handler may choose to COW */
+	struct page *cow_page;		/* Page handler may use for COW fault */
+	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
 	struct page *page;		/* ->fault handlers should return a
 					 * page here, unless VM_FAULT_NOPAGE
 					 * is set (which is also implied by
 					 * VM_FAULT_ERROR).
 					 */
-	void *entry;			/* ->fault handler can alternatively
-					 * return locked DAX entry. In that
-					 * case handler should return
-					 * VM_FAULT_DAX_LOCKED and fill in
-					 * entry here.
-					 */
-};
-
-/*
- * Page fault context: passes though page fault handler instead of endless list
- * of function arguments.
- */
-struct fault_env {
-	struct vm_area_struct *vma;	/* Target VMA */
-	unsigned long address;		/* Faulting virtual address */
-	unsigned int flags;		/* FAULT_FLAG_xxx flags */
-	pmd_t *pmd;			/* Pointer to pmd entry matching
-					 * the 'address'
-					 */
+	/* These three entries are valid only while holding ptl lock */
 	pte_t *pte;			/* Pointer to pte entry matching
 					 * the 'address'. NULL if the page
 					 * table hasn't been allocated.
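The former struct fault_env fields (vma, address, flags, pmd) are folded into struct vm_fault, so a single structure now carries the whole fault state through the fault path. A minimal before/after sketch of a driver's ->fault handler; my_vma_fault() is illustrative, not part of this patch:

static int my_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* Before this patch the faulting address was a user pointer:
	 *	unsigned long addr = (unsigned long)vmf->virtual_address;
	 * After it, the address is a plain unsigned long, and the VMA is
	 * also reachable as vmf->vma (still aliasing @vma here). */
	unsigned long addr = vmf->address;

	pr_debug("fault at %#lx in VMA %p\n", addr, vmf->vma);
	return VM_FAULT_SIGBUS;		/* placeholder result */
}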
@@ -351,7 +338,7 @@ struct vm_operations_struct {
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
 						pmd_t *, unsigned int flags);
-	void (*map_pages)(struct fault_env *fe,
+	void (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 	/* notification that a previously read-only page is about to become
@@ -625,8 +612,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
+int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		struct page *page);
+int finish_fault(struct vm_fault *vmf);
+int finish_mkwrite_fault(struct vm_fault *vmf);
 #endif
 
 /*
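finish_fault() and finish_mkwrite_fault() are new exports alongside alloc_set_pte(). Going by the names and the reworked struct above, they appear to complete the page-table side of a fault once a handler has filled in vmf->page; a hedged sketch under that assumption, where my_file_fault() and my_lookup_page() are hypothetical:

static int my_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	vmf->page = my_lookup_page(vmf);	/* hypothetical page lookup */
	if (!vmf->page)
		return VM_FAULT_SIGBUS;
	/* Let the core install the PTE for vmf->page (or for vmf->cow_page
	 * on a write fault in a private mapping): */
	return finish_fault(vmf);
}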
@@ -1110,7 +1099,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
 #define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
 #define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
-#define VM_FAULT_DAX_LOCKED 0x1000	/* ->fault has locked DAX entry */
+#define VM_FAULT_DONE_COW   0x1000	/* ->fault has fully handled COW */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
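VM_FAULT_DAX_LOCKED goes away together with the vm_fault.entry field removed above, and its bit value 0x1000 is reused for VM_FAULT_DONE_COW. Read against the new cow_page comment ("Page handler may use for COW fault"), the likely contract, though this header alone does not confirm it, is that a handler which copies the data into vmf->cow_page itself returns this flag so the core skips its generic COW path; my_dax_fault() is illustrative:

static int my_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	if (vmf->cow_page) {
		/* Assumed usage: copy the faulted data into vmf->cow_page,
		 * then tell the core that COW is fully handled. */
		return VM_FAULT_DONE_COW;
	}
	return VM_FAULT_NOPAGE;
}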
@@ -1221,6 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
+	       spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
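follow_pte() becomes part of the public API next to follow_pfn() and follow_phys(). Assuming it follows its neighbours' convention (0 on success, with the PTE mapped and its spinlock held), a caller would look roughly like this; my_read_pte() is illustrative:

static int my_read_pte(struct mm_struct *mm, unsigned long address, pte_t *out)
{
	pte_t *ptep;
	spinlock_t *ptl;

	if (follow_pte(mm, address, &ptep, &ptl))
		return -EINVAL;		/* nothing mapped at address */
	*out = *ptep;
	pte_unmap_unlock(ptep, ptl);	/* drop the lock follow_pte() took */
	return 0;
}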
@@ -1276,15 +1267,12 @@ extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
-			    struct vm_area_struct **vmas);
+			    struct vm_area_struct **vmas, int *locked);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages, int *locked);
-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-			       unsigned long start, unsigned long nr_pages,
-			       struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
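get_user_pages_remote() gains an int *locked argument, mirroring get_user_pages_locked() above, while __get_user_pages_unlocked() is dropped. Presumably callers that do not care about the retry/unlock protocol can pass NULL; a hedged example, with pin_remote_pages() as an invented wrapper:

static long pin_remote_pages(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     struct page **pages)
{
	/* NULL for @locked assumes the pre-patch behaviour of returning
	 * with the mmap_sem state unchanged; NULL @vmas as before. */
	return get_user_pages_remote(tsk, mm, start, nr_pages,
				     FOLL_WRITE, pages, NULL, NULL);
}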
@@ -2099,7 +2087,7 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
-extern void filemap_map_pages(struct fault_env *fe,
+extern void filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff);
 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);