path: root/include/linux/mm.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 20:25:18 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 20:25:18 -0500
commit	a57cb1c1d7974c62a5c80f7869e35b492ace12cd (patch)
tree	5a42ee9a668f171143464bc86013954c1bbe94ad /include/linux/mm.h
parent	cf1b3341afab9d3ad02a76b3a619ea027dcf4e28 (diff)
parent	e1e14ab8411df344a17687821f8f78f0a1e73cbb (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - a few misc things
 - kexec updates
 - DMA-mapping updates to better support networking DMA operations
 - IPC updates
 - various MM changes to improve DAX fault handling
 - lots of radix-tree changes, mainly to the test suite. All leading up
   to reimplementing the IDA/IDR code to be a wrapper layer over the
   radix-tree. However the final trigger-pulling patch is held off for
   4.11.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (114 commits)
  radix tree test suite: delete unused rcupdate.c
  radix tree test suite: add new tag check
  radix-tree: ensure counts are initialised
  radix tree test suite: cache recently freed objects
  radix tree test suite: add some more functionality
  idr: reduce the number of bits per level from 8 to 6
  rxrpc: abstract away knowledge of IDR internals
  tpm: use idr_find(), not idr_find_slowpath()
  idr: add ida_is_empty
  radix tree test suite: check multiorder iteration
  radix-tree: fix replacement for multiorder entries
  radix-tree: add radix_tree_split_preload()
  radix-tree: add radix_tree_split
  radix-tree: add radix_tree_join
  radix-tree: delete radix_tree_range_tag_if_tagged()
  radix-tree: delete radix_tree_locate_item()
  radix-tree: improve multiorder iterators
  btrfs: fix race in btrfs_free_dummy_fs_info()
  radix-tree: improve dump output
  radix-tree: make radix_tree_find_next_bit more useful
  ...
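Among the MM changes pulled in here, the mm.h hunks below fold the old struct fault_env into struct vm_fault, so the fault-path callbacks receive a single context structure. As a rough illustration only (a hypothetical handler, not code from this merge), a ->map_pages implementation that used to read fields from a struct fault_env *fe now reads the same fields from struct vm_fault *vmf:

	/* Hypothetical handler; shows the fields renamed by this series. */
	static void example_map_pages(struct vm_fault *vmf,
				      pgoff_t start_pgoff, pgoff_t end_pgoff)
	{
		/* Previously fe->vma and fe->address; now vmf->vma and vmf->address. */
		pr_debug("map_pages: vma=%p addr=%#lx range=[%lu, %lu]\n",
			 vmf->vma, vmf->address, start_pgoff, end_pgoff);
	}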
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	46
1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0b5b2e4df14e..4424784ac374 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -292,36 +292,23 @@ extern pgprot_t protection_map[16];
  * pgoff should be used in favour of virtual_address, if possible.
  */
 struct vm_fault {
+	struct vm_area_struct *vma;	/* Target VMA */
 	unsigned int flags;		/* FAULT_FLAG_xxx flags */
 	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
 	pgoff_t pgoff;			/* Logical page offset based on vma */
-	void __user *virtual_address;	/* Faulting virtual address */
+	unsigned long address;		/* Faulting virtual address */
+	pmd_t *pmd;			/* Pointer to pmd entry matching
+					 * the 'address' */
+	pte_t orig_pte;			/* Value of PTE at the time of fault */
 
-	struct page *cow_page;		/* Handler may choose to COW */
+	struct page *cow_page;		/* Page handler may use for COW fault */
+	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
 	struct page *page;		/* ->fault handlers should return a
 					 * page here, unless VM_FAULT_NOPAGE
 					 * is set (which is also implied by
 					 * VM_FAULT_ERROR).
 					 */
-	void *entry;			/* ->fault handler can alternatively
-					 * return locked DAX entry. In that
-					 * case handler should return
-					 * VM_FAULT_DAX_LOCKED and fill in
-					 * entry here.
-					 */
-};
-
-/*
- * Page fault context: passes though page fault handler instead of endless list
- * of function arguments.
- */
-struct fault_env {
-	struct vm_area_struct *vma;	/* Target VMA */
-	unsigned long address;		/* Faulting virtual address */
-	unsigned int flags;		/* FAULT_FLAG_xxx flags */
-	pmd_t *pmd;			/* Pointer to pmd entry matching
-					 * the 'address'
-					 */
+	/* These three entries are valid only while holding ptl lock */
 	pte_t *pte;			/* Pointer to pte entry matching
 					 * the 'address'. NULL if the page
 					 * table hasn't been allocated.
@@ -351,7 +338,7 @@ struct vm_operations_struct {
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
 						pmd_t *, unsigned int flags);
-	void (*map_pages)(struct fault_env *fe,
+	void (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 	/* notification that a previously read-only page is about to become
@@ -625,8 +612,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
+int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		struct page *page);
+int finish_fault(struct vm_fault *vmf);
+int finish_mkwrite_fault(struct vm_fault *vmf);
 #endif
 
 /*
@@ -1110,7 +1099,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
 #define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
 #define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
-#define VM_FAULT_DAX_LOCKED 0x1000	/* ->fault has locked DAX entry */
+#define VM_FAULT_DONE_COW   0x1000	/* ->fault has fully handled COW */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
@@ -1221,6 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
+	       spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
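follow_pte() is newly declared here; on success it returns with the PTE mapped and its page-table lock held, so the caller reads the entry and then drops the lock. A hedged sketch of that pattern (hypothetical caller, assuming the usual pte_unmap_unlock() pairing):

	/* Hypothetical caller: look up the pfn backing 'address' in 'mm'. */
	static int example_lookup_pfn(struct mm_struct *mm, unsigned long address,
				      unsigned long *pfn)
	{
		pte_t *ptep;
		spinlock_t *ptl;
		int ret;

		ret = follow_pte(mm, address, &ptep, &ptl);
		if (ret)
			return ret;		/* no pte mapped at this address */

		*pfn = pte_pfn(*ptep);		/* read the entry under ptl */
		pte_unmap_unlock(ptep, ptl);	/* drop the page-table lock */
		return 0;
	}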
@@ -1276,15 +1267,12 @@ extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
-			    struct vm_area_struct **vmas);
+			    struct vm_area_struct **vmas, int *locked);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages, int *locked);
-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-			       unsigned long start, unsigned long nr_pages,
-			       struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
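get_user_pages_remote() grows an int *locked argument in this hunk, matching get_user_pages_locked(): if GUP had to drop mmap_sem internally, *locked is cleared so the caller knows not to unlock again. A sketch of a caller under the locking rules of this era (hypothetical helper; FOLL_WRITE chosen only for illustration):

	/* Hypothetical caller pinning pages of another task's mm. */
	static long example_pin_remote(struct task_struct *tsk, struct mm_struct *mm,
				       unsigned long start, unsigned long nr_pages,
				       struct page **pages)
	{
		int locked = 1;
		long pinned;

		down_read(&mm->mmap_sem);
		pinned = get_user_pages_remote(tsk, mm, start, nr_pages,
					       FOLL_WRITE, pages, NULL, &locked);
		if (locked)
			up_read(&mm->mmap_sem);	/* only if GUP did not drop it */

		return pinned;			/* caller later put_page()s each page */
	}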
@@ -2099,7 +2087,7 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
-extern void filemap_map_pages(struct fault_env *fe,
+extern void filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff);
 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 
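With filemap_map_pages() now taking struct vm_fault *, the generic helpers above still slot straight into a vm_operations_struct. Roughly what a stackable filesystem's mmap ops look like after this change (the struct name is illustrative):

	/* Illustrative vm_operations_struct wiring the generic helpers. */
	static const struct vm_operations_struct example_file_vm_ops = {
		.fault		= filemap_fault,	/* (vma, vmf) signature */
		.map_pages	= filemap_map_pages,	/* now takes struct vm_fault * */
		.page_mkwrite	= filemap_page_mkwrite,
	};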