Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	37
1 file changed, 13 insertions(+), 24 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 68a5121694ef..a61ebe8ad4ca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -456,6 +456,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 {
 	static const struct vm_operations_struct dummy_vm_ops = {};
 
+	memset(vma, 0, sizeof(*vma));
 	vma->vm_mm = mm;
 	vma->vm_ops = &dummy_vm_ops;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
@@ -727,10 +728,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
+vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		struct page *page);
-int finish_fault(struct vm_fault *vmf);
-int finish_mkwrite_fault(struct vm_fault *vmf);
+vm_fault_t finish_fault(struct vm_fault *vmf);
+vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #endif
 
 /*
@@ -959,15 +960,6 @@ static inline int page_zone_id(struct page *page)
 	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 }
 
-static inline int zone_to_nid(struct zone *zone)
-{
-#ifdef CONFIG_NUMA
-	return zone->node;
-#else
-	return 0;
-#endif
-}
-
 #ifdef NODE_NOT_IN_PAGE_FLAGS
 extern int page_to_nid(const struct page *page);
 #else
@@ -1411,8 +1403,8 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
-extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
-		unsigned int flags);
+extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
+			unsigned long address, unsigned int flags);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long address, unsigned int fault_flags,
 		bool *unlocked);
@@ -1421,7 +1413,7 @@ void unmap_mapping_pages(struct address_space *mapping,
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
 #else
-static inline int handle_mm_fault(struct vm_area_struct *vma,
+static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags)
 {
 	/* should never happen if there's no MMU */
@@ -2023,7 +2015,7 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
 
 extern void __init pagecache_init(void);
 extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, unsigned long * zones_size,
+extern void __init free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
 extern void free_initmem(void);
 
@@ -2571,7 +2563,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_COW	0x4000	/* internal GUP flag */
 #define FOLL_ANON	0x8000	/* don't do file mappings */
 
-static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
 {
 	if (vm_fault & VM_FAULT_OOM)
 		return -ENOMEM;
@@ -2665,12 +2657,7 @@ extern int randomize_va_space;
 const char * arch_vma_name(struct vm_area_struct *vma);
 void print_vma_addr(char *prefix, unsigned long rip);
 
-void sparse_mem_maps_populate_node(struct page **map_map,
-				   unsigned long pnum_begin,
-				   unsigned long pnum_end,
-				   unsigned long map_count,
-				   int nodeid);
-
+void *sparse_buffer_alloc(unsigned long size);
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
 		struct vmem_altmap *altmap);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
@@ -2744,6 +2731,7 @@ enum mf_action_page_type {
 	MF_MSG_TRUNCATED_LRU,
 	MF_MSG_BUDDY,
 	MF_MSG_BUDDY_2ND,
+	MF_MSG_DAX,
 	MF_MSG_UNKNOWN,
 };
 
@@ -2752,7 +2740,8 @@ extern void clear_huge_page(struct page *page,
 		unsigned long addr_hint,
 		unsigned int pages_per_huge_page);
 extern void copy_user_huge_page(struct page *dst, struct page *src,
-			unsigned long addr, struct vm_area_struct *vma,
+			unsigned long addr_hint,
+			struct vm_area_struct *vma,
 			unsigned int pages_per_huge_page);
 extern long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
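
The recurring change in this diff is moving fault-path return values from plain int to the vm_fault_t typedef, so VM_FAULT_* bit codes cannot be silently mixed up with errno values; vm_fault_to_errno() (partially visible above) is the translation point between the two. Below is a minimal, self-contained C sketch of that pattern; the typedef, flag values, and caller are simplified stand-ins for illustration, not the kernel's actual definitions.

/* Illustrative sketch only: typedef, flag values and the fault handler
 * below are simplified stand-ins, not the kernel's implementation. */
#include <errno.h>
#include <stdio.h>

typedef unsigned int vm_fault_t;	/* distinct type for VM_FAULT_* bit codes */

#define VM_FAULT_OOM      0x0001	/* illustrative values */
#define VM_FAULT_SIGBUS   0x0002
#define VM_FAULT_HWPOISON 0x0010

/* Mirrors the idea of vm_fault_to_errno(): map VM_FAULT_* bits to an
 * errno-style int, or 0 when the fault was handled. */
static int fault_code_to_errno(vm_fault_t vm_fault)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON))
		return -EFAULT;
	return 0;
}

/* Stand-in for a fault handler that now returns vm_fault_t, not int. */
static vm_fault_t fake_handle_fault(unsigned long address)
{
	return address ? 0 : VM_FAULT_SIGBUS;
}

int main(void)
{
	vm_fault_t ret = fake_handle_fault(0);

	/* Callers keep int for errno-style results; only the fault code
	 * itself carries the vm_fault_t type. */
	printf("errno-style result: %d\n", fault_code_to_errno(ret));
	return 0;
}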
