Diffstat (limited to 'include/linux/mm.h')

 include/linux/mm.h | 167 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 156 insertions(+), 11 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 24c395694f4d..462acaf36f3a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,6 +12,7 @@
 #include <linux/prio_tree.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
+#include <linux/range.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -265,6 +266,8 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
+extern int page_is_ram(unsigned long pfn);
+
 /* Support for virtually mapped pages */
 struct page *vmalloc_to_page(const void *addr);
 unsigned long vmalloc_to_pfn(const void *addr);
@@ -620,13 +623,22 @@ void page_address_init(void);
 /*
  * On an anonymous page mapped into a user virtual memory area,
  * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
  *
  * Please note that, confusingly, "page_mapping" refers to the inode
  * address_space which maps the page from disk; whereas "page_mapped"
  * refers to user virtual address space into which the page is mapped.
  */
 #define PAGE_MAPPING_ANON	1
+#define PAGE_MAPPING_KSM	2
+#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
 
 extern struct address_space swapper_space;
 static inline struct address_space *page_mapping(struct page *page)
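The two low bits of page->mapping now form a small tagged-pointer scheme: both struct address_space and anon_vma pointers are at least four-byte aligned, so bits 0-1 are free to carry type information. The corresponding test for a merged page lives in ksm.h rather than in this header; a minimal sketch of how such a check decodes the bits, reconstructed here rather than quoted from ksm.h:

/* Sketch (cf. ksm.h): a KSM page has both low tag bits set. */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}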
@@ -634,16 +646,19 @@ static inline struct address_space *page_mapping(struct page *page)
 	struct address_space *mapping = page->mapping;
 
 	VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-	else
-#endif
-	if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
 }
 
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+static inline void *page_rmapping(struct page *page)
+{
+	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+}
+
 static inline int PageAnon(struct page *page)
 {
 	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
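Note how the helpers divide the work: page_mapping() deliberately hides anonymous pages by returning NULL, PageAnon() only tests the tag bit, and the new page_rmapping() strips both tag bits to expose whatever object the pointer actually addresses. A hedged sketch of a typical consumer, an accessor that returns the anon_vma only for an ordinary (non-KSM) anonymous page; the function name is illustrative, not part of this diff:

/* Sketch: NULL for file-backed pages and for KSM pages, whose mapping
 * points at KSM-private data rather than an anon_vma. */
static inline struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}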
@@ -758,6 +773,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
  * @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
  *
  * (see walk_page_range for more details)
  */
@@ -767,6 +783,8 @@ struct mm_walk {
 	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+	int (*hugetlb_entry)(pte_t *, unsigned long,
+			     unsigned long, unsigned long, struct mm_walk *);
 	struct mm_struct *mm;
 	void *private;
 };
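The extra callback lets walk_page_range() hand hugetlb ranges to the caller as single entries instead of descending into lower page-table levels that hugetlb mappings do not have. A minimal sketch of a walker that counts resident hugetlb pages; the parameter meanings assumed here (huge-page mask, start, end) follow the prototype above but are not spelled out in this diff:

/* Sketch: count resident hugetlb pages; walk->private points at the
 * counter, and a non-zero return would abort the walk. */
static int count_hugetlb(pte_t *pte, unsigned long hmask,
			 unsigned long addr, unsigned long end,
			 struct mm_walk *walk)
{
	unsigned long *resident = walk->private;

	if (!pte_none(*pte))
		*resident += (end - addr) >> PAGE_SHIFT;
	return 0;
}

A caller would fill in .hugetlb_entry = count_hugetlb along with .mm and .private, then run walk_page_range() over the VMA as usual.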
@@ -852,6 +870,114 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages);
+/*
+ * per-process (per-mm_struct) statistics.
+ */
+#if defined(SPLIT_RSS_COUNTING)
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_set(&mm->rss_stat.count[member], value);
+}
+
+unsigned long get_mm_counter(struct mm_struct *mm, int member);
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	atomic_long_add(value, &mm->rss_stat.count[member]);
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_inc(&mm->rss_stat.count[member]);
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	atomic_long_dec(&mm->rss_stat.count[member]);
+}
+
+#else  /* !SPLIT_RSS_COUNTING */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
+static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] = value;
+}
+
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+	return mm->rss_stat.count[member];
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+	mm->rss_stat.count[member] += value;
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]++;
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+	mm->rss_stat.count[member]--;
+}
+
+#endif /* !SPLIT_RSS_COUNTING */
+
+static inline unsigned long get_mm_rss(struct mm_struct *mm)
+{
+	return get_mm_counter(mm, MM_FILEPAGES) +
+		get_mm_counter(mm, MM_ANONPAGES);
+}
+
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
+
+static inline void update_hiwater_rss(struct mm_struct *mm)
+{
+	unsigned long _rss = get_mm_rss(mm);
+
+	if (mm->hiwater_rss < _rss)
+		mm->hiwater_rss = _rss;
+}
+
+static inline void update_hiwater_vm(struct mm_struct *mm)
+{
+	if (mm->hiwater_vm < mm->total_vm)
+		mm->hiwater_vm = mm->total_vm;
+}
+
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
+					 struct mm_struct *mm)
+{
+	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+	if (*maxrss < hiwater_rss)
+		*maxrss = hiwater_rss;
+}
+
+#if defined(SPLIT_RSS_COUNTING)
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
+#else
+static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+{
+}
+#endif
 
 /*
  * A callback you can register to apply pressure to ageable caches.
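Both flavors expose one API; only the locking rule stated in the two comments differs, so call sites need not care whether SPLIT_RSS_COUNTING is in effect. A hedged sketch of the expected pattern around mapping and unmapping an anonymous page (function names are illustrative; the real call sites in mm/memory.c also batch updates per task before sync_mm_rss() flushes them):

/* Sketch: RSS accounting around one anonymous page. */
static void account_anon_map(struct mm_struct *mm)
{
	inc_mm_counter(mm, MM_ANONPAGES);	/* atomic iff split counting */
}

static void account_anon_unmap(struct mm_struct *mm)
{
	/* Record the peak first, so get_mm_hiwater_rss() still reports
	 * the high-water mark after RSS drops. */
	update_hiwater_rss(mm);
	dec_mm_counter(mm, MM_ANONPAGES);
}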
@@ -1022,6 +1148,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
 extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
 					unsigned long end_pfn);
 extern void remove_all_active_ranges(void);
+void sort_node_map(void);
+unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
+						unsigned long end_pfn);
 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 						unsigned long end_pfn);
 extern void get_pfn_range_for_nid(unsigned int nid,
@@ -1029,6 +1158,10 @@ extern void get_pfn_range_for_nid(unsigned int nid,
 extern unsigned long find_min_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
+int add_from_early_node_map(struct range *range, int az,
+				   int nr_range, int nid);
+void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
+				 u64 goal, u64 limit);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
@@ -1061,16 +1194,13 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern int after_bootmem;
 
-#ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
 
 extern void zone_pcp_update(struct zone *zone);
 
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
 
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
@@ -1092,7 +1222,7 @@ static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
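Since vma_adjust() now returns int, it can report failure partway through (an allocation failure while relinking supporting structures is the plausible case), and callers are expected to propagate that instead of assuming success. A hedged sketch of a vma_merge()-style call site:

/* Sketch: a caller propagating the new error return. */
if (vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL))
	return NULL;	/* merge failed; fall back to a separate vma */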
@@ -1300,12 +1430,19 @@ extern int randomize_va_space;
 const char * arch_vma_name(struct vm_area_struct *vma);
 void print_vma_addr(char *prefix, unsigned long rip);
 
+void sparse_mem_maps_populate_node(struct page **map_map,
+				   unsigned long pnum_begin,
+				   unsigned long pnum_end,
+				   unsigned long map_count,
+				   int nodeid);
+
 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
 void *vmemmap_alloc_block(unsigned long size, int node);
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(struct page *start_page,
 						unsigned long pages, int node);
@@ -1316,11 +1453,19 @@ extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
 						size_t size);
 extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
+enum mf_flags {
+	MF_COUNT_INCREASED = 1 << 0,
+};
 extern void memory_failure(unsigned long pfn, int trapno);
-extern int __memory_failure(unsigned long pfn, int trapno, int ref);
+extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
+extern void shake_page(struct page *p, int access);
 extern atomic_long_t mce_bad_pages;
+extern int soft_offline_page(struct page *page, int flags);
+
+extern void dump_page(struct page *page);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
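The additions in this last hunk round out the hwpoison interface: the mf_flags bit lets a caller that already holds a reference on the page say so, unpoison_memory() reverses an injected failure, soft_offline_page() migrates contents away before retiring a page, and dump_page() is the debugging aid used by these paths. A hedged sketch of an injector-style caller; the function name and trapno value are illustrative:

/* Sketch: report a failure on a page already pinned (e.g. via
 * get_user_pages()), so memory-failure must not take its own ref. */
static int poison_pinned_page(struct page *page)
{
	return __memory_failure(page_to_pfn(page), 0 /* trapno */,
				MF_COUNT_INCREASED);
}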