Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	37
1 file changed, 31 insertions, 6 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 24c395694f4d..60c467bfbabd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,13 +620,22 @@ void page_address_init(void);
 /*
  * On an anonymous page mapped into a user virtual memory area,
  * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
  *
  * Please note that, confusingly, "page_mapping" refers to the inode
  * address_space which maps the page from disk; whereas "page_mapped"
  * refers to user virtual address space into which the page is mapped.
  */
 #define PAGE_MAPPING_ANON	1
+#define PAGE_MAPPING_KSM	2
+#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
 
 extern struct address_space swapper_space;
 static inline struct address_space *page_mapping(struct page *page)
@@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page)
 	struct address_space *mapping = page->mapping;
 
 	VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-	else
-#endif
-	if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
 }
 
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+static inline void *page_rmapping(struct page *page)
+{
+	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+}
+
 static inline int PageAnon(struct page *page)
 {
 	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
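
The two low bits of page->mapping thus act as a pointer tag: mapping pointers are word-aligned, so bits 0 and 1 are free to record what the pointer actually is, namely an address_space (both clear), an anon_vma (PAGE_MAPPING_ANON set), or KSM's private tracking structure (both bits set). A minimal sketch of decoding that tag, assuming the definitions above (the kernel's real test is PageKsm() in ksm.h; page_is_ksm() here is an illustrative name):

/* Illustrative helper: the kernel's own test is PageKsm() in ksm.h. */
static inline int page_is_ksm(struct page *page)
{
	/* A merged KSM page carries PAGE_MAPPING_ANON and PAGE_MAPPING_KSM. */
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

page_rmapping() masks both bits off, so a caller that only needs the raw pointer value does not have to care which of the three variants it holds.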
@@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
  * @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
  *
  * (see walk_page_range for more details)
  */
@@ -767,6 +780,8 @@ struct mm_walk {
 	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+	int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+			     struct mm_walk *);
 	struct mm_struct *mm;
 	void *private;
 };
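
struct mm_walk is the callback table consumed by walk_page_range(): the walker descends the page tables of walk->mm between two addresses and invokes whichever hooks are non-NULL, handing walk->private through to each callback. A sketch of a walker that counts present PTEs, assuming the caller already holds mmap_sem (count_pte() and count_present_ptes() are illustrative names, not kernel functions):

/* Count present PTEs in [start, end); caller must hold mmap_sem. */
static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* a nonzero return would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &count,
	};

	walk_page_range(start, end, &walk);
	return count;
}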
@@ -1022,6 +1037,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
 extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
 					unsigned long end_pfn);
 extern void remove_all_active_ranges(void);
+void sort_node_map(void);
+unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
+						unsigned long end_pfn);
 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 						unsigned long end_pfn);
 extern void get_pfn_range_for_nid(unsigned int nid,
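
absent_pages_in_range() reports how many page frames in [start_pfn, end_pfn) are holes with no backing memory registered in the early node map; __absent_pages_in_range() is the per-node variant behind it. A caller wanting the pages that are present could therefore subtract, along these lines (present_pages_in_range() is an illustrative helper, not part of this header):

/* Illustrative: the span minus the holes reported for it. */
static unsigned long present_pages_in_range(unsigned long start_pfn,
					    unsigned long end_pfn)
{
	return (end_pfn - start_pfn) -
			absent_pages_in_range(start_pfn, end_pfn);
}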
@@ -1071,6 +1089,7 @@ extern void zone_pcp_update(struct zone *zone);
 
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
 
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
@@ -1316,11 +1335,17 @@ extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
 					size_t size);
 extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
+enum mf_flags {
+	MF_COUNT_INCREASED = 1 << 0,
+};
 extern void memory_failure(unsigned long pfn, int trapno);
-extern int __memory_failure(unsigned long pfn, int trapno, int ref);
+extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
+extern void shake_page(struct page *p, int access);
 extern atomic_long_t mce_bad_pages;
+extern int soft_offline_page(struct page *page, int flags);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
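
enum mf_flags supplies the flags argument of __memory_failure(): MF_COUNT_INCREASED means the caller already holds a reference on the page, so the handler should not take another. A usage sketch under that assumption (poison_held_page() and its trapno plumbing are illustrative, not kernel code):

/* Illustrative: report failure on a page we already hold a reference on. */
static int poison_held_page(struct page *page, int trapno)
{
	unsigned long pfn = page_to_pfn(page);

	return __memory_failure(pfn, trapno, MF_COUNT_INCREASED);
}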