author		H. Peter Anvin <hpa@zytor.com>	2010-02-10 19:55:28 -0500
committer	H. Peter Anvin <hpa@zytor.com>	2010-02-10 19:55:28 -0500
commit		84abd88a70090cf00f9e45c3a81680874f17626e (patch)
tree		4f58b80057f6e1f5817af1dc33a5458b3dfc9a99 /include/linux/mm.h
parent		13ca0fcaa33f6b1984c4111b6ec5df42689fea6f (diff)
parent		e28cab42f384745c8a947a9ccd51e4aae52f5d51 (diff)
Merge remote branch 'linus/master' into x86/bootmem
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	37
1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bad433fdbfce..8b2fa8593c61 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -622,13 +622,22 @@ void page_address_init(void);
 /*
  * On an anonymous page mapped into a user virtual memory area,
  * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
  *
  * Please note that, confusingly, "page_mapping" refers to the inode
  * address_space which maps the page from disk; whereas "page_mapped"
  * refers to user virtual address space into which the page is mapped.
  */
 #define PAGE_MAPPING_ANON	1
+#define PAGE_MAPPING_KSM	2
+#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
 
 extern struct address_space swapper_space;
 static inline struct address_space *page_mapping(struct page *page)
@@ -636,16 +645,19 @@ static inline struct address_space *page_mapping(struct page *page)
 	struct address_space *mapping = page->mapping;
 
 	VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-	else
-#endif
-	if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
 }
 
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+static inline void *page_rmapping(struct page *page)
+{
+	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+}
+
 static inline int PageAnon(struct page *page)
 {
 	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
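[Editor's note] The PAGE_MAPPING_ANON/PAGE_MAPPING_KSM bits work because page->mapping only ever points at word-aligned structures, so the low bits of the pointer are always zero and can carry type tags; page_rmapping() above recovers the untagged pointer by masking with ~PAGE_MAPPING_FLAGS. A minimal userspace sketch of the same tagged-pointer technique follows; the MAPPING_* names and the struct are illustrative stand-ins, not kernel code:

#include <assert.h>
#include <stdio.h>

#define MAPPING_ANON	1UL
#define MAPPING_KSM	2UL
#define MAPPING_FLAGS	(MAPPING_ANON | MAPPING_KSM)

struct anon_vma { int dummy; };	/* stand-in for the real structure */

int main(void)
{
	static struct anon_vma av;	/* word-aligned, so low bits are free */
	void *mapping;
	struct anon_vma *untagged;

	/* Tag the pointer the way anonymous pages tag page->mapping. */
	mapping = (void *)((unsigned long)&av | MAPPING_ANON);

	/* Test a tag bit, as PageAnon() does. */
	assert(((unsigned long)mapping & MAPPING_ANON) != 0);

	/* Strip the tag bits, as page_rmapping() does. */
	untagged = (struct anon_vma *)((unsigned long)mapping & ~MAPPING_FLAGS);
	assert(untagged == &av);

	printf("tagged %p -> untagged %p\n", mapping, (void *)untagged);
	return 0;
}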
@@ -760,6 +772,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
  * @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
  *
  * (see walk_page_range for more details)
  */
@@ -769,6 +782,8 @@ struct mm_walk {
 	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
 	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+	int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+			     struct mm_walk *);
 	struct mm_struct *mm;
 	void *private;
 };
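[Editor's note] The new hugetlb_entry callback slots into the existing mm_walk pattern: a caller fills in whichever callbacks it needs, leaves the rest NULL, and hands the struct to walk_page_range(). The sketch below shows a hypothetical caller, assuming the 2.6.33-era walk_page_range(addr, end, &walk) signature; count_pte_entry() and count_present() are invented for illustration and this is not buildable outside a kernel tree:

#include <linux/mm.h>

/* Invented example callback: count present PTEs into walk->private. */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* returning nonzero aborts the walk */
}

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte_entry,	/* others left NULL */
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);	/* the walk requires mmap_sem held */
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
	return count;
}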
@@ -1024,6 +1039,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
 extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
 					unsigned long end_pfn);
 extern void remove_all_active_ranges(void);
+void sort_node_map(void);
+unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
+						unsigned long end_pfn);
 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 						unsigned long end_pfn);
 extern void get_pfn_range_for_nid(unsigned int nid,
@@ -1073,6 +1091,7 @@ extern void zone_pcp_update(struct zone *zone);
 
 /* nommu.c */
 extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
 
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
@@ -1318,11 +1337,17 @@ extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
 						size_t size);
 extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
+enum mf_flags {
+	MF_COUNT_INCREASED = 1 << 0,
+};
 extern void memory_failure(unsigned long pfn, int trapno);
-extern int __memory_failure(unsigned long pfn, int trapno, int ref);
+extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
+extern void shake_page(struct page *p, int access);
 extern atomic_long_t mce_bad_pages;
+extern int soft_offline_page(struct page *page, int flags);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
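[Editor's note] The new mf_flags enum replaces the old bare "ref" argument of __memory_failure(): a caller that has already raised the page's reference count passes MF_COUNT_INCREASED so the handler does not take its own reference. A hypothetical caller, sketched under that assumption; report_bad_pfn() is invented for illustration:

#include <linux/mm.h>

/* Invented example: report a suspect pfn, conveying whether we
 * managed to take our own reference on the page first. */
static void report_bad_pfn(unsigned long pfn, int trapno)
{
	struct page *p;

	if (!pfn_valid(pfn))
		return;

	p = pfn_to_page(pfn);
	if (get_page_unless_zero(p))
		/* refcount already raised by us: say so via the flag */
		__memory_failure(pfn, trapno, MF_COUNT_INCREASED);
	else
		/* no reference taken: let the handler get its own */
		__memory_failure(pfn, trapno, 0);
}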