Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	100
1 file changed, 9 insertions(+), 91 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6571dd78e984..8b086070c3a5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -251,6 +251,9 @@ struct vm_operations_struct {
 	 * writable, if an error is returned it will cause a SIGBUS */
 	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
+	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
+	int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
 	/* called by access_process_vm when get_user_pages() fails, typically
 	 * for use by special VMAs that can switch between memory and hardware
 	 */
@@ -494,18 +497,9 @@ static inline int page_count(struct page *page)
 	return atomic_read(&compound_head(page)->_count);
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-extern int PageHeadHuge(struct page *page_head);
-#else /* CONFIG_HUGETLB_PAGE */
-static inline int PageHeadHuge(struct page *page_head)
-{
-	return 0;
-}
-#endif /* CONFIG_HUGETLB_PAGE */
-
 static inline bool __compound_tail_refcounted(struct page *page)
 {
-	return !PageSlab(page) && !PageHeadHuge(page);
+	return PageAnon(page) && !PageSlab(page) && !PageHeadHuge(page);
 }
 
 /*
@@ -571,53 +565,6 @@ static inline void init_page_count(struct page *page)
 	atomic_set(&page->_count, 1);
 }
 
-/*
- * PageBuddy() indicate that the page is free and in the buddy system
- * (see mm/page_alloc.c).
- *
- * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
- * -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
- * efficiently by most CPU architectures.
- */
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-
-static inline int PageBuddy(struct page *page)
-{
-	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBuddy(struct page *page)
-{
-	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
-	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBuddy(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageBuddy(page), page);
-	atomic_set(&page->_mapcount, -1);
-}
-
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-
-static inline int PageBalloon(struct page *page)
-{
-	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBalloon(struct page *page)
-{
-	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
-	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBalloon(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageBalloon(page), page);
-	atomic_set(&page->_mapcount, -1);
-}
-
 void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
@@ -1006,34 +953,10 @@ void page_address_init(void);
 #define page_address_init()  do { } while(0)
 #endif
 
-/*
- * On an anonymous page mapped into a user virtual memory area,
- * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
- *
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
- * and then page->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page.  See ksm.h.
- *
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
- *
- * Please note that, confusingly, "page_mapping" refers to the inode
- * address_space which maps the page from disk; whereas "page_mapped"
- * refers to user virtual address space into which the page is mapped.
- */
-#define PAGE_MAPPING_ANON	1
-#define PAGE_MAPPING_KSM	2
-#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
-
+extern void *page_rmapping(struct page *page);
+extern struct anon_vma *page_anon_vma(struct page *page);
 extern struct address_space *page_mapping(struct page *page);
 
-/* Neutral page->mapping pointer to address_space or anon_vma or other */
-static inline void *page_rmapping(struct page *page)
-{
-	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
-}
-
 extern struct address_space *__page_file_mapping(struct page *);
 
 static inline
@@ -1045,11 +968,6 @@ struct address_space *page_file_mapping(struct page *page)
 	return page->mapping;
 }
 
-static inline int PageAnon(struct page *page)
-{
-	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
-}
-
 /*
  * Return the pagecache index of the passed page.  Regular pagecache pages
  * use ->index whereas swapcache pages use ->private
@@ -1975,10 +1893,10 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
 static inline unsigned long
 vm_unmapped_area(struct vm_unmapped_area_info *info)
 {
-	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
-		return unmapped_area(info);
-	else
+	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
 		return unmapped_area_topdown(info);
+	else
+		return unmapped_area(info);
 }
 
 /* truncate.c */
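
For context on the hook added in the first hunk: a driver that maps device memory with VM_PFNMAP or VM_MIXEDMAP can supply a pfn_mkwrite handler so it is told when a read-only PFN mapping is about to be made writable, analogous to page_mkwrite for page-backed mappings. Below is a minimal sketch, not part of this patch; the names example_pfn_mkwrite and example_vm_ops are hypothetical, and the return convention (0 to let the write proceed, a VM_FAULT_* code on error) is assumed to mirror page_mkwrite, as the in-tree comment states.

#include <linux/mm.h>

/* Hypothetical handler: invoked when a PFN mapping in a
 * VM_PFNMAP/VM_MIXEDMAP VMA is about to become writable. */
static int example_pfn_mkwrite(struct vm_area_struct *vma,
			       struct vm_fault *vmf)
{
	/* A real driver would update its own bookkeeping for the PFN at
	 * vmf->pgoff here (e.g. mark it dirty) before allowing the write. */
	return 0;	/* assumed: 0 lets the fault continue, as with page_mkwrite */
}

static const struct vm_operations_struct example_vm_ops = {
	.pfn_mkwrite	= example_pfn_mkwrite,
};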