aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/mm.h
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2015-04-15 19:13:08 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-04-15 19:35:17 -0400
commite8c6158fef15a1532bd5242a0cd88565eedabe61 (patch)
tree82f931e8dc18bd776ea32e745a7706574186ded8 /include/linux/mm.h
parent64d37a2baf5e5c0f1009c0ef290a9027de721d66 (diff)
mm: consolidate all page-flags helpers in <linux/page-flags.h>
Currently we take a naive approach to page flags on compound pages - we set the flag on the page without consideration if the flag makes sense for a tail page or for a compound page in general. This patchset tries to sort this out by defining a per-flag policy on what needs to be done if a page-flag helper operates on a compound page. The last patch in the patchset also sanitizes usage of page->mapping for tail pages. We don't define the meaning of page->mapping for tail pages. Currently it's always NULL, which can be inconsistent with the head page and potentially lead to problems. For now I caught one case of illegal usage of page flags or ->mapping: the sound subsystem allocates pages with __GFP_COMP and maps them with PTEs. It leads to setting the dirty bit on tail pages and access to a tail page's ->mapping. I don't see any bad behaviour caused by this, but it is worth fixing anyway. This patchset makes more sense if you take my THP refcounting into account: we will see more compound pages mapped with PTEs and we need to define the behaviour of flags on compound pages to avoid bugs. This patch (of 16): We have page-flags helper function declarations/definitions spread over several header files. Let's consolidate them in <linux/page-flags.h>. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Hugh Dickins <hughd@google.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Rik van Riel <riel@redhat.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Christoph Lameter <cl@linux.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Steve Capper <steve.capper@linaro.org> Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: Jerome Marchand <jmarchan@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--include/linux/mm.h81
1 files changed, 0 insertions, 81 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6571dd78e984..fb1fc38b01ce 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -494,15 +494,6 @@ static inline int page_count(struct page *page)
494 return atomic_read(&compound_head(page)->_count); 494 return atomic_read(&compound_head(page)->_count);
495} 495}
496 496
497#ifdef CONFIG_HUGETLB_PAGE
498extern int PageHeadHuge(struct page *page_head);
499#else /* CONFIG_HUGETLB_PAGE */
500static inline int PageHeadHuge(struct page *page_head)
501{
502 return 0;
503}
504#endif /* CONFIG_HUGETLB_PAGE */
505
506static inline bool __compound_tail_refcounted(struct page *page) 497static inline bool __compound_tail_refcounted(struct page *page)
507{ 498{
508 return !PageSlab(page) && !PageHeadHuge(page); 499 return !PageSlab(page) && !PageHeadHuge(page);
@@ -571,53 +562,6 @@ static inline void init_page_count(struct page *page)
571 atomic_set(&page->_count, 1); 562 atomic_set(&page->_count, 1);
572} 563}
573 564
574/*
575 * PageBuddy() indicate that the page is free and in the buddy system
576 * (see mm/page_alloc.c).
577 *
578 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
579 * -2 so that an underflow of the page_mapcount() won't be mistaken
580 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
581 * efficiently by most CPU architectures.
582 */
583#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
584
585static inline int PageBuddy(struct page *page)
586{
587 return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
588}
589
590static inline void __SetPageBuddy(struct page *page)
591{
592 VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
593 atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
594}
595
596static inline void __ClearPageBuddy(struct page *page)
597{
598 VM_BUG_ON_PAGE(!PageBuddy(page), page);
599 atomic_set(&page->_mapcount, -1);
600}
601
602#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
603
604static inline int PageBalloon(struct page *page)
605{
606 return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
607}
608
609static inline void __SetPageBalloon(struct page *page)
610{
611 VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
612 atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
613}
614
615static inline void __ClearPageBalloon(struct page *page)
616{
617 VM_BUG_ON_PAGE(!PageBalloon(page), page);
618 atomic_set(&page->_mapcount, -1);
619}
620
621void put_page(struct page *page); 565void put_page(struct page *page);
622void put_pages_list(struct list_head *pages); 566void put_pages_list(struct list_head *pages);
623 567
@@ -1006,26 +950,6 @@ void page_address_init(void);
1006#define page_address_init() do { } while(0) 950#define page_address_init() do { } while(0)
1007#endif 951#endif
1008 952
1009/*
1010 * On an anonymous page mapped into a user virtual memory area,
1011 * page->mapping points to its anon_vma, not to a struct address_space;
1012 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
1013 *
1014 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
1015 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
1016 * and then page->mapping points, not to an anon_vma, but to a private
1017 * structure which KSM associates with that merged page. See ksm.h.
1018 *
1019 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
1020 *
1021 * Please note that, confusingly, "page_mapping" refers to the inode
1022 * address_space which maps the page from disk; whereas "page_mapped"
1023 * refers to user virtual address space into which the page is mapped.
1024 */
1025#define PAGE_MAPPING_ANON 1
1026#define PAGE_MAPPING_KSM 2
1027#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
1028
1029extern struct address_space *page_mapping(struct page *page); 953extern struct address_space *page_mapping(struct page *page);
1030 954
1031/* Neutral page->mapping pointer to address_space or anon_vma or other */ 955/* Neutral page->mapping pointer to address_space or anon_vma or other */
@@ -1045,11 +969,6 @@ struct address_space *page_file_mapping(struct page *page)
1045 return page->mapping; 969 return page->mapping;
1046} 970}
1047 971
1048static inline int PageAnon(struct page *page)
1049{
1050 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
1051}
1052
1053/* 972/*
1054 * Return the pagecache index of the passed page. Regular pagecache pages 973 * Return the pagecache index of the passed page. Regular pagecache pages
1055 * use ->index whereas swapcache pages use ->private 974 * use ->index whereas swapcache pages use ->private