aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/page-flags.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/page-flags.h')
-rw-r--r--include/linux/page-flags.h103
1 files changed, 103 insertions, 0 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c851ff92d5b3..f34e040b34e9 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -289,6 +289,47 @@ PAGEFLAG_FALSE(HWPoison)
289#define __PG_HWPOISON 0 289#define __PG_HWPOISON 0
290#endif 290#endif
291 291
/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
/* Tag bits stored in the low bits of the page->mapping pointer. */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
311
312static inline int PageAnon(struct page *page)
313{
314 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
315}
316
317#ifdef CONFIG_KSM
318/*
319 * A KSM page is one of those write-protected "shared pages" or "merged pages"
320 * which KSM maps into multiple mms, wherever identical anonymous page content
321 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
322 * anon_vma, but to that page's node of the stable tree.
323 */
324static inline int PageKsm(struct page *page)
325{
326 return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
327 (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
328}
329#else
330TESTPAGEFLAG_FALSE(Ksm)
331#endif
332
292u64 stable_page_flags(struct page *page); 333u64 stable_page_flags(struct page *page);
293 334
294static inline int PageUptodate(struct page *page) 335static inline int PageUptodate(struct page *page)
@@ -426,6 +467,21 @@ static inline void ClearPageCompound(struct page *page)
426 467
427#endif /* !PAGEFLAGS_EXTENDED */ 468#endif /* !PAGEFLAGS_EXTENDED */
428 469
470#ifdef CONFIG_HUGETLB_PAGE
471int PageHuge(struct page *page);
472int PageHeadHuge(struct page *page);
473bool page_huge_active(struct page *page);
474#else
475TESTPAGEFLAG_FALSE(Huge)
476TESTPAGEFLAG_FALSE(HeadHuge)
477
478static inline bool page_huge_active(struct page *page)
479{
480 return 0;
481}
482#endif
483
484
429#ifdef CONFIG_TRANSPARENT_HUGEPAGE 485#ifdef CONFIG_TRANSPARENT_HUGEPAGE
430/* 486/*
431 * PageHuge() only returns true for hugetlbfs pages, but not for 487 * PageHuge() only returns true for hugetlbfs pages, but not for
@@ -480,6 +536,53 @@ static inline int PageTransTail(struct page *page)
480#endif 536#endif
481 537
482/* 538/*
539 * PageBuddy() indicate that the page is free and in the buddy system
540 * (see mm/page_alloc.c).
541 *
542 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
543 * -2 so that an underflow of the page_mapcount() won't be mistaken
544 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
545 * efficiently by most CPU architectures.
546 */
547#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
548
549static inline int PageBuddy(struct page *page)
550{
551 return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
552}
553
/*
 * Mark @page as a free buddy page by storing the sentinel in _mapcount.
 * The page must be completely unmapped on entry (_mapcount == -1), which
 * the VM_BUG_ON_PAGE assertion enforces in debug builds.
 * NOTE(review): the __ prefix suggests the caller must hold whatever lock
 * serialises buddy state — confirm against mm/page_alloc.c callers.
 */
static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}
559
/*
 * Take @page out of the buddy state, restoring _mapcount to -1 (the
 * "unmapped" baseline).  The page must currently be PageBuddy(), which
 * the VM_BUG_ON_PAGE assertion enforces in debug builds.
 */
static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
}
565
566#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
567
568static inline int PageBalloon(struct page *page)
569{
570 return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
571}
572
/*
 * Mark @page as a balloon page by storing the sentinel in _mapcount.
 * The page must be completely unmapped on entry (_mapcount == -1), which
 * the VM_BUG_ON_PAGE assertion enforces in debug builds.
 */
static inline void __SetPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}
578
/*
 * Take @page out of the balloon state, restoring _mapcount to -1 (the
 * "unmapped" baseline).  The page must currently be PageBalloon(), which
 * the VM_BUG_ON_PAGE assertion enforces in debug builds.
 */
static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}
584
585/*
483 * If network-based swap is enabled, sl*b must keep track of whether pages 586 * If network-based swap is enabled, sl*b must keep track of whether pages
484 * were allocated from pfmemalloc reserves. 587 * were allocated from pfmemalloc reserves.
485 */ 588 */