author     Vlastimil Babka <vbabka@suse.cz>                  2016-05-19 20:14:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-05-19 22:12:14 -0400
commit     4e6118016eb7986109ad61b00186579f384f956a (patch)
tree       f3c06e8c7d5c726bacb8dafb5fd959484850bc99 /mm/page_alloc.c
parent     e2769dbdc51f1baa1908ecf6c84d50f19577e1db (diff)
mm, page_alloc: uninline the bad page part of check_new_page()
Bad pages should be rare, so the code handling them doesn't need to be inline for performance reasons. Put it into a separate function which returns void. This also assumes that the initial page_expected_state() result will match the result of the thorough check, i.e. the page doesn't become "good" in the meanwhile. This matches the expectations already in place in free_pages_check().

!DEBUG_VM bloat-o-meter:

add/remove: 1/0 grow/shrink: 0/1 up/down: 134/-274 (-140)
function                                     old     new   delta
check_new_page_bad                             -     134    +134
get_page_from_freelist                      3468    3194    -274

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
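The saving comes from a generic pattern: keep the inline fast path down to one cheap predicate, and push the rare diagnostic work into an out-of-line function so it is not duplicated into every caller. Below is a minimal userspace sketch of that pattern, not the kernel code itself; the names (fake_page, check_page(), report_bad_page(), BAD_FLAGS_MASK) are illustrative stand-ins for the symbols in the patch. Like the commit, the sketch assumes the cheap test and the detailed diagnosis look at the same state, so a page that fails the fast check really is bad.

#include <stdio.h>

struct fake_page {
	unsigned long flags;	/* stand-in for page->flags */
	int mapcount;		/* stand-in for page->_mapcount; -1 means unused */
};

#define BAD_FLAGS_MASK	0xffUL	/* stand-in for PAGE_FLAGS_CHECK_AT_PREP */

/*
 * Cold path: bad pages are rare, so keep the reporting out of line
 * (mirrors the role of check_new_page_bad() in the patch).
 */
static void __attribute__((noinline)) report_bad_page(struct fake_page *page)
{
	const char *bad_reason = "unknown";

	if (page->mapcount != -1)
		bad_reason = "nonzero mapcount";
	if (page->flags & BAD_FLAGS_MASK)
		bad_reason = "unexpected flags set";

	fprintf(stderr, "bad page %p: %s\n", (void *)page, bad_reason);
}

/*
 * Hot path: a single cheap predicate, small enough to stay inline in
 * every caller (mirrors the role of check_new_page()).
 */
static inline int check_page(struct fake_page *page)
{
	if (__builtin_expect(!(page->flags & BAD_FLAGS_MASK) &&
			     page->mapcount == -1, 1))
		return 0;		/* page looks good */

	report_bad_page(page);		/* rare, out-of-line slow path */
	return 1;
}

int main(void)
{
	struct fake_page good = { .flags = 0,   .mapcount = -1 };
	struct fake_page bad  = { .flags = 0x4, .mapcount = -1 };

	printf("good page rejected: %d\n", check_page(&good));
	printf("bad page rejected:  %d\n", check_page(&bad));
	return 0;
}

Built with optimization, a caller of check_page() should contain only the two-condition test plus a call to the cold helper, which is the same effect the bloat-o-meter numbers above show for get_page_from_freelist(). The kernel's scripts/bloat-o-meter compares symbol sizes between two object files and is how figures like these are typically produced.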
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   33
1 file changed, 17 insertions, 16 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d8f642c498d..ecf663358b0d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1647,19 +1647,11 @@ static inline void expand(struct zone *zone, struct page *page,
 	}
 }
 
-/*
- * This page is about to be returned from the page allocator
- */
-static inline int check_new_page(struct page *page)
+static void check_new_page_bad(struct page *page)
 {
-	const char *bad_reason;
-	unsigned long bad_flags;
+	const char *bad_reason = NULL;
+	unsigned long bad_flags = 0;
 
-	if (page_expected_state(page, PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))
-		return 0;
-
-	bad_reason = NULL;
-	bad_flags = 0;
 	if (unlikely(atomic_read(&page->_mapcount) != -1))
 		bad_reason = "nonzero mapcount";
 	if (unlikely(page->mapping != NULL))
@@ -1678,11 +1670,20 @@ static inline int check_new_page(struct page *page)
 	if (unlikely(page->mem_cgroup))
 		bad_reason = "page still charged to cgroup";
 #endif
-	if (unlikely(bad_reason)) {
-		bad_page(page, bad_reason, bad_flags);
-		return 1;
-	}
-	return 0;
+	bad_page(page, bad_reason, bad_flags);
+}
+
+/*
+ * This page is about to be returned from the page allocator
+ */
+static inline int check_new_page(struct page *page)
+{
+	if (likely(page_expected_state(page,
+				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
+		return 0;
+
+	check_new_page_bad(page);
+	return 1;
 }
 
 static inline bool free_pages_prezeroed(bool poisoned)