summaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorMel Gorman <mgorman@techsingularity.net>2016-05-19 20:14:15 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-19 22:12:14 -0400
commit7bfec6f47bb0ffd207c7e813e819235e6c1c0f34 (patch)
treee82d1efeb3abd3f0ddf732367df75c9c0ffdb5a3 /mm/page_alloc.c
parent93ea9964d14ad583492ffb9ab7543f015876aaf2 (diff)
mm, page_alloc: check multiple page fields with a single branch
Every page allocated or freed is checked for sanity to avoid corruptions
that are difficult to detect later.  A bad page could be due to a number
of fields.  Instead of using multiple branches, this patch combines
multiple fields into a single branch.  A detailed check is only necessary
if that check fails.

                                       4.6.0-rc2        4.6.0-rc2
                                  initonce-v1r20   multcheck-v1r20
Min      alloc-odr0-1       359.00 (  0.00%)       348.00 (  3.06%)
Min      alloc-odr0-2       260.00 (  0.00%)       254.00 (  2.31%)
Min      alloc-odr0-4       214.00 (  0.00%)       213.00 (  0.47%)
Min      alloc-odr0-8       186.00 (  0.00%)       186.00 (  0.00%)
Min      alloc-odr0-16      173.00 (  0.00%)       173.00 (  0.00%)
Min      alloc-odr0-32      165.00 (  0.00%)       166.00 ( -0.61%)
Min      alloc-odr0-64      162.00 (  0.00%)       162.00 (  0.00%)
Min      alloc-odr0-128     161.00 (  0.00%)       160.00 (  0.62%)
Min      alloc-odr0-256     170.00 (  0.00%)       169.00 (  0.59%)
Min      alloc-odr0-512     181.00 (  0.00%)       180.00 (  0.55%)
Min      alloc-odr0-1024    190.00 (  0.00%)       188.00 (  1.05%)
Min      alloc-odr0-2048    196.00 (  0.00%)       194.00 (  1.02%)
Min      alloc-odr0-4096    202.00 (  0.00%)       199.00 (  1.49%)
Min      alloc-odr0-8192    205.00 (  0.00%)       202.00 (  1.46%)
Min      alloc-odr0-16384   205.00 (  0.00%)       203.00 (  0.98%)

Again, the benefit is marginal but avoiding excessive branches is
important.  Ideally the paths would not have to check these conditions at
all but regrettably abandoning the tests would make use-after-free bugs
much harder to detect.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c55
1 file changed, 43 insertions, 12 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9da66e792e17..76a394812776 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -784,10 +784,42 @@ out:
784 zone->free_area[order].nr_free++; 784 zone->free_area[order].nr_free++;
785} 785}
786 786
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	/*
	 * _mapcount is expected to be -1 (no mappings), so it cannot be
	 * folded into the single zero-test below and needs its own branch.
	 */
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	/*
	 * All remaining fields are expected to be zero/NULL in a sane page:
	 * OR them together so one branch covers every field at once.  The
	 * mem_cgroup pointer only exists with CONFIG_MEMCG, hence the ifdef.
	 * Callers fall back to per-field checks only when this fails.
	 */
	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}
808
787static inline int free_pages_check(struct page *page) 809static inline int free_pages_check(struct page *page)
788{ 810{
789 const char *bad_reason = NULL; 811 const char *bad_reason;
790 unsigned long bad_flags = 0; 812 unsigned long bad_flags;
813
814 if (page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)) {
815 page_cpupid_reset_last(page);
816 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
817 return 0;
818 }
819
820 /* Something has gone sideways, find it */
821 bad_reason = NULL;
822 bad_flags = 0;
791 823
792 if (unlikely(atomic_read(&page->_mapcount) != -1)) 824 if (unlikely(atomic_read(&page->_mapcount) != -1))
793 bad_reason = "nonzero mapcount"; 825 bad_reason = "nonzero mapcount";
@@ -803,14 +835,8 @@ static inline int free_pages_check(struct page *page)
803 if (unlikely(page->mem_cgroup)) 835 if (unlikely(page->mem_cgroup))
804 bad_reason = "page still charged to cgroup"; 836 bad_reason = "page still charged to cgroup";
805#endif 837#endif
806 if (unlikely(bad_reason)) { 838 bad_page(page, bad_reason, bad_flags);
807 bad_page(page, bad_reason, bad_flags); 839 return 1;
808 return 1;
809 }
810 page_cpupid_reset_last(page);
811 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
812 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
813 return 0;
814} 840}
815 841
816/* 842/*
@@ -1492,9 +1518,14 @@ static inline void expand(struct zone *zone, struct page *page,
1492 */ 1518 */
1493static inline int check_new_page(struct page *page) 1519static inline int check_new_page(struct page *page)
1494{ 1520{
1495 const char *bad_reason = NULL; 1521 const char *bad_reason;
1496 unsigned long bad_flags = 0; 1522 unsigned long bad_flags;
1523
1524 if (page_expected_state(page, PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))
1525 return 0;
1497 1526
1527 bad_reason = NULL;
1528 bad_flags = 0;
1498 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1529 if (unlikely(atomic_read(&page->_mapcount) != -1))
1499 bad_reason = "nonzero mapcount"; 1530 bad_reason = "nonzero mapcount";
1500 if (unlikely(page->mapping != NULL)) 1531 if (unlikely(page->mapping != NULL))