author     Mel Gorman <mgorman@techsingularity.net>       2016-05-19 20:14:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2016-05-19 22:12:14 -0400
commit     4db7548ccbd9ec8e666f35df4a530f55904dec39
tree       73bed79bf4da794881cb5b9da99a017bdc2f3a99 /mm/page_alloc.c
parent     002f290627c27068087f6204baec7a334e5a3b48
mm, page_alloc: defer debugging checks of freed pages until a PCP drain
Every page free checks a number of page fields for validity.  This
catches premature frees and corruptions but it is also expensive.  This
patch weakens the debugging check by checking PCP pages at the time they
are drained from the PCP list.  This will trigger the bug but the site
that freed the corrupt page will be lost.  To get the full context, a
kernel rebuild with DEBUG_VM is necessary.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  152
1 file changed, 101 insertions(+), 51 deletions(-)
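The control flow the patch sets up is easy to model outside the kernel.
The sketch below is a minimal userspace analogue, not kernel code: the
toy struct, page_is_bad(), pcp_prepare(), bulk_prepare() and the
array-backed list are illustrative stand-ins for struct page,
free_pages_check(), free_pcp_prepare(), bulkfree_pcp_prepare() and the
real per-cpu pagesets.  Build with -DDEBUG_VM to model a CONFIG_DEBUG_VM
kernel, where the full check still runs at the free site.

	#include <stdbool.h>
	#include <stdio.h>

	struct page {
		unsigned long flags;	/* nonzero models a corrupt page */
	};

	/* Stand-in for free_pages_check(): true if the page is bad. */
	static bool page_is_bad(struct page *page)
	{
		return page->flags != 0;
	}

	#ifdef DEBUG_VM
	/* Debug build: full check at free time, nothing at drain. */
	static bool pcp_prepare(struct page *page)
	{
		return !page_is_bad(page);
	}
	static bool bulk_prepare(struct page *page)
	{
		return false;		/* no deferred check needed */
	}
	#else
	/* Production build: cheap free, check deferred to the drain. */
	static bool pcp_prepare(struct page *page)
	{
		return true;
	}
	static bool bulk_prepare(struct page *page)
	{
		return page_is_bad(page);
	}
	#endif

	static struct page *pcp_list[16];
	static int pcp_count;

	/* Model of free_hot_cold_page(): maybe check, then stash on the PCP. */
	static void free_page_pcp(struct page *page)
	{
		if (!pcp_prepare(page))
			return;		/* bad page caught at the free site */
		pcp_list[pcp_count++] = page;
	}

	/* Model of free_pcppages_bulk(): the deferred check runs here. */
	static void drain_pcp(void)
	{
		for (int i = 0; i < pcp_count; i++) {
			if (bulk_prepare(pcp_list[i]))
				continue;	/* caught at drain; free site unknown */
			/* __free_one_page() would hand the page to the buddy here */
		}
		pcp_count = 0;
	}

	int main(void)
	{
		struct page good = { .flags = 0 }, bad = { .flags = 1 };

		free_page_pcp(&good);
		free_page_pcp(&bad);	/* caught now under -DDEBUG_VM, else at drain */
		drain_pcp();
		puts("drained");
		return 0;
	}

The trade-off is the one the commit message describes: the hot path (the
free itself) stays cheap, validation runs once per batch at drain time,
and when a corrupt page is finally caught the identity of the caller
that freed it has been lost.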
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 39c441bb8d61..759d3f60bea0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -941,6 +941,103 @@ static inline int free_pages_check(struct page *page)
 	return 1;
 }
 
+static int free_tail_pages_check(struct page *head_page, struct page *page)
+{
+	int ret = 1;
+
+	/*
+	 * We rely page->lru.next never has bit 0 set, unless the page
+	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
+	 */
+	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
+
+	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
+		ret = 0;
+		goto out;
+	}
+	switch (page - head_page) {
+	case 1:
+		/* the first tail page: ->mapping is compound_mapcount() */
+		if (unlikely(compound_mapcount(page))) {
+			bad_page(page, "nonzero compound_mapcount", 0);
+			goto out;
+		}
+		break;
+	case 2:
+		/*
+		 * the second tail page: ->mapping is
+		 * page_deferred_list().next -- ignore value.
+		 */
+		break;
+	default:
+		if (page->mapping != TAIL_MAPPING) {
+			bad_page(page, "corrupted mapping in tail page", 0);
+			goto out;
+		}
+		break;
+	}
+	if (unlikely(!PageTail(page))) {
+		bad_page(page, "PageTail not set", 0);
+		goto out;
+	}
+	if (unlikely(compound_head(page) != head_page)) {
+		bad_page(page, "compound_head not consistent", 0);
+		goto out;
+	}
+	ret = 0;
+out:
+	page->mapping = NULL;
+	clear_compound_head(page);
+	return ret;
+}
+
+static bool free_pages_prepare(struct page *page, unsigned int order);
+
+#ifdef CONFIG_DEBUG_VM
+static inline bool free_pcp_prepare(struct page *page)
+{
+	return free_pages_prepare(page, 0);
+}
+
+static inline bool bulkfree_pcp_prepare(struct page *page)
+{
+	return false;
+}
+#else
+static bool free_pcp_prepare(struct page *page)
+{
+	VM_BUG_ON_PAGE(PageTail(page), page);
+
+	trace_mm_page_free(page, 0);
+	kmemcheck_free_shadow(page, 0);
+	kasan_free_pages(page, 0);
+
+	if (PageAnonHead(page))
+		page->mapping = NULL;
+
+	reset_page_owner(page, 0);
+
+	if (!PageHighMem(page)) {
+		debug_check_no_locks_freed(page_address(page),
+					   PAGE_SIZE);
+		debug_check_no_obj_freed(page_address(page),
+					 PAGE_SIZE);
+	}
+	arch_free_page(page, 0);
+	kernel_poison_pages(page, 0, 0);
+	kernel_map_pages(page, 0, 0);
+
+	page_cpupid_reset_last(page);
+	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+	return true;
+}
+
+static bool bulkfree_pcp_prepare(struct page *page)
+{
+	return free_pages_check(page);
+}
+#endif /* CONFIG_DEBUG_VM */
+
 /*
  * Frees a number of pages from the PCP lists
  * Assumes all pages on list are in same zone, and of same order.
@@ -1002,6 +1099,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			if (unlikely(isolated_pageblocks))
 				mt = get_pageblock_migratetype(page);
 
+			if (bulkfree_pcp_prepare(page))
+				continue;
+
 			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--count && --batch_free && !list_empty(list));
@@ -1028,56 +1128,6 @@ static void free_one_page(struct zone *zone,
 	spin_unlock(&zone->lock);
 }
 
-static int free_tail_pages_check(struct page *head_page, struct page *page)
-{
-	int ret = 1;
-
-	/*
-	 * We rely page->lru.next never has bit 0 set, unless the page
-	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
-	 */
-	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
-
-	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
-		ret = 0;
-		goto out;
-	}
-	switch (page - head_page) {
-	case 1:
-		/* the first tail page: ->mapping is compound_mapcount() */
-		if (unlikely(compound_mapcount(page))) {
-			bad_page(page, "nonzero compound_mapcount", 0);
-			goto out;
-		}
-		break;
-	case 2:
-		/*
-		 * the second tail page: ->mapping is
-		 * page_deferred_list().next -- ignore value.
-		 */
-		break;
-	default:
-		if (page->mapping != TAIL_MAPPING) {
-			bad_page(page, "corrupted mapping in tail page", 0);
-			goto out;
-		}
-		break;
-	}
-	if (unlikely(!PageTail(page))) {
-		bad_page(page, "PageTail not set", 0);
-		goto out;
-	}
-	if (unlikely(compound_head(page) != head_page)) {
-		bad_page(page, "compound_head not consistent", 0);
-		goto out;
-	}
-	ret = 0;
-out:
-	page->mapping = NULL;
-	clear_compound_head(page);
-	return ret;
-}
-
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 					unsigned long zone, int nid)
 {
@@ -2339,7 +2389,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
 
-	if (!free_pages_prepare(page, 0))
+	if (!free_pcp_prepare(page))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);