author    Mel Gorman <mgorman@techsingularity.net>    2016-05-19 20:14:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-05-19 22:12:14 -0400
commit    e2769dbdc51f1baa1908ecf6c84d50f19577e1db (patch)
tree      c46aadcd53eb71d073221cd5249bb41dab25e457 /mm/page_alloc.c
parent    479f854a207ce2b97545a0a83856778b541063d0 (diff)
mm, page_alloc: don't duplicate code in free_pcp_prepare
The new free_pcp_prepare() function shares a lot of code with free_pages_prepare(), which makes this a maintenance risk when some future patch modifies only one of them. We should be able to achieve the same effect (skipping free_pages_check() in !DEBUG_VM configs) by adding a parameter to free_pages_prepare() and making it inline, so that the checks (and the order != 0 parts) are eliminated from the call in free_pcp_prepare().

!DEBUG_VM: bloat-o-meter reports no difference, as my gcc was already inlining free_pages_prepare() and the elimination seems to work as expected.

DEBUG_VM bloat-o-meter:

add/remove: 0/1 grow/shrink: 2/0 up/down: 1035/-778 (257)
function                old     new   delta
__free_pages_ok         297    1060    +763
free_hot_cold_page      480     752    +272
free_pages_prepare      778       -    -778

Here inlining didn't occur before, so the change adds some code, but that is acceptable for a debug option.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
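What the patch relies on is ordinary constant propagation: when an __always_inline function takes a bool that is a compile-time literal at every call site, the compiler folds the branch and emits no code for the unused path. A minimal userspace sketch of that idea follows; prepare() and expensive_check() are illustrative names standing in for free_pages_prepare() and free_pages_check(), not kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's __always_inline. */
    #define __always_inline inline __attribute__((__always_inline__))

    /* Hypothetical stand-in for free_pages_check(); not a kernel function. */
    static int expensive_check(int v)
    {
            volatile int x = v;     /* keep the check from being folded away */
            return x < 0;
    }

    static __always_inline bool prepare(int v, bool check)
    {
            /*
             * 'check' is a compile-time constant at every call site, so
             * after inlining the compiler deletes this branch in the
             * !check callers: no test, no call to expensive_check().
             */
            if (check && expensive_check(v))
                    return false;
            return true;
    }

    /* Mirrors the DEBUG_VM and !DEBUG_VM variants of free_pcp_prepare(). */
    bool prepare_checked(int v)   { return prepare(v, true); }
    bool prepare_unchecked(int v) { return prepare(v, false); }

    int main(void)
    {
            printf("%d %d\n", prepare_checked(-1), prepare_unchecked(-1));
            return 0;
    }

Compiling this with -O2 and inspecting the disassembly should show prepare_unchecked() reduced to a constant return with no reference to expensive_check(), matching the !DEBUG_VM observation above that the elimination works as expected.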
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 133
1 file changed, 55 insertions(+), 78 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 193ed34a2780..7d8f642c498d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -991,47 +991,77 @@ out:
 	return ret;
 }
 
-static bool free_pages_prepare(struct page *page, unsigned int order);
-
-#ifdef CONFIG_DEBUG_VM
-static inline bool free_pcp_prepare(struct page *page)
-{
-	return free_pages_prepare(page, 0);
-}
-
-static inline bool bulkfree_pcp_prepare(struct page *page)
-{
-	return false;
-}
-#else
-static bool free_pcp_prepare(struct page *page)
-{
+static __always_inline bool free_pages_prepare(struct page *page,
+					unsigned int order, bool check_free)
+{
+	int bad = 0;
+
 	VM_BUG_ON_PAGE(PageTail(page), page);
 
-	trace_mm_page_free(page, 0);
-	kmemcheck_free_shadow(page, 0);
-	kasan_free_pages(page, 0);
+	trace_mm_page_free(page, order);
+	kmemcheck_free_shadow(page, order);
+	kasan_free_pages(page, order);
+
+	/*
+	 * Check tail pages before head page information is cleared to
+	 * avoid checking PageCompound for order-0 pages.
+	 */
+	if (unlikely(order)) {
+		bool compound = PageCompound(page);
+		int i;
+
+		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
 
+		for (i = 1; i < (1 << order); i++) {
+			if (compound)
+				bad += free_tail_pages_check(page, page + i);
+			if (unlikely(free_pages_check(page + i))) {
+				bad++;
+				continue;
+			}
+			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+		}
+	}
 	if (PageAnonHead(page))
 		page->mapping = NULL;
+	if (check_free)
+		bad += free_pages_check(page);
+	if (bad)
+		return false;
 
-	reset_page_owner(page, 0);
+	page_cpupid_reset_last(page);
+	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+	reset_page_owner(page, order);
 
 	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page),
-					   PAGE_SIZE);
+					   PAGE_SIZE << order);
 		debug_check_no_obj_freed(page_address(page),
-					   PAGE_SIZE);
+					   PAGE_SIZE << order);
 	}
-	arch_free_page(page, 0);
-	kernel_poison_pages(page, 0, 0);
-	kernel_map_pages(page, 0, 0);
+	arch_free_page(page, order);
+	kernel_poison_pages(page, 1 << order, 0);
+	kernel_map_pages(page, 1 << order, 0);
 
-	page_cpupid_reset_last(page);
-	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	return true;
 }
 
+#ifdef CONFIG_DEBUG_VM
+static inline bool free_pcp_prepare(struct page *page)
+{
+	return free_pages_prepare(page, 0, true);
+}
+
+static inline bool bulkfree_pcp_prepare(struct page *page)
+{
+	return false;
+}
+#else
+static bool free_pcp_prepare(struct page *page)
+{
+	return free_pages_prepare(page, 0, false);
+}
+
 static bool bulkfree_pcp_prepare(struct page *page)
 {
 	return free_pages_check(page);
@@ -1201,66 +1231,13 @@ void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
 	}
 }
 
-static bool free_pages_prepare(struct page *page, unsigned int order)
-{
-	int bad = 0;
-
-	VM_BUG_ON_PAGE(PageTail(page), page);
-
-	trace_mm_page_free(page, order);
-	kmemcheck_free_shadow(page, order);
-	kasan_free_pages(page, order);
-
-	/*
-	 * Check tail pages before head page information is cleared to
-	 * avoid checking PageCompound for order-0 pages.
-	 */
-	if (unlikely(order)) {
-		bool compound = PageCompound(page);
-		int i;
-
-		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
-
-		for (i = 1; i < (1 << order); i++) {
-			if (compound)
-				bad += free_tail_pages_check(page, page + i);
-			if (unlikely(free_pages_check(page + i))) {
-				bad++;
-				continue;
-			}
-			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
-		}
-	}
-	if (PageAnonHead(page))
-		page->mapping = NULL;
-	bad += free_pages_check(page);
-	if (bad)
-		return false;
-
-	page_cpupid_reset_last(page);
-	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
-	reset_page_owner(page, order);
-
-	if (!PageHighMem(page)) {
-		debug_check_no_locks_freed(page_address(page),
-					   PAGE_SIZE << order);
-		debug_check_no_obj_freed(page_address(page),
-					   PAGE_SIZE << order);
-	}
-	arch_free_page(page, order);
-	kernel_poison_pages(page, 1 << order, 0);
-	kernel_map_pages(page, 1 << order, 0);
-
-	return true;
-}
-
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
 	unsigned long flags;
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
 
-	if (!free_pages_prepare(page, order))
+	if (!free_pages_prepare(page, order, true))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
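The changelog's note that the "order != 0 parts" are also eliminated follows from the same mechanism: both free_pcp_prepare() variants pass the literal 0 for order, so after inlining the unlikely(order) tail-page loop is dead code. A hedged sketch under the same userspace assumptions (prep() and tail_check() are illustrative names, not the kernel helpers):

    #include <stdio.h>

    #define __always_inline inline __attribute__((__always_inline__))

    /* Hypothetical stand-in for free_tail_pages_check(); not kernel code. */
    static int tail_check(unsigned int i)
    {
            volatile unsigned int x = i;    /* defeat trivial optimisation */
            return (int)(x & 1);
    }

    static __always_inline int prep(unsigned int order)
    {
            int bad = 0;

            /* Mirrors the 'if (unlikely(order))' tail-page checks. */
            if (order) {
                    unsigned int i;

                    for (i = 1; i < (1u << order); i++)
                            bad += tail_check(i);
            }
            return bad;
    }

    /*
     * order is the literal 0 here, as in free_pcp_prepare(): after
     * inlining, the whole block above should compile down to 'return 0'.
     */
    int prep_order0(void)
    {
            return prep(0);
    }

    int main(void)
    {
            printf("%d\n", prep_order0());
            return 0;
    }

With -O2, the object code for prep_order0() should contain no loop and no call to tail_check(), which is why making free_pages_prepare() inline recovers the order-0 fast path that the old hand-specialised free_pcp_prepare() provided.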