aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2015-02-11 18:25:50 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-11 20:06:02 -0500
commit6e9f0d582dde095d971a3c6ce4685a218a0eac8e (patch)
tree6c6500ae0a15e86294732b039c9db03b5153a301 /mm
parent05891fb06517d19ae5357c9dc44e96bbe0300a3c (diff)
mm/page_alloc.c: drop dead destroy_compound_page()
The only caller is __free_one_page(). By that time, page->flags should already have been cleared: - for 0-order pages through the PCP list: free_hot_cold_page() free_pages_prepare() free_pages_check() page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; <put the page to PCP list> free_pcppages_bulk() page = <withdraw pages from PCP list> __free_one_page(page) - for non-0-order pages: __free_pages_ok() free_pages_prepare() free_pages_check() page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; free_one_page() __free_one_page() So there's no way PageCompound() will return true in __free_one_page(). Let's remove the dead destroy_compound_page() and put an assert for page->flags there instead. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Andrea Arcangeli <aarcange@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/page_alloc.c35
1 file changed, 1 insertion, 34 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d664eb922a7d..12d55b859d3f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -381,36 +381,6 @@ void prep_compound_page(struct page *page, unsigned long order)
381 } 381 }
382} 382}
383 383
384/* update __split_huge_page_refcount if you change this function */
385static int destroy_compound_page(struct page *page, unsigned long order)
386{
387 int i;
388 int nr_pages = 1 << order;
389 int bad = 0;
390
391 if (unlikely(compound_order(page) != order)) {
392 bad_page(page, "wrong compound order", 0);
393 bad++;
394 }
395
396 __ClearPageHead(page);
397
398 for (i = 1; i < nr_pages; i++) {
399 struct page *p = page + i;
400
401 if (unlikely(!PageTail(p))) {
402 bad_page(page, "PageTail not set", 0);
403 bad++;
404 } else if (unlikely(p->first_page != page)) {
405 bad_page(page, "first_page not consistent", 0);
406 bad++;
407 }
408 __ClearPageTail(p);
409 }
410
411 return bad;
412}
413
414static inline void prep_zero_page(struct page *page, unsigned int order, 384static inline void prep_zero_page(struct page *page, unsigned int order,
415 gfp_t gfp_flags) 385 gfp_t gfp_flags)
416{ 386{
@@ -613,10 +583,7 @@ static inline void __free_one_page(struct page *page,
613 int max_order = MAX_ORDER; 583 int max_order = MAX_ORDER;
614 584
615 VM_BUG_ON(!zone_is_initialized(zone)); 585 VM_BUG_ON(!zone_is_initialized(zone));
616 586 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
617 if (unlikely(PageCompound(page)))
618 if (unlikely(destroy_compound_page(page, order)))
619 return;
620 587
621 VM_BUG_ON(migratetype == -1); 588 VM_BUG_ON(migratetype == -1);
622 if (is_migrate_isolate(migratetype)) { 589 if (is_migrate_isolate(migratetype)) {