diff options
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 102
1 files changed, 72 insertions, 30 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 533e2147d14f..e3758a09a009 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -205,7 +205,7 @@ static char * const zone_names[MAX_NR_ZONES] = { | |||
205 | }; | 205 | }; |
206 | 206 | ||
207 | int min_free_kbytes = 1024; | 207 | int min_free_kbytes = 1024; |
208 | int user_min_free_kbytes; | 208 | int user_min_free_kbytes = -1; |
209 | 209 | ||
210 | static unsigned long __meminitdata nr_kernel_pages; | 210 | static unsigned long __meminitdata nr_kernel_pages; |
211 | static unsigned long __meminitdata nr_all_pages; | 211 | static unsigned long __meminitdata nr_all_pages; |
@@ -295,7 +295,7 @@ static inline int bad_range(struct zone *zone, struct page *page) | |||
295 | } | 295 | } |
296 | #endif | 296 | #endif |
297 | 297 | ||
298 | static void bad_page(struct page *page) | 298 | static void bad_page(struct page *page, char *reason, unsigned long bad_flags) |
299 | { | 299 | { |
300 | static unsigned long resume; | 300 | static unsigned long resume; |
301 | static unsigned long nr_shown; | 301 | static unsigned long nr_shown; |
@@ -329,7 +329,7 @@ static void bad_page(struct page *page) | |||
329 | 329 | ||
330 | printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", | 330 | printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", |
331 | current->comm, page_to_pfn(page)); | 331 | current->comm, page_to_pfn(page)); |
332 | dump_page(page); | 332 | dump_page_badflags(page, reason, bad_flags); |
333 | 333 | ||
334 | print_modules(); | 334 | print_modules(); |
335 | dump_stack(); | 335 | dump_stack(); |
@@ -383,7 +383,7 @@ static int destroy_compound_page(struct page *page, unsigned long order) | |||
383 | int bad = 0; | 383 | int bad = 0; |
384 | 384 | ||
385 | if (unlikely(compound_order(page) != order)) { | 385 | if (unlikely(compound_order(page) != order)) { |
386 | bad_page(page); | 386 | bad_page(page, "wrong compound order", 0); |
387 | bad++; | 387 | bad++; |
388 | } | 388 | } |
389 | 389 | ||
@@ -392,8 +392,11 @@ static int destroy_compound_page(struct page *page, unsigned long order) | |||
392 | for (i = 1; i < nr_pages; i++) { | 392 | for (i = 1; i < nr_pages; i++) { |
393 | struct page *p = page + i; | 393 | struct page *p = page + i; |
394 | 394 | ||
395 | if (unlikely(!PageTail(p) || (p->first_page != page))) { | 395 | if (unlikely(!PageTail(p))) { |
396 | bad_page(page); | 396 | bad_page(page, "PageTail not set", 0); |
397 | bad++; | ||
398 | } else if (unlikely(p->first_page != page)) { | ||
399 | bad_page(page, "first_page not consistent", 0); | ||
397 | bad++; | 400 | bad++; |
398 | } | 401 | } |
399 | __ClearPageTail(p); | 402 | __ClearPageTail(p); |
@@ -506,12 +509,12 @@ static inline int page_is_buddy(struct page *page, struct page *buddy, | |||
506 | return 0; | 509 | return 0; |
507 | 510 | ||
508 | if (page_is_guard(buddy) && page_order(buddy) == order) { | 511 | if (page_is_guard(buddy) && page_order(buddy) == order) { |
509 | VM_BUG_ON(page_count(buddy) != 0); | 512 | VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); |
510 | return 1; | 513 | return 1; |
511 | } | 514 | } |
512 | 515 | ||
513 | if (PageBuddy(buddy) && page_order(buddy) == order) { | 516 | if (PageBuddy(buddy) && page_order(buddy) == order) { |
514 | VM_BUG_ON(page_count(buddy) != 0); | 517 | VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); |
515 | return 1; | 518 | return 1; |
516 | } | 519 | } |
517 | return 0; | 520 | return 0; |
@@ -561,8 +564,8 @@ static inline void __free_one_page(struct page *page, | |||
561 | 564 | ||
562 | page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); | 565 | page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); |
563 | 566 | ||
564 | VM_BUG_ON(page_idx & ((1 << order) - 1)); | 567 | VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); |
565 | VM_BUG_ON(bad_range(zone, page)); | 568 | VM_BUG_ON_PAGE(bad_range(zone, page), page); |
566 | 569 | ||
567 | while (order < MAX_ORDER-1) { | 570 | while (order < MAX_ORDER-1) { |
568 | buddy_idx = __find_buddy_index(page_idx, order); | 571 | buddy_idx = __find_buddy_index(page_idx, order); |
@@ -618,12 +621,23 @@ out: | |||
618 | 621 | ||
619 | static inline int free_pages_check(struct page *page) | 622 | static inline int free_pages_check(struct page *page) |
620 | { | 623 | { |
621 | if (unlikely(page_mapcount(page) | | 624 | char *bad_reason = NULL; |
622 | (page->mapping != NULL) | | 625 | unsigned long bad_flags = 0; |
623 | (atomic_read(&page->_count) != 0) | | 626 | |
624 | (page->flags & PAGE_FLAGS_CHECK_AT_FREE) | | 627 | if (unlikely(page_mapcount(page))) |
625 | (mem_cgroup_bad_page_check(page)))) { | 628 | bad_reason = "nonzero mapcount"; |
626 | bad_page(page); | 629 | if (unlikely(page->mapping != NULL)) |
630 | bad_reason = "non-NULL mapping"; | ||
631 | if (unlikely(atomic_read(&page->_count) != 0)) | ||
632 | bad_reason = "nonzero _count"; | ||
633 | if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) { | ||
634 | bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; | ||
635 | bad_flags = PAGE_FLAGS_CHECK_AT_FREE; | ||
636 | } | ||
637 | if (unlikely(mem_cgroup_bad_page_check(page))) | ||
638 | bad_reason = "cgroup check failed"; | ||
639 | if (unlikely(bad_reason)) { | ||
640 | bad_page(page, bad_reason, bad_flags); | ||
627 | return 1; | 641 | return 1; |
628 | } | 642 | } |
629 | page_cpupid_reset_last(page); | 643 | page_cpupid_reset_last(page); |
@@ -813,7 +827,7 @@ static inline void expand(struct zone *zone, struct page *page, | |||
813 | area--; | 827 | area--; |
814 | high--; | 828 | high--; |
815 | size >>= 1; | 829 | size >>= 1; |
816 | VM_BUG_ON(bad_range(zone, &page[size])); | 830 | VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); |
817 | 831 | ||
818 | #ifdef CONFIG_DEBUG_PAGEALLOC | 832 | #ifdef CONFIG_DEBUG_PAGEALLOC |
819 | if (high < debug_guardpage_minorder()) { | 833 | if (high < debug_guardpage_minorder()) { |
@@ -843,12 +857,23 @@ static inline void expand(struct zone *zone, struct page *page, | |||
843 | */ | 857 | */ |
844 | static inline int check_new_page(struct page *page) | 858 | static inline int check_new_page(struct page *page) |
845 | { | 859 | { |
846 | if (unlikely(page_mapcount(page) | | 860 | char *bad_reason = NULL; |
847 | (page->mapping != NULL) | | 861 | unsigned long bad_flags = 0; |
848 | (atomic_read(&page->_count) != 0) | | 862 | |
849 | (page->flags & PAGE_FLAGS_CHECK_AT_PREP) | | 863 | if (unlikely(page_mapcount(page))) |
850 | (mem_cgroup_bad_page_check(page)))) { | 864 | bad_reason = "nonzero mapcount"; |
851 | bad_page(page); | 865 | if (unlikely(page->mapping != NULL)) |
866 | bad_reason = "non-NULL mapping"; | ||
867 | if (unlikely(atomic_read(&page->_count) != 0)) | ||
868 | bad_reason = "nonzero _count"; | ||
869 | if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { | ||
870 | bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; | ||
871 | bad_flags = PAGE_FLAGS_CHECK_AT_PREP; | ||
872 | } | ||
873 | if (unlikely(mem_cgroup_bad_page_check(page))) | ||
874 | bad_reason = "cgroup check failed"; | ||
875 | if (unlikely(bad_reason)) { | ||
876 | bad_page(page, bad_reason, bad_flags); | ||
852 | return 1; | 877 | return 1; |
853 | } | 878 | } |
854 | return 0; | 879 | return 0; |
@@ -955,7 +980,7 @@ int move_freepages(struct zone *zone, | |||
955 | 980 | ||
956 | for (page = start_page; page <= end_page;) { | 981 | for (page = start_page; page <= end_page;) { |
957 | /* Make sure we are not inadvertently changing nodes */ | 982 | /* Make sure we are not inadvertently changing nodes */ |
958 | VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); | 983 | VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); |
959 | 984 | ||
960 | if (!pfn_valid_within(page_to_pfn(page))) { | 985 | if (!pfn_valid_within(page_to_pfn(page))) { |
961 | page++; | 986 | page++; |
@@ -1404,8 +1429,8 @@ void split_page(struct page *page, unsigned int order) | |||
1404 | { | 1429 | { |
1405 | int i; | 1430 | int i; |
1406 | 1431 | ||
1407 | VM_BUG_ON(PageCompound(page)); | 1432 | VM_BUG_ON_PAGE(PageCompound(page), page); |
1408 | VM_BUG_ON(!page_count(page)); | 1433 | VM_BUG_ON_PAGE(!page_count(page), page); |
1409 | 1434 | ||
1410 | #ifdef CONFIG_KMEMCHECK | 1435 | #ifdef CONFIG_KMEMCHECK |
1411 | /* | 1436 | /* |
@@ -1552,7 +1577,7 @@ again: | |||
1552 | zone_statistics(preferred_zone, zone, gfp_flags); | 1577 | zone_statistics(preferred_zone, zone, gfp_flags); |
1553 | local_irq_restore(flags); | 1578 | local_irq_restore(flags); |
1554 | 1579 | ||
1555 | VM_BUG_ON(bad_range(zone, page)); | 1580 | VM_BUG_ON_PAGE(bad_range(zone, page), page); |
1556 | if (prep_new_page(page, order, gfp_flags)) | 1581 | if (prep_new_page(page, order, gfp_flags)) |
1557 | goto again; | 1582 | goto again; |
1558 | return page; | 1583 | return page; |
@@ -5729,7 +5754,12 @@ module_init(init_per_zone_wmark_min) | |||
5729 | int min_free_kbytes_sysctl_handler(ctl_table *table, int write, | 5754 | int min_free_kbytes_sysctl_handler(ctl_table *table, int write, |
5730 | void __user *buffer, size_t *length, loff_t *ppos) | 5755 | void __user *buffer, size_t *length, loff_t *ppos) |
5731 | { | 5756 | { |
5732 | proc_dointvec(table, write, buffer, length, ppos); | 5757 | int rc; |
5758 | |||
5759 | rc = proc_dointvec_minmax(table, write, buffer, length, ppos); | ||
5760 | if (rc) | ||
5761 | return rc; | ||
5762 | |||
5733 | if (write) { | 5763 | if (write) { |
5734 | user_min_free_kbytes = min_free_kbytes; | 5764 | user_min_free_kbytes = min_free_kbytes; |
5735 | setup_per_zone_wmarks(); | 5765 | setup_per_zone_wmarks(); |
@@ -5996,7 +6026,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags, | |||
5996 | pfn = page_to_pfn(page); | 6026 | pfn = page_to_pfn(page); |
5997 | bitmap = get_pageblock_bitmap(zone, pfn); | 6027 | bitmap = get_pageblock_bitmap(zone, pfn); |
5998 | bitidx = pfn_to_bitidx(zone, pfn); | 6028 | bitidx = pfn_to_bitidx(zone, pfn); |
5999 | VM_BUG_ON(!zone_spans_pfn(zone, pfn)); | 6029 | VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); |
6000 | 6030 | ||
6001 | for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) | 6031 | for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) |
6002 | if (flags & value) | 6032 | if (flags & value) |
@@ -6494,12 +6524,24 @@ static void dump_page_flags(unsigned long flags) | |||
6494 | printk(")\n"); | 6524 | printk(")\n"); |
6495 | } | 6525 | } |
6496 | 6526 | ||
6497 | void dump_page(struct page *page) | 6527 | void dump_page_badflags(struct page *page, char *reason, unsigned long badflags) |
6498 | { | 6528 | { |
6499 | printk(KERN_ALERT | 6529 | printk(KERN_ALERT |
6500 | "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", | 6530 | "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", |
6501 | page, atomic_read(&page->_count), page_mapcount(page), | 6531 | page, atomic_read(&page->_count), page_mapcount(page), |
6502 | page->mapping, page->index); | 6532 | page->mapping, page->index); |
6503 | dump_page_flags(page->flags); | 6533 | dump_page_flags(page->flags); |
6534 | if (reason) | ||
6535 | pr_alert("page dumped because: %s\n", reason); | ||
6536 | if (page->flags & badflags) { | ||
6537 | pr_alert("bad because of flags:\n"); | ||
6538 | dump_page_flags(page->flags & badflags); | ||
6539 | } | ||
6504 | mem_cgroup_print_bad_page(page); | 6540 | mem_cgroup_print_bad_page(page); |
6505 | } | 6541 | } |
6542 | |||
6543 | void dump_page(struct page *page, char *reason) | ||
6544 | { | ||
6545 | dump_page_badflags(page, reason, 0); | ||
6546 | } | ||
6547 | EXPORT_SYMBOL_GPL(dump_page); | ||