Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  47
1 file changed, 38 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d1cf4f05dcda..bdd5c432c426 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -45,6 +45,7 @@
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
 #include <linux/memcontrol.h>
+#include <linux/debugobjects.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -532,8 +533,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
         if (reserved)
                 return;
 
-        if (!PageHighMem(page))
+        if (!PageHighMem(page)) {
                 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+                debug_check_no_obj_freed(page_address(page),
+                                         PAGE_SIZE << order);
+        }
         arch_free_page(page, order);
         kernel_map_pages(page, 1 << order, 0);
 
@@ -995,8 +999,10 @@ static void free_hot_cold_page(struct page *page, int cold)
         if (free_pages_check(page))
                 return;
 
-        if (!PageHighMem(page))
+        if (!PageHighMem(page)) {
                 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+                debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
+        }
         arch_free_page(page, 0);
         kernel_map_pages(page, 1, 0);
 
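
The two hunks above hook the debugobjects subsystem into both page-freeing paths: __free_pages_ok() for high-order frees and free_hot_cold_page() for single pages. With CONFIG_DEBUG_OBJECTS_FREE enabled, debug_check_no_obj_freed() scans the region being returned to the allocator and warns if a tracked object, such as a timer, is still active inside it. Below is a minimal sketch of the bug class this catches; struct my_ctx and my_ctx_release() are hypothetical names invented for illustration, while the timer and page-allocator calls are real kernel APIs.

#include <linux/gfp.h>
#include <linux/timer.h>

struct my_ctx {                         /* hypothetical example structure */
        struct timer_list timer;
        unsigned long data;
};

static void my_ctx_release(struct my_ctx *ctx, unsigned int order)
{
        /*
         * BUG: the timer may still be pending when the backing pages
         * go back to the allocator.  Correct code would call
         * del_timer_sync(&ctx->timer) first; with this patch and
         * CONFIG_DEBUG_OBJECTS_FREE, the free path now warns instead
         * of silently leaving a dangling timer behind.
         */
        free_pages((unsigned long)ctx, order);
}
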
@@ -1461,7 +1467,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
         struct task_struct *p = current;
         int do_retry;
         int alloc_flags;
-        int did_some_progress;
+        unsigned long did_some_progress;
+        unsigned long pages_reclaimed = 0;
 
         might_sleep_if(wait);
 
@@ -1611,14 +1618,26 @@ nofail_alloc:
          * Don't let big-order allocations loop unless the caller explicitly
          * requests that. Wait for some write requests to complete then retry.
          *
-         * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
-         * <= 3, but that may not be true in other implementations.
+         * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
+         * means __GFP_NOFAIL, but that may not be true in other
+         * implementations.
+         *
+         * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
+         * specified, then we retry until we no longer reclaim any pages
+         * (above), or we've reclaimed an order of pages at least as
+         * large as the allocation's order. In both cases, if the
+         * allocation still fails, we stop retrying.
          */
+        pages_reclaimed += did_some_progress;
         do_retry = 0;
         if (!(gfp_mask & __GFP_NORETRY)) {
-                if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
-                    (gfp_mask & __GFP_REPEAT))
+                if (order <= PAGE_ALLOC_COSTLY_ORDER) {
                         do_retry = 1;
+                } else {
+                        if (gfp_mask & __GFP_REPEAT &&
+                            pages_reclaimed < (1 << order))
+                                do_retry = 1;
+                }
                 if (gfp_mask & __GFP_NOFAIL)
                         do_retry = 1;
         }
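
Restated outside the allocator for readability, the rules above collapse into a single predicate. should_retry() below is not a function introduced by the patch, only a sketch of the decision it encodes; for example, an order-4 allocation with __GFP_REPEAT keeps retrying only until at least 1 << 4 = 16 pages have been reclaimed in total (the "reclaim made no progress" exit happens earlier in the allocation path).

#include <linux/gfp.h>
#include <linux/mmzone.h>       /* PAGE_ALLOC_COSTLY_ORDER */

/* Sketch only: mirrors the do_retry logic added above. */
static int should_retry(gfp_t gfp_mask, unsigned int order,
                        unsigned long pages_reclaimed)
{
        if (gfp_mask & __GFP_NORETRY)
                return 0;
        /* Cheap orders behave as before: retry indefinitely. */
        if (order <= PAGE_ALLOC_COSTLY_ORDER)
                return 1;
        /* Costly orders: __GFP_REPEAT retries are now bounded. */
        if ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1UL << order))
                return 1;
        /* __GFP_NOFAIL still forces a retry unconditionally. */
        return (gfp_mask & __GFP_NOFAIL) ? 1 : 0;
}
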
@@ -2524,7 +2543,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
         struct page *page;
         unsigned long end_pfn = start_pfn + size;
         unsigned long pfn;
+        struct zone *z;
 
+        z = &NODE_DATA(nid)->node_zones[zone];
         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                 /*
                  * There can be holes in boot-time mem_map[]s
@@ -2542,7 +2563,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 init_page_count(page);
                 reset_page_mapcount(page);
                 SetPageReserved(page);
-
                 /*
                  * Mark the block movable so that blocks are reserved for
                  * movable at startup. This will force kernel allocations
@@ -2551,8 +2571,15 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                  * kernel allocations are made. Later some blocks near
                  * the start are marked MIGRATE_RESERVE by
                  * setup_zone_migrate_reserve()
+                 *
+                 * bitmap is created for zone's valid pfn range. but memmap
+                 * can be created for invalid pages (for alignment)
+                 * check here not to call set_pageblock_migratetype() against
+                 * pfn out of zone.
                  */
-                if ((pfn & (pageblock_nr_pages-1)))
+                if ((z->zone_start_pfn <= pfn)
+                    && (pfn < z->zone_start_pfn + z->spanned_pages)
+                    && !(pfn & (pageblock_nr_pages - 1)))
                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
                 INIT_LIST_HEAD(&page->lru);
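
The point of the new guard: the pageblock usemap is sized for the zone's spanned pfn range, but the memmap may include extra, invalid pages at the edges for alignment. Calling set_pageblock_migratetype() for such a pfn would index the bitmap out of bounds. A sketch of the condition as a named predicate follows; pfn_in_zone() is purely illustrative and not part of the patch.

#include <linux/mmzone.h>

/* Illustrative helper: does the zone actually span this pfn? */
static inline int pfn_in_zone(const struct zone *z, unsigned long pfn)
{
        return z->zone_start_pfn <= pfn &&
               pfn < z->zone_start_pfn + z->spanned_pages;
}

With that predicate, the patched test reads as pfn_in_zone(z, pfn) && !(pfn & (pageblock_nr_pages - 1)), which is exactly what the three-clause if above checks: mark a block movable only at a pageblock boundary, and only for pfns the zone actually spans.
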
@@ -4464,6 +4491,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
         pfn = page_to_pfn(page);
         bitmap = get_pageblock_bitmap(zone, pfn);
         bitidx = pfn_to_bitidx(zone, pfn);
+        VM_BUG_ON(pfn < zone->zone_start_pfn);
+        VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 
         for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                 if (flags & value)
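
For reference, VM_BUG_ON() expands to BUG_ON() only when CONFIG_DEBUG_VM is set and to nothing otherwise, so these assertions cost nothing in production builds. An equivalent open-coded form of what the two new lines check:

#ifdef CONFIG_DEBUG_VM
        /* pfn must fall inside [zone_start_pfn, zone_start_pfn + spanned_pages) */
        BUG_ON(pfn < zone->zone_start_pfn);
        BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
#endif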