Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 72 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7945247b1e53..3a58221f4c22 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -614,6 +614,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			list = &pcp->lists[migratetype];
 		} while (list_empty(list));
 
+		/* This is the only non-empty list. Free them all. */
+		if (batch_free == MIGRATE_PCPTYPES)
+			batch_free = to_free;
+
 		do {
 			page = list_entry(list->prev, struct page, lru);
 			/* must delete as __free_one_page list manipulates */
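
An aside on the hunk above, since the new check only makes sense with the surrounding loop in view: the inner do/while walks the MIGRATE_PCPTYPES per-cpu lists round-robin, bumping batch_free once per list it inspects. Reaching batch_free == MIGRATE_PCPTYPES therefore means every other list was empty, so the one remaining list can be drained in a single pass instead of re-walking the empty lists on each outer iteration. A simplified userspace model of that selection logic (pcp lists reduced to plain counters, batch_free re-derived each pass; illustrative only, not the kernel code):

#include <stdio.h>

#define MIGRATE_PCPTYPES 3

/*
 * Simplified model of the round-robin drain in free_pcppages_bulk().
 * Assumes to_free <= total pages across all lists, as the kernel does.
 */
static void drain(int lists[MIGRATE_PCPTYPES], int to_free)
{
	int migratetype = 0;

	while (to_free) {
		int batch_free = 0;

		/* Walk to the next non-empty list, counting lists visited. */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
		} while (lists[migratetype] == 0);

		/* Visited every list: this is the only non-empty one. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		/* Free up to batch_free pages from this list. */
		while (to_free && batch_free-- && lists[migratetype]) {
			lists[migratetype]--;
			to_free--;
		}
	}
}

int main(void)
{
	int lists[MIGRATE_PCPTYPES] = { 5, 0, 0 };

	drain(lists, 5);	/* exercises the "only non-empty list" path */
	printf("remaining: %d %d %d\n", lists[0], lists[1], lists[2]);
	return 0;
}
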
@@ -863,9 +867,8 @@ static int move_freepages(struct zone *zone,
 		}
 
 		order = page_order(page);
-		list_del(&page->lru);
-		list_add(&page->lru,
-			&zone->free_area[order].free_list[migratetype]);
+		list_move(&page->lru,
+			  &zone->free_area[order].free_list[migratetype]);
 		page += 1 << order;
 		pages_moved += 1 << order;
 	}
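
The move_freepages() hunk is a mechanical cleanup: list_move() from <linux/list.h> is exactly the unlink-plus-insert pair it replaces, so behavior is unchanged. Abridged, and modulo minor differences between kernel versions:

static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);	/* unlink from wherever it is */
	list_add(list, head);			/* reinsert at the new head */
}
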
@@ -1333,7 +1336,7 @@ again:
 	}
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
-	zone_statistics(preferred_zone, zone);
+	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
 
 	VM_BUG_ON(bad_range(zone, page));
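
zone_statistics() gains a gfp_flags argument here; its body lives in mm/vmstat.c and is not part of this diff. The point, presumably, is to let the NUMA hit/miss accounting honor flags such as __GFP_OTHER_NODE, where "local" is judged against the node the memory is intended for rather than the node of the allocating CPU. A sketch of what the updated callee could look like (an assumption based on the flag's semantics, not code from this patch):

/* hypothetical sketch of zone_statistics() after this change */
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}

	/* __GFP_OTHER_NODE: judge locality from the intended node */
	if (z->node == ((flags & __GFP_OTHER_NODE) ?
			preferred_zone->node : numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
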
@@ -1714,6 +1717,20 @@ try_next_zone:
 	return page;
 }
 
+/*
+ * Large machines with many possible nodes should not always dump per-node
+ * meminfo in irq context.
+ */
+static inline bool should_suppress_show_mem(void)
+{
+	bool ret = false;
+
+#if NODES_SHIFT > 8
+	ret = in_interrupt();
+#endif
+	return ret;
+}
+
 static inline int
 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
 				unsigned long pages_reclaimed)
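
Note that should_suppress_show_mem() can only ever return true on configurations with NODES_SHIFT > 8, i.e. more than 1 << 8 == 256 possible NUMA nodes, and then only in interrupt context, where walking and printing per-node state for hundreds of nodes is too expensive; smaller machines keep the dump unconditionally. Its lone caller in this diff is added in the nopage: hunk below.
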
@@ -2085,7 +2102,7 @@ rebalance:
 					sync_migration);
 	if (page)
 		goto got_pg;
-	sync_migration = true;
+	sync_migration = !(gfp_mask & __GFP_NO_KSWAPD);
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
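
After the first async compaction attempt fails, the allocator used to switch unconditionally to synchronous migration; with this change, callers that pass __GFP_NO_KSWAPD (in this era, notably transparent-hugepage allocations that prefer failing fast over stalling) stay asynchronous. Illustrative only:

gfp_t gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_NO_KSWAPD;	/* a THP-style request (illustrative) */
bool sync_migration = !(gfp_mask & __GFP_NO_KSWAPD);		/* false: retry compaction asynchronously */
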
@@ -2157,11 +2174,25 @@ rebalance:
 
 nopage:
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
-		printk(KERN_WARNING "%s: page allocation failure."
-			" order:%d, mode:0x%x\n",
+		unsigned int filter = SHOW_MEM_FILTER_NODES;
+
+		/*
+		 * This documents exceptions given to allocations in certain
+		 * contexts that are allowed to allocate outside current's set
+		 * of allowed nodes.
+		 */
+		if (!(gfp_mask & __GFP_NOMEMALLOC))
+			if (test_thread_flag(TIF_MEMDIE) ||
+			    (current->flags & (PF_MEMALLOC | PF_EXITING)))
+				filter &= ~SHOW_MEM_FILTER_NODES;
+		if (in_interrupt() || !wait)
+			filter &= ~SHOW_MEM_FILTER_NODES;
+
+		pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n",
 			current->comm, order, gfp_mask);
 		dump_stack();
-		show_mem();
+		if (!should_suppress_show_mem())
+			__show_mem(filter);
 	}
 	return page;
 got_pg:
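
The net effect at nopage: is that the failure dump defaults to showing only the nodes current's cpuset allows, and the filter is dropped exactly where the allocation itself may have strayed outside that set: TIF_MEMDIE / PF_MEMALLOC / PF_EXITING tasks (unless __GFP_NOMEMALLOC forbids the exemption), interrupt context, and atomic allocations (!wait, i.e. no __GFP_WAIT). Any other path that wants the filtered dump can ask for it directly; a hypothetical call:

__show_mem(SHOW_MEM_FILTER_NODES);	/* dump meminfo only for nodes in current's cpuset */
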
@@ -2411,19 +2442,42 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 }
 #endif
 
+/*
+ * Determine whether the zone's node should be displayed or not, depending on
+ * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas().
+ */
+static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone)
+{
+	bool ret = false;
+
+	if (!(flags & SHOW_MEM_FILTER_NODES))
+		goto out;
+
+	get_mems_allowed();
+	ret = !node_isset(zone->zone_pgdat->node_id,
+				cpuset_current_mems_allowed);
+	put_mems_allowed();
+out:
+	return ret;
+}
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 
 /*
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
  * memory on each free list with the exception of the first item on the list.
+ * Suppresses nodes that are not allowed by current's cpuset if
+ * SHOW_MEM_FILTER_NODES is passed.
  */
-void show_free_areas(void)
+void __show_free_areas(unsigned int filter)
 {
 	int cpu;
 	struct zone *zone;
 
 	for_each_populated_zone(zone) {
+		if (skip_free_areas_zone(filter, zone))
+			continue;
 		show_node(zone);
 		printk("%s per-cpu:\n", zone->name);
 
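
Worth noting about skip_free_areas_zone() above: the node mask is sampled under the get_mems_allowed()/put_mems_allowed() pair so a concurrent cpuset update cannot tear the check, and a zone is skipped only when filtering was requested and its node lies outside current's cpuset:

  filter & SHOW_MEM_FILTER_NODES | zone's node in mems_allowed | skipped?
  unset                          | (any)                       | no
  set                            | yes                         | no
  set                            | no                          | yes

With filter == 0 every populated zone still prints, which is what preserves the legacy show_free_areas() behavior.
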
@@ -2465,6 +2519,8 @@ void show_free_areas(void)
 	for_each_populated_zone(zone) {
 		int i;
 
+		if (skip_free_areas_zone(filter, zone))
+			continue;
 		show_node(zone);
 		printk("%s"
 			" free:%lukB"
@@ -2532,6 +2588,8 @@ void show_free_areas(void)
 	for_each_populated_zone(zone) {
 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
+		if (skip_free_areas_zone(filter, zone))
+			continue;
 		show_node(zone);
 		printk("%s: ", zone->name);
 
@@ -2551,6 +2609,11 @@ void show_free_areas(void)
 	show_swap_cache_info();
 }
 
+void show_free_areas(void)
+{
+	__show_free_areas(0);
+}
+
 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
 {
 	zoneref->zone = zone;
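
Since show_free_areas() survives as a zero-filter wrapper, existing callers (for example the show_mem() path behind sysrq-m) need no changes and keep the unfiltered dump; only callers that opt in get filtering. Illustrative usage, assuming both declarations sit together in the headers:

show_free_areas();				/* legacy: all populated zones */
__show_free_areas(SHOW_MEM_FILTER_NODES);	/* only nodes in current's cpuset */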