Diffstat (limited to 'mm/page_alloc.c')
 -rw-r--r--   mm/page_alloc.c | 57
 1 files changed, 24 insertions, 33 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dd886fac451a..5248fe070aa4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -234,8 +234,8 @@ int page_group_by_mobility_disabled __read_mostly;
 
 void set_pageblock_migratetype(struct page *page, int migratetype)
 {
-
-	if (unlikely(page_group_by_mobility_disabled))
+	if (unlikely(page_group_by_mobility_disabled &&
+		     migratetype < MIGRATE_PCPTYPES))
 		migratetype = MIGRATE_UNMOVABLE;
 
 	set_pageblock_flags_group(page, (unsigned long)migratetype,
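Note: the new `migratetype < MIGRATE_PCPTYPES` test only collapses the ordinary per-cpu-page types to MIGRATE_UNMOVABLE when mobility grouping is disabled; special types such as CMA and isolation keep their value. A rough sketch of the enum ordering this relies on (not part of this diff; simplified from include/linux/mmzone.h of this kernel era, with the config #ifdefs omitted):

/* Sketch of the migratetype ordering assumed above; config guards omitted. */
enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,	/* number of types kept on the per-cpu lists */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
	MIGRATE_CMA,		/* only with CONFIG_CMA */
	MIGRATE_ISOLATE,	/* only with CONFIG_MEMORY_ISOLATION */
	MIGRATE_TYPES
};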
@@ -626,7 +626,7 @@ static inline int free_pages_check(struct page *page)
 		bad_page(page);
 		return 1;
 	}
-	page_nid_reset_last(page);
+	page_cpupid_reset_last(page);
 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	return 0;
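The page_nid_reset_last() to page_cpupid_reset_last() change follows the rename of the per-page "last NUMA node" tracking to a combined cpu+pid value used by NUMA balancing. A hedged sketch of the variant where the value is packed into page->flags (names assumed from the LAST_CPUPID naming in this series, not quoted from the header):

/* Hedged sketch: reset the "last cpupid" stored in page->flags by setting
 * the field to all ones, i.e. the "not recorded" value. Mask and shift
 * names are assumptions, not taken from this diff. */
static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}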
@@ -1027,6 +1027,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
 {
 	int current_order = page_order(page);
 
+	/*
+	 * When borrowing from MIGRATE_CMA, we need to release the excess
+	 * buddy pages to CMA itself.
+	 */
 	if (is_migrate_cma(fallback_type))
 		return fallback_type;
 
@@ -1091,21 +1095,11 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			list_del(&page->lru);
 			rmv_page_order(page);
 
-			/*
-			 * Borrow the excess buddy pages as well, irrespective
-			 * of whether we stole freepages, or took ownership of
-			 * the pageblock or not.
-			 *
-			 * Exception: When borrowing from MIGRATE_CMA, release
-			 * the excess buddy pages to CMA itself.
-			 */
 			expand(zone, page, order, current_order, area,
-			       is_migrate_cma(migratetype)
-			       ? migratetype : start_migratetype);
+			       new_type);
 
-			trace_mm_page_alloc_extfrag(page, order,
-				current_order, start_migratetype, migratetype,
-				new_type == start_migratetype);
+			trace_mm_page_alloc_extfrag(page, order, current_order,
+				start_migratetype, migratetype, new_type);
 
 			return page;
 		}
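This hunk can drop the open-coded CMA special case because the migratetype decision has moved into try_to_steal_freepages() (see the comment added in the hunk above): its return value, new_type, is what expand() and the extfrag tracepoint now receive directly, and the tracepoint reports the type itself rather than a boolean. A sketch of the resulting caller flow, assumed from the surrounding hunks rather than quoted verbatim:

	/* Sketch of the converged flow in __rmqueue_fallback(); new_type is
	 * whatever migratetype the stolen buddy pages should end up with. */
	new_type = try_to_steal_freepages(zone, page, start_migratetype,
					  migratetype);

	list_del(&page->lru);
	rmv_page_order(page);

	expand(zone, page, order, current_order, area, new_type);

	trace_mm_page_alloc_extfrag(page, order, current_order,
				    start_migratetype, migratetype, new_type);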
@@ -1711,7 +1705,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
  * comments in mmzone.h. Reduces cache footprint of zonelist scans
  * that have to skip over a lot of full or unallowed zones.
  *
- * If the zonelist cache is present in the passed in zonelist, then
+ * If the zonelist cache is present in the passed zonelist, then
  * returns a pointer to the allowed node mask (either the current
  * tasks mems_allowed, or node_states[N_MEMORY].)
  *
@@ -1822,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
 
 static bool zone_local(struct zone *local_zone, struct zone *zone)
 {
-	return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
+	return local_zone->node == zone->node;
 }
 
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -1919,18 +1913,17 @@ zonelist_scan:
 		 * page was allocated in should have no effect on the
 		 * time the page has in memory before being reclaimed.
 		 *
-		 * When zone_reclaim_mode is enabled, try to stay in
-		 * local zones in the fastpath. If that fails, the
-		 * slowpath is entered, which will do another pass
-		 * starting with the local zones, but ultimately fall
-		 * back to remote zones that do not partake in the
-		 * fairness round-robin cycle of this zonelist.
+		 * Try to stay in local zones in the fastpath. If
+		 * that fails, the slowpath is entered, which will do
+		 * another pass starting with the local zones, but
+		 * ultimately fall back to remote zones that do not
+		 * partake in the fairness round-robin cycle of this
+		 * zonelist.
 		 */
 		if (alloc_flags & ALLOC_WMARK_LOW) {
 			if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
 				continue;
-			if (zone_reclaim_mode &&
-			    !zone_local(preferred_zone, zone))
+			if (!zone_local(preferred_zone, zone))
 				continue;
 		}
 		/*
@@ -2396,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * thrash fairness information for zones that are not
 		 * actually part of this zonelist's round-robin cycle.
 		 */
-		if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+		if (!zone_local(preferred_zone, zone))
 			continue;
 		mod_zone_page_state(zone, NR_ALLOC_BATCH,
 				    high_wmark_pages(zone) -
@@ -2593,7 +2586,7 @@ rebalance:
 	 * running out of options and have to consider going OOM
 	 */
 	if (!did_some_progress) {
-		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+		if (oom_gfp_allowed(gfp_mask)) {
 			if (oom_killer_disabled)
 				goto nopage;
 			/* Coredumps can quickly deplete all memory reserves */
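oom_gfp_allowed() simply factors out the open-coded test on the removed line; presumably it is a small inline helper along these lines (a sketch inferred from the expression it replaces, not quoted from the header):

/* Sketch of the new helper: the OOM killer is only worth invoking for
 * allocations that may perform fs-level reclaim and that have not asked
 * to fail quickly. */
static inline bool oom_gfp_allowed(gfp_t gfp_mask)
{
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
}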
@@ -3881,8 +3874,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
 	return ffz(~size);
 }
 
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
 /*
  * Check if a pageblock contains reserved pages
  */
@@ -4015,7 +4006,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		mminit_verify_page_links(page, zone, nid, pfn);
 		init_page_count(page);
 		page_mapcount_reset(page);
-		page_nid_reset_last(page);
+		page_cpupid_reset_last(page);
 		SetPageReserved(page);
 		/*
 		 * Mark the block movable so that blocks are reserved for
@@ -4266,7 +4257,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
 	 */
 	zone->pageset = &boot_pageset;
 
-	if (zone->present_pages)
+	if (populated_zone(zone))
 		printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
 			zone->name, zone->present_pages,
 			zone_batchsize(zone));
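The raw zone->present_pages checks here and in check_for_memory() below are converted to the populated_zone() helper, which wraps the same test; its definition is essentially the following (quoted from memory of include/linux/mmzone.h, not from this diff):

/* populated_zone(): true if the zone has any present pages at all. */
static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}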
@@ -5160,7 +5151,7 @@ static void check_for_memory(pg_data_t *pgdat, int nid)
 
 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
 		struct zone *zone = &pgdat->node_zones[zone_type];
-		if (zone->present_pages) {
+		if (populated_zone(zone)) {
 			node_set_state(nid, N_HIGH_MEMORY);
 			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
 			    zone_type <= ZONE_NORMAL)