Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	55
1 file changed, 41 insertions(+), 14 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 794e6715c226..0027d8f4a1bb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1981,14 +1981,20 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress,
-	bool sync_migration)
+	int migratetype, bool sync_migration,
+	bool *deferred_compaction,
+	unsigned long *did_some_progress)
 {
 	struct page *page;
 
-	if (!order || compaction_deferred(preferred_zone))
+	if (!order)
 		return NULL;
 
+	if (compaction_deferred(preferred_zone)) {
+		*deferred_compaction = true;
+		return NULL;
+	}
+
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration);
@@ -2016,7 +2022,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 * but not enough to satisfy watermarks.
 	 */
 	count_vm_event(COMPACTFAIL);
-	defer_compaction(preferred_zone);
+
+	/*
+	 * As async compaction considers a subset of pageblocks, only
+	 * defer if the failure was a sync compaction failure.
+	 */
+	if (sync_migration)
+		defer_compaction(preferred_zone);
 
 	cond_resched();
 }
@@ -2028,8 +2040,9 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress,
-	bool sync_migration)
+	int migratetype, bool sync_migration,
+	bool *deferred_compaction,
+	unsigned long *did_some_progress)
 {
 	return NULL;
 }
@@ -2179,6 +2192,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
 	bool sync_migration = false;
+	bool deferred_compaction = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2259,12 +2273,22 @@ rebalance:
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress,
-					sync_migration);
+					migratetype, sync_migration,
+					&deferred_compaction,
+					&did_some_progress);
 	if (page)
 		goto got_pg;
 	sync_migration = true;
 
+	/*
+	 * If compaction is deferred for high-order allocations, it is because
+	 * sync compaction recently failed. If this is the case and the caller
+	 * has requested the system not be heavily disrupted, fail the
+	 * allocation now instead of entering direct reclaim.
+	 */
+	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+		goto nopage;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
 					zonelist, high_zoneidx,
@@ -2328,8 +2352,9 @@ rebalance:
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress,
-					sync_migration);
+					migratetype, sync_migration,
+					&deferred_compaction,
+					&did_some_progress);
 	if (page)
 		goto got_pg;
 	}
@@ -4237,7 +4262,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, memmap_pages;
-		enum lru_list l;
+		enum lru_list lru;
 
 		size = zone_spanned_pages_in_node(nid, j, zones_size);
 		realsize = size - zone_absent_pages_in_node(nid, j,
@@ -4287,8 +4312,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_pgdat = pgdat;
 
 		zone_pcp_init(zone);
-		for_each_lru(l)
-			INIT_LIST_HEAD(&zone->lru[l].list);
+		for_each_lru(lru)
+			INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
 		zone->reclaim_stat.recent_rotated[0] = 0;
 		zone->reclaim_stat.recent_rotated[1] = 0;
 		zone->reclaim_stat.recent_scanned[0] = 0;
@@ -4642,8 +4667,10 @@ static void check_for_regular_memory(pg_data_t *pgdat)
 
 	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
 		struct zone *zone = &pgdat->node_zones[zone_type];
-		if (zone->present_pages)
+		if (zone->present_pages) {
 			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
+			break;
+		}
 	}
 #endif
 }