Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6a29ed8e6e6..38e5be65f24 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1513,6 +1513,7 @@ failed:
 #define ALLOC_HARDER		0x10 /* try to alloc harder */
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
+#define ALLOC_PFMEMALLOC	0x80 /* Caller has PF_MEMALLOC set */
 
 #ifdef CONFIG_FAIL_PAGE_ALLOC
 
@@ -2293,16 +2294,22 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
-		if (!in_interrupt() &&
-		    ((current->flags & PF_MEMALLOC) ||
-		     unlikely(test_thread_flag(TIF_MEMDIE))))
+	if ((current->flags & PF_MEMALLOC) ||
+			unlikely(test_thread_flag(TIF_MEMDIE))) {
+		alloc_flags |= ALLOC_PFMEMALLOC;
+
+		if (likely(!(gfp_mask & __GFP_NOMEMALLOC)) && !in_interrupt())
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
 
 	return alloc_flags;
 }
 
+bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
+{
+	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_PFMEMALLOC);
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
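
[The new gfp_pfmemalloc_allowed() helper lets other subsystems ask whether an allocation context is entitled to the PFMEMALLOC reserves without reimplementing the PF_MEMALLOC/TIF_MEMDIE checks. A minimal sketch of the kind of caller this enables; my_cache, cache_alloc_page and the reserve_page field are illustrative names, not part of this patch:]

#include <linux/gfp.h>
#include <linux/mm.h>

struct my_cache {
	struct page *reserve_page;	/* page set aside from the reserves */
};

/*
 * Hand out the reserve-backed page only to a context that would itself
 * be granted ALLOC_PFMEMALLOC; every other caller takes the normal
 * allocation path and may simply fail under memory pressure.
 */
static struct page *cache_alloc_page(struct my_cache *cache, gfp_t gfp_mask)
{
	if (cache->reserve_page && gfp_pfmemalloc_allowed(gfp_mask)) {
		struct page *page = cache->reserve_page;

		cache->reserve_page = NULL;
		return page;
	}
	return alloc_page(gfp_mask);
}
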
@@ -2490,10 +2497,18 @@ nopage:
 	warn_alloc_failed(gfp_mask, order, NULL);
 	return page;
 got_pg:
+	/*
+	 * page->pfmemalloc is set when the caller had PF_MEMALLOC set or
+	 * has been OOM killed. The expectation is that the caller is taking
+	 * steps that will free more memory. The caller should avoid the
+	 * page being used for !PFMEMALLOC purposes.
+	 */
+	page->pfmemalloc = !!(alloc_flags & ALLOC_PFMEMALLOC);
+
 	if (kmemcheck_enabled)
 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
-	return page;
 
+	return page;
 }
 
 /*
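
[With page->pfmemalloc stamped at got_pg, a consumer can detect that a page was handed out from the reserves and refuse to let ordinary requests piggyback on it. A sketch of that check, loosely modelled on the pfmemalloc handling the slab allocators gain elsewhere in this commit; pfmemalloc_match_sketch is an illustrative name:]

/*
 * An object carved from a pfmemalloc-backed page may only be served to
 * a context that is itself allowed to consume the reserves; any other
 * caller must allocate a fresh page instead.
 */
static bool pfmemalloc_match_sketch(struct page *page, gfp_t gfp_mask)
{
	if (unlikely(page->pfmemalloc))
		return gfp_pfmemalloc_allowed(gfp_mask);

	return true;
}
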
@@ -2544,6 +2559,8 @@ retry_cpuset:
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
+	else
+		page->pfmemalloc = false;
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 
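
[The else branch is needed because only the slow path runs the got_pg assignment above; a page returned by the fast path comes straight off the free lists with whatever value the field last held, since pfmemalloc is not cleared on free. A condensed sketch of the resulting split, with hypothetical stand-ins for get_page_from_freelist() and __alloc_pages_slowpath():]

static struct page *fast_alloc(unsigned int order);		    /* stand-in, honours watermarks */
static struct page *slow_alloc(gfp_t gfp_mask, unsigned int order); /* stand-in, stamps pfmemalloc */

struct page *alloc_sketch(gfp_t gfp_mask, unsigned int order)
{
	struct page *page = fast_alloc(order);

	if (unlikely(!page))
		page = slow_alloc(gfp_mask, order);	/* sets page->pfmemalloc */
	else
		page->pfmemalloc = false;		/* clear any stale value */

	return page;
}
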