author    Vlastimil Babka <vbabka@suse.cz>    2016-07-28 18:49:13 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-07-28 19:07:41 -0400
commit    31a6c1909f51dbe9bf08eb40dc64e3db90cf6f79 (patch)
tree      7adc87cfa64bdd01e308be5dfc2e7b65f84a17db /mm/page_alloc.c
parent    87cc271d5e4320d705cfdf59f68d4d037b3511b2 (diff)
mm, page_alloc: set alloc_flags only once in slowpath
In __alloc_pages_slowpath(), alloc_flags doesn't change after it's
initialized, so move the initialization above the retry: label.  Also make
the comment above the initialization more descriptive.

The only exception in the alloc_flags being constant is
ALLOC_NO_WATERMARKS, which may change due to TIF_MEMDIE being set on the
allocating thread.  We can fix this, and make the code simpler and a bit
more effective at the same time, by moving the part that determines
ALLOC_NO_WATERMARKS from gfp_to_alloc_flags() to gfp_pfmemalloc_allowed().
This means we don't have to mask out ALLOC_NO_WATERMARKS in numerous
places in __alloc_pages_slowpath() anymore.  The only two tests for the
flag can instead call gfp_pfmemalloc_allowed().

Link: http://lkml.kernel.org/r/20160721073614.24395-3-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
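As a standalone sketch of the pattern (a simplified userspace model;
to_alloc_flags(), pfmemalloc_allowed(), try_alloc() and the flag values
are illustrative names made up here, not the kernel's API): the
loop-invariant flag computation is hoisted above the retry label, while
the one input that can change between retries, standing in for the
kernel's TIF_MEMDIE, is re-checked through a dedicated predicate.

    #include <stdbool.h>
    #include <stdio.h>

    #define GFP_NOMEMALLOC 0x1u	/* illustrative stand-ins for gfp bits */
    #define GFP_MEMALLOC   0x2u

    static bool task_memdie;	/* models test_thread_flag(TIF_MEMDIE) */

    /* Depends only on gfp_mask, so the result is loop-invariant and can
     * be computed once, before the retry label. */
    static unsigned int to_alloc_flags(unsigned int gfp_mask)
    {
    	return gfp_mask;	/* placeholder mapping for the sketch */
    }

    /* May change between retries (task_memdie can be set mid-allocation),
     * so callers test it directly instead of caching a flag. */
    static bool pfmemalloc_allowed(unsigned int gfp_mask)
    {
    	if (gfp_mask & GFP_NOMEMALLOC)
    		return false;
    	if (gfp_mask & GFP_MEMALLOC)
    		return true;
    	return task_memdie;
    }

    static bool try_alloc(unsigned int alloc_flags)
    {
    	(void)alloc_flags;
    	return false;		/* pretend ordinary attempts keep failing */
    }

    int main(void)
    {
    	unsigned int gfp_mask = 0;
    	unsigned int alloc_flags = to_alloc_flags(gfp_mask);	/* once */
    	int attempt = 0;

    retry:
    	if (try_alloc(alloc_flags))
    		return 0;
    	/* Re-evaluated on every pass, like gfp_pfmemalloc_allowed(). */
    	if (pfmemalloc_allowed(gfp_mask))
    		printf("attempt %d: may ignore watermarks\n", attempt);
    	if (++attempt < 3) {
    		task_memdie = true;	/* state changed between retries */
    		goto retry;
    	}
    	return 1;
    }

Running the sketch, the predicate flips from false to true between
retries, which is exactly why it cannot be folded into the one-shot
alloc_flags.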
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  52
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c281125b2349..5703f7fca832 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3119,8 +3119,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 */
 	count_vm_event(COMPACTSTALL);
 
-	page = get_page_from_freelist(gfp_mask, order,
-					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
+	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 
 	if (page) {
 		struct zone *zone = page_zone(page);
@@ -3288,8 +3287,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 		return NULL;
 
 retry:
-	page = get_page_from_freelist(gfp_mask, order,
-					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
+	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 
 	/*
 	 * If an allocation failed after direct reclaim, it could be because
@@ -3351,16 +3349,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
-		if (gfp_mask & __GFP_MEMALLOC)
-			alloc_flags |= ALLOC_NO_WATERMARKS;
-		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
-			alloc_flags |= ALLOC_NO_WATERMARKS;
-		else if (!in_interrupt() &&
-				((current->flags & PF_MEMALLOC) ||
-				 unlikely(test_thread_flag(TIF_MEMDIE))))
-			alloc_flags |= ALLOC_NO_WATERMARKS;
-	}
 #ifdef CONFIG_CMA
 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
@@ -3370,7 +3358,19 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 
 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 {
-	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
+	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
+		return false;
+
+	if (gfp_mask & __GFP_MEMALLOC)
+		return true;
+	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
+		return true;
+	if (!in_interrupt() &&
+			((current->flags & PF_MEMALLOC) ||
+			 unlikely(test_thread_flag(TIF_MEMDIE))))
+		return true;
+
+	return false;
 }
 
 static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
@@ -3503,36 +3503,36 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	    (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
 		gfp_mask &= ~__GFP_ATOMIC;
 
-retry:
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
-		wake_all_kswapds(order, ac);
-
 	/*
-	 * OK, we're below the kswapd watermark and have kicked background
-	 * reclaim. Now things get more complex, so set up alloc_flags according
-	 * to how we want to proceed.
+	 * The fast path uses conservative alloc_flags to succeed only until
+	 * kswapd needs to be woken up, and to avoid the cost of setting up
+	 * alloc_flags precisely. So we do that now.
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+retry:
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		wake_all_kswapds(order, ac);
+
 	/*
 	 * Reset the zonelist iterators if memory policies can be ignored.
 	 * These allocations are high priority and system rather than user
 	 * orientated.
 	 */
-	if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+	if (!(alloc_flags & ALLOC_CPUSET) || gfp_pfmemalloc_allowed(gfp_mask)) {
 		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->high_zoneidx, ac->nodemask);
 	}
 
 	/* This is the last chance, in general, before the goto nopage. */
-	page = get_page_from_freelist(gfp_mask, order,
-				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
+	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 	if (page)
 		goto got_pg;
 
 	/* Allocate without watermarks if the context allows */
-	if (alloc_flags & ALLOC_NO_WATERMARKS) {
+	if (gfp_pfmemalloc_allowed(gfp_mask)) {
+
 		page = get_page_from_freelist(gfp_mask, order,
 						ALLOC_NO_WATERMARKS, ac);
 		if (page)