author     Mel Gorman <mgorman@techsingularity.net>        2016-05-19 20:14:01 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-19 22:12:14 -0400
commit     4fcb0971175f6037590d7b7772fe6619261d2165 (patch)
tree       963444f36dc778cfb4768aec404d1eb923f0982d /mm/page_alloc.c
parent     3777999dd47ec00ec34a151b1d93c0a2b721e822 (diff)
mm, page_alloc: shorten the page allocator fast path
The page allocator fast path checks the page multiple times
unnecessarily. This patch avoids all of the slowpath checks if the
first allocation attempt succeeds.
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
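
To make the shape of the change easier to see before reading the diff, here is a
self-contained C sketch of the control flow before and after the patch. Every
identifier below (fast_alloc(), slow_alloc(), trace_alloc()) is a hypothetical
stand-in for get_page_from_freelist(), __alloc_pages_slowpath() and the
accounting calls; only the control flow mirrors the patch.

	/*
	 * Standalone sketch of the restructuring in __alloc_pages_nodemask().
	 * All names are hypothetical stand-ins, not real kernel interfaces.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct page { int dummy; };

	static struct page *fast_alloc(void)   /* stands in for get_page_from_freelist() */
	{
		return malloc(sizeof(struct page));
	}

	static struct page *slow_alloc(void)   /* stands in for __alloc_pages_slowpath() */
	{
		return malloc(sizeof(struct page));
	}

	static void trace_alloc(struct page *page)  /* stands in for the kmemcheck/trace calls */
	{
		printf("allocated %p\n", (void *)page);
	}

	/* Before: even a successful fast path fell through every check below. */
	static struct page *alloc_before(void)
	{
		struct page *page = fast_alloc();

		if (!page)
			page = slow_alloc();

		trace_alloc(page);      /* executed on both paths */
		return page;            /* preceded by retry checks that only matter on failure */
	}

	/* After: a successful fast path jumps straight to the common accounting,
	 * skipping the failure-only handling entirely. */
	static struct page *alloc_after(void)
	{
		struct page *page = fast_alloc();

		if (page)
			goto out;

		page = slow_alloc();
		/* failure-only handling (cpuset retry etc.) lives here */
	out:
		trace_alloc(page);
		return page;
	}

	int main(void)
	{
		free(alloc_before());
		free(alloc_after());
		return 0;
	}

The payoff is that the common, successful case executes no conditionals beyond
the single test on the first attempt's result.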
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3b7eb86f912..8380011d77db 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3398,31 +3398,26 @@ retry_cpuset:
 					ac.nodemask, &ac.preferred_zone);
 	if (!ac.preferred_zone) {
 		page = NULL;
-		goto out;
+		goto no_zone;
 	}
 
 	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
-	if (unlikely(!page)) {
-		/*
-		 * Runtime PM, block IO and its error handling path
-		 * can deadlock because I/O on the device might not
-		 * complete.
-		 */
-		alloc_mask = memalloc_noio_flags(gfp_mask);
-		ac.spread_dirty_pages = false;
-
-		page = __alloc_pages_slowpath(alloc_mask, order, &ac);
-	}
+	if (likely(page))
+		goto out;
 
-	if (kmemcheck_enabled && page)
-		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+	/*
+	 * Runtime PM, block IO and its error handling path can deadlock
+	 * because I/O on the device might not complete.
+	 */
+	alloc_mask = memalloc_noio_flags(gfp_mask);
+	ac.spread_dirty_pages = false;
 
-	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-out:
+no_zone:
 	/*
 	 * When updating a task's mems_allowed, it is possible to race with
 	 * parallel threads in such a way that an allocation can fail while
@@ -3434,6 +3429,12 @@ out:
 		goto retry_cpuset;
 	}
 
+out:
+	if (kmemcheck_enabled && page)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+
+	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
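
One detail worth noting in the first hunk: the branch hint flips from
if (unlikely(!page)) to if (likely(page)), telling the compiler to lay out the
success case as the straight-line, fall-through path. In the kernel these hints
are thin wrappers around the GCC/Clang __builtin_expect() builtin (defined in
include/linux/compiler.h); here is a minimal standalone demo of the same idiom:

	#include <stdio.h>

	/* The kernel defines its branch hints like this (include/linux/compiler.h): */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	int main(void)
	{
		struct { int ok; } fake = { 1 };

		if (likely(fake.ok))	/* compiler keeps this arm on the hot path */
			puts("fast path");
		else
			puts("slow path");
		return 0;
	}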