author | Vlastimil Babka <vbabka@suse.cz> | 2015-02-11 18:25:44 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-11 20:06:02 -0500
commit | 1a6d53a105406d97396c87511afd6f09b4dc8ad2 (patch)
tree | cca48e0dfdb6ef300f34921287cabdbb5da2e4d9 /mm/compaction.c
parent | a9263751e11a07af40a98dba88021821cd430cfd (diff)
mm: reduce try_to_compact_pages parameters
Expand the usage of the struct alloc_context introduced in the previous
patch to the call of try_to_compact_pages() as well, to reduce the number of
its parameters. Since the function is in a different compilation unit, we
need to move the alloc_context definition to the shared mm/internal.h header.
With this change we get simpler code and small savings in code size and stack
usage:
add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-27 (-27)
function old new delta
__alloc_pages_direct_compact 283 256 -27
add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-13 (-13)
function old new delta
try_to_compact_pages 582 569 -13
Stack usage of __alloc_pages_direct_compact goes from 24 to none (per
scripts/checkstack.pl).
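For readers unfamiliar with the pattern, the sketch below illustrates, outside the kernel and with stand-in types, how folding several arguments into one const context pointer shortens both the prototype and every call site. The struct fields, the stand-in types, and the compact_old()/compact_new() helpers are purely illustrative; they are not the mm/internal.h definition or the kernel functions.

```c
/*
 * Illustrative sketch only: the struct and stand-in types below are NOT the
 * kernel's definitions; they just mirror the idea of struct alloc_context
 * introduced by the previous patch.
 */
#include <stdio.h>

struct zonelist { int dummy; };                     /* stand-in for the kernel type */
typedef struct { unsigned long bits; } nodemask_t;  /* stand-in for the kernel type */

struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	int classzone_idx;
	int high_zoneidx;
};

/* Before: each piece of allocation state is a separate argument. */
static unsigned long compact_old(struct zonelist *zonelist, int order,
				 unsigned int gfp_mask, nodemask_t *nodemask,
				 int alloc_flags, int classzone_idx)
{
	(void)zonelist; (void)nodemask; (void)gfp_mask;
	return (unsigned long)(order + alloc_flags + classzone_idx);
}

/* After: one const context pointer replaces zonelist, nodemask and classzone_idx. */
static unsigned long compact_new(unsigned int gfp_mask, unsigned int order,
				 int alloc_flags, const struct alloc_context *ac)
{
	(void)gfp_mask;
	return (unsigned long)(order + alloc_flags + ac->classzone_idx);
}

int main(void)
{
	struct zonelist zl = { 0 };
	nodemask_t nm = { 0 };
	struct alloc_context ac = { .zonelist = &zl, .nodemask = &nm,
				    .classzone_idx = 1, .high_zoneidx = 2 };

	/* The caller builds the context once and passes a single pointer. */
	printf("old: %lu\n", compact_old(&zl, 3, 0, &nm, 0, ac.classzone_idx));
	printf("new: %lu\n", compact_new(0, 3, 0, &ac));
	return 0;
}
```

The code-size and stack savings reported above come largely from this shape: the caller materializes the context once and then passes one pointer instead of re-pushing each field for every call.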
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r-- | mm/compaction.c | 23
1 file changed, 11 insertions, 12 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 546e571e9d60..9c7e6909dd29 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1335,22 +1335,20 @@ int sysctl_extfrag_threshold = 500;
 
 /**
  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
- * @zonelist: The zonelist used for the current allocation
- * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
- * @nodemask: The allowed nodes to allocate from
+ * @order: The order of the current allocation
+ * @alloc_flags: The allocation flags of the current allocation
+ * @ac: The context of current allocation
  * @mode: The migration mode for async, sync light, or sync migration
  * @contended: Return value that determines if compaction was aborted due to
  *	       need_resched() or lock contention
  *
  * This is the main entry point for direct page compaction.
  */
-unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask,
-			enum migrate_mode mode, int *contended,
-			int alloc_flags, int classzone_idx)
+unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
+			int alloc_flags, const struct alloc_context *ac,
+			enum migrate_mode mode, int *contended)
 {
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
 	int may_perform_io = gfp_mask & __GFP_IO;
 	struct zoneref *z;
@@ -1365,8 +1363,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 		return COMPACT_SKIPPED;
 
 	/* Compact each zone in the list */
-	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
-								nodemask) {
+	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
+								ac->nodemask) {
 		int status;
 		int zone_contended;
 
@@ -1374,7 +1372,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			continue;
 
 		status = compact_zone_order(zone, order, gfp_mask, mode,
-				&zone_contended, alloc_flags, classzone_idx);
+				&zone_contended, alloc_flags,
+				ac->classzone_idx);
 		rc = max(status, rc);
 		/*
 		 * It takes at least one zone that wasn't lock contended
@@ -1384,7 +1383,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 
 		/* If a normal allocation would succeed, stop compacting */
 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
-					classzone_idx, alloc_flags)) {
+					ac->classzone_idx, alloc_flags)) {
 			/*
 			 * We think the allocation will succeed in this zone,
 			 * but it is not certain, hence the false. The caller