diff options
Diffstat (limited to 'mm/compaction.c')
-rw-r--r-- | mm/compaction.c | 48 |
1 file changed, 27 insertions(+), 21 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c index f9792ba3537c..1fc6736815e0 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -1086,9 +1086,9 @@ static int compact_finished(struct zone *zone, struct compact_control *cc, | |||
1086 | 1086 | ||
1087 | /* Compaction run is not finished if the watermark is not met */ | 1087 | /* Compaction run is not finished if the watermark is not met */ |
1088 | watermark = low_wmark_pages(zone); | 1088 | watermark = low_wmark_pages(zone); |
1089 | watermark += (1 << cc->order); | ||
1090 | 1089 | ||
1091 | if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) | 1090 | if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, |
1091 | cc->alloc_flags)) | ||
1092 | return COMPACT_CONTINUE; | 1092 | return COMPACT_CONTINUE; |
1093 | 1093 | ||
1094 | /* Direct compactor: Is a suitable page free? */ | 1094 | /* Direct compactor: Is a suitable page free? */ |
@@ -1114,7 +1114,8 @@ static int compact_finished(struct zone *zone, struct compact_control *cc, | |||
1114 | * COMPACT_PARTIAL - If the allocation would succeed without compaction | 1114 | * COMPACT_PARTIAL - If the allocation would succeed without compaction |
1115 | * COMPACT_CONTINUE - If compaction should run now | 1115 | * COMPACT_CONTINUE - If compaction should run now |
1116 | */ | 1116 | */ |
1117 | unsigned long compaction_suitable(struct zone *zone, int order) | 1117 | unsigned long compaction_suitable(struct zone *zone, int order, |
1118 | int alloc_flags, int classzone_idx) | ||
1118 | { | 1119 | { |
1119 | int fragindex; | 1120 | int fragindex; |
1120 | unsigned long watermark; | 1121 | unsigned long watermark; |
@@ -1126,21 +1127,30 @@ unsigned long compaction_suitable(struct zone *zone, int order) | |||
1126 | if (order == -1) | 1127 | if (order == -1) |
1127 | return COMPACT_CONTINUE; | 1128 | return COMPACT_CONTINUE; |
1128 | 1129 | ||
1130 | watermark = low_wmark_pages(zone); | ||
1131 | /* | ||
1132 | * If watermarks for high-order allocation are already met, there | ||
1133 | * should be no need for compaction at all. | ||
1134 | */ | ||
1135 | if (zone_watermark_ok(zone, order, watermark, classzone_idx, | ||
1136 | alloc_flags)) | ||
1137 | return COMPACT_PARTIAL; | ||
1138 | |||
1129 | /* | 1139 | /* |
1130 | * Watermarks for order-0 must be met for compaction. Note the 2UL. | 1140 | * Watermarks for order-0 must be met for compaction. Note the 2UL. |
1131 | * This is because during migration, copies of pages need to be | 1141 | * This is because during migration, copies of pages need to be |
1132 | * allocated and for a short time, the footprint is higher | 1142 | * allocated and for a short time, the footprint is higher |
1133 | */ | 1143 | */ |
1134 | watermark = low_wmark_pages(zone) + (2UL << order); | 1144 | watermark += (2UL << order); |
1135 | if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) | 1145 | if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags)) |
1136 | return COMPACT_SKIPPED; | 1146 | return COMPACT_SKIPPED; |
1137 | 1147 | ||
1138 | /* | 1148 | /* |
1139 | * fragmentation index determines if allocation failures are due to | 1149 | * fragmentation index determines if allocation failures are due to |
1140 | * low memory or external fragmentation | 1150 | * low memory or external fragmentation |
1141 | * | 1151 | * |
1142 | * index of -1000 implies allocations might succeed depending on | 1152 | * index of -1000 would imply allocations might succeed depending on |
1143 | * watermarks | 1153 | * watermarks, but we already failed the high-order watermark check |
1144 | * index towards 0 implies failure is due to lack of memory | 1154 | * index towards 0 implies failure is due to lack of memory |
1145 | * index towards 1000 implies failure is due to fragmentation | 1155 | * index towards 1000 implies failure is due to fragmentation |
1146 | * | 1156 | * |
@@ -1150,10 +1160,6 @@ unsigned long compaction_suitable(struct zone *zone, int order) | |||
1150 | if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) | 1160 | if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) |
1151 | return COMPACT_SKIPPED; | 1161 | return COMPACT_SKIPPED; |
1152 | 1162 | ||
1153 | if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark, | ||
1154 | 0, 0)) | ||
1155 | return COMPACT_PARTIAL; | ||
1156 | |||
1157 | return COMPACT_CONTINUE; | 1163 | return COMPACT_CONTINUE; |
1158 | } | 1164 | } |
1159 | 1165 | ||
@@ -1165,7 +1171,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) | |||
1165 | const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); | 1171 | const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); |
1166 | const bool sync = cc->mode != MIGRATE_ASYNC; | 1172 | const bool sync = cc->mode != MIGRATE_ASYNC; |
1167 | 1173 | ||
1168 | ret = compaction_suitable(zone, cc->order); | 1174 | ret = compaction_suitable(zone, cc->order, cc->alloc_flags, |
1175 | cc->classzone_idx); | ||
1169 | switch (ret) { | 1176 | switch (ret) { |
1170 | case COMPACT_PARTIAL: | 1177 | case COMPACT_PARTIAL: |
1171 | case COMPACT_SKIPPED: | 1178 | case COMPACT_SKIPPED: |
@@ -1254,7 +1261,8 @@ out: | |||
1254 | } | 1261 | } |
1255 | 1262 | ||
1256 | static unsigned long compact_zone_order(struct zone *zone, int order, | 1263 | static unsigned long compact_zone_order(struct zone *zone, int order, |
1257 | gfp_t gfp_mask, enum migrate_mode mode, int *contended) | 1264 | gfp_t gfp_mask, enum migrate_mode mode, int *contended, |
1265 | int alloc_flags, int classzone_idx) | ||
1258 | { | 1266 | { |
1259 | unsigned long ret; | 1267 | unsigned long ret; |
1260 | struct compact_control cc = { | 1268 | struct compact_control cc = { |
@@ -1264,6 +1272,8 @@ static unsigned long compact_zone_order(struct zone *zone, int order, | |||
1264 | .gfp_mask = gfp_mask, | 1272 | .gfp_mask = gfp_mask, |
1265 | .zone = zone, | 1273 | .zone = zone, |
1266 | .mode = mode, | 1274 | .mode = mode, |
1275 | .alloc_flags = alloc_flags, | ||
1276 | .classzone_idx = classzone_idx, | ||
1267 | }; | 1277 | }; |
1268 | INIT_LIST_HEAD(&cc.freepages); | 1278 | INIT_LIST_HEAD(&cc.freepages); |
1269 | INIT_LIST_HEAD(&cc.migratepages); | 1279 | INIT_LIST_HEAD(&cc.migratepages); |
@@ -1295,6 +1305,7 @@ int sysctl_extfrag_threshold = 500; | |||
1295 | unsigned long try_to_compact_pages(struct zonelist *zonelist, | 1305 | unsigned long try_to_compact_pages(struct zonelist *zonelist, |
1296 | int order, gfp_t gfp_mask, nodemask_t *nodemask, | 1306 | int order, gfp_t gfp_mask, nodemask_t *nodemask, |
1297 | enum migrate_mode mode, int *contended, | 1307 | enum migrate_mode mode, int *contended, |
1308 | int alloc_flags, int classzone_idx, | ||
1298 | struct zone **candidate_zone) | 1309 | struct zone **candidate_zone) |
1299 | { | 1310 | { |
1300 | enum zone_type high_zoneidx = gfp_zone(gfp_mask); | 1311 | enum zone_type high_zoneidx = gfp_zone(gfp_mask); |
@@ -1303,7 +1314,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, | |||
1303 | struct zoneref *z; | 1314 | struct zoneref *z; |
1304 | struct zone *zone; | 1315 | struct zone *zone; |
1305 | int rc = COMPACT_DEFERRED; | 1316 | int rc = COMPACT_DEFERRED; |
1306 | int alloc_flags = 0; | ||
1307 | int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ | 1317 | int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ |
1308 | 1318 | ||
1309 | *contended = COMPACT_CONTENDED_NONE; | 1319 | *contended = COMPACT_CONTENDED_NONE; |
@@ -1312,10 +1322,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, | |||
1312 | if (!order || !may_enter_fs || !may_perform_io) | 1322 | if (!order || !may_enter_fs || !may_perform_io) |
1313 | return COMPACT_SKIPPED; | 1323 | return COMPACT_SKIPPED; |
1314 | 1324 | ||
1315 | #ifdef CONFIG_CMA | ||
1316 | if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) | ||
1317 | alloc_flags |= ALLOC_CMA; | ||
1318 | #endif | ||
1319 | /* Compact each zone in the list */ | 1325 | /* Compact each zone in the list */ |
1320 | for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, | 1326 | for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, |
1321 | nodemask) { | 1327 | nodemask) { |
@@ -1326,7 +1332,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, | |||
1326 | continue; | 1332 | continue; |
1327 | 1333 | ||
1328 | status = compact_zone_order(zone, order, gfp_mask, mode, | 1334 | status = compact_zone_order(zone, order, gfp_mask, mode, |
1329 | &zone_contended); | 1335 | &zone_contended, alloc_flags, classzone_idx); |
1330 | rc = max(status, rc); | 1336 | rc = max(status, rc); |
1331 | /* | 1337 | /* |
1332 | * It takes at least one zone that wasn't lock contended | 1338 | * It takes at least one zone that wasn't lock contended |
@@ -1335,8 +1341,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, | |||
1335 | all_zones_contended &= zone_contended; | 1341 | all_zones_contended &= zone_contended; |
1336 | 1342 | ||
1337 | /* If a normal allocation would succeed, stop compacting */ | 1343 | /* If a normal allocation would succeed, stop compacting */ |
1338 | if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, | 1344 | if (zone_watermark_ok(zone, order, low_wmark_pages(zone), |
1339 | alloc_flags)) { | 1345 | classzone_idx, alloc_flags)) { |
1340 | *candidate_zone = zone; | 1346 | *candidate_zone = zone; |
1341 | /* | 1347 | /* |
1342 | * We think the allocation will succeed in this zone, | 1348 | * We think the allocation will succeed in this zone, |