author     Andrea Arcangeli <aarcange@redhat.com>          2011-03-22 19:30:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 20:44:00 -0400
commit     d527caf22e48480b102c7c6ee5b9ba12170148f7 (patch)
tree       7d53a2c430f8c020b6fa8390396dd2d1ce480b9a /mm
parent     89699605fe7cfd8611900346f61cb6cbf179b10a (diff)
mm: compaction: prevent kswapd compacting memory to reduce CPU usage
This patch reverts 5a03b051 ("thp: use compaction in kswapd for GFP_ATOMIC
order > 0") due to reports that kswapd CPU usage was higher and IRQs were
being disabled more frequently. This was reported at
http://www.spinics.net/linux/fedora/alsa-user/msg09885.html.
Without this patch applied, CPU usage by kswapd hovers around the 20% mark
according to the tester (Arthur Marsh:
http://www.spinics.net/linux/fedora/alsa-user/msg09899.html). With this
patch applied, it's around 2%.
The problem is not related to THP, which specifies __GFP_NO_KSWAPD, but is
triggered by high-order allocations hitting the low watermark for their
order and waking kswapd on kernels with CONFIG_COMPACTION set. The most
common trigger is network cards configured for jumbo frames, but fork-heavy
workloads (order-1 allocations) and some wireless cards that depend on
order-1 allocations can also trigger it.
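
As an illustration of that wakeup path, below is a minimal userspace
sketch of an order-aware watermark check; the zone_model struct, the
watermark_ok() helper and the numbers are hypothetical stand-ins, not
the real mm/ implementation:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical model: a high-order allocation must leave the zone
 * above its low watermark plus 2^order pages, otherwise the
 * allocator wakes kswapd.
 */
struct zone_model {
	unsigned long free_pages;	/* free pages in the zone */
	unsigned long low_wmark;	/* low watermark, in pages */
};

/* Simplified check in the spirit of zone_watermark_ok() */
static bool watermark_ok(const struct zone_model *z, unsigned int order)
{
	return z->free_pages >= z->low_wmark + (1UL << order);
}

int main(void)
{
	struct zone_model z = { .free_pages = 1000, .low_wmark = 900 };
	unsigned int order = 7;		/* e.g. a jumbo-frame allocation */

	if (!watermark_ok(&z, order))
		printf("order-%u below low watermark: wake kswapd\n", order);
	return 0;
}

On CONFIG_COMPACTION kernels, the woken kswapd then also ran the
compaction pass that this patch removes.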
The symptom for the user is high CPU usage by kswapd in low-memory
situations, which could be confused with another writeback problem. While
a patch like 5a03b051 may be reintroduced in the future, this patch plays
it safe for now and reverts it.
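
To make the reverted behaviour concrete, here is a hypothetical
stand-alone model of the compact_finished() termination check before and
after this patch; the enum, the finished() helper and the watermark
numbers are simplified stand-ins for the real code in the diff below:

#include <stdbool.h>
#include <stdio.h>

enum result { CONTINUE, PARTIAL };

/*
 * Pre-revert, kswapd-mode compaction targeted the high watermark and
 * kept going even once a suitable page existed; post-revert, every
 * compactor targets the low watermark and stops when it is met.
 */
static enum result finished(unsigned long free, unsigned int order,
			    unsigned long low, unsigned long high,
			    bool kswapd_mode)
{
	unsigned long mark = kswapd_mode ? high : low;	/* pre-revert choice */

	if (free < mark + (1UL << order))
		return CONTINUE;	/* watermark not met yet */
	if (kswapd_mode)
		return CONTINUE;	/* pre-revert: keep building a pool */
	return PARTIAL;			/* a suitable page is free */
}

int main(void)
{
	/* 600 free pages meet low (256) + 2^4, but kswapd mode continues */
	printf("direct compactor: %s\n",
	       finished(600, 4, 256, 512, false) == PARTIAL ? "done" : "continue");
	printf("kswapd, pre-revert: %s\n",
	       finished(600, 4, 256, 512, true) == PARTIAL ? "done" : "continue");
	return 0;
}

Dropping the high-watermark target means kswapd no longer loops
compacting toward the larger goal, which is where the CPU time went.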
[mel@csn.ul.ie: Beefed up the changelog]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reported-by: Arthur Marsh <arthur.marsh@internode.on.net>
Tested-by: Arthur Marsh <arthur.marsh@internode.on.net>
Cc: <stable@kernel.org> [2.6.38.1]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c | 24
-rw-r--r--  mm/vmscan.c     | 18
2 files changed, 4 insertions(+), 38 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 8be430b812de..dcb058bd76c4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -42,8 +42,6 @@ struct compact_control {
 	unsigned int order;		/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
-
-	int compact_mode;
 };
 
 static unsigned long release_freepages(struct list_head *freelist)
@@ -397,10 +395,7 @@ static int compact_finished(struct zone *zone,
 		return COMPACT_COMPLETE;
 
 	/* Compaction run is not finished if the watermark is not met */
-	if (cc->compact_mode != COMPACT_MODE_KSWAPD)
-		watermark = low_wmark_pages(zone);
-	else
-		watermark = high_wmark_pages(zone);
+	watermark = low_wmark_pages(zone);
 	watermark += (1 << cc->order);
 
 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
@@ -413,15 +408,6 @@ static int compact_finished(struct zone *zone,
 	if (cc->order == -1)
 		return COMPACT_CONTINUE;
 
-	/*
-	 * Generating only one page of the right order is not enough
-	 * for kswapd, we must continue until we're above the high
-	 * watermark as a pool for high order GFP_ATOMIC allocations
-	 * too.
-	 */
-	if (cc->compact_mode == COMPACT_MODE_KSWAPD)
-		return COMPACT_CONTINUE;
-
 	/* Direct compactor: Is a suitable page free? */
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		/* Job done if page is free of the right migratetype */
@@ -543,8 +529,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 unsigned long compact_zone_order(struct zone *zone,
 				 int order, gfp_t gfp_mask,
-				 bool sync,
-				 int compact_mode)
+				 bool sync)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -553,7 +538,6 @@ unsigned long compact_zone_order(struct zone *zone,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
 		.sync = sync,
-		.compact_mode = compact_mode,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -599,8 +583,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 								nodemask) {
 		int status;
 
-		status = compact_zone_order(zone, order, gfp_mask, sync,
-					    COMPACT_MODE_DIRECT_RECLAIM);
+		status = compact_zone_order(zone, order, gfp_mask, sync);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
@@ -631,7 +614,6 @@ static int compact_node(int nid)
 			.nr_freepages = 0,
 			.nr_migratepages = 0,
 			.order = -1,
-			.compact_mode = COMPACT_MODE_DIRECT_RECLAIM,
 		};
 
 		zone = &pgdat->node_zones[zoneid];
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6771ea70bfe7..3b4a41d72489 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2397,7 +2397,6 @@ loop_again:
 		 * cause too much scanning of the lower zones.
 		 */
 		for (i = 0; i <= end_zone; i++) {
-			int compaction;
 			struct zone *zone = pgdat->node_zones + i;
 			int nr_slab;
 
@@ -2428,24 +2427,9 @@ loop_again:
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 
-			compaction = 0;
-			if (order &&
-			    zone_watermark_ok(zone, 0,
-					       high_wmark_pages(zone),
-					      end_zone, 0) &&
-			    !zone_watermark_ok(zone, order,
-					       high_wmark_pages(zone),
-					       end_zone, 0)) {
-				compact_zone_order(zone,
-						   order,
-						   sc.gfp_mask, false,
-						   COMPACT_MODE_KSWAPD);
-				compaction = 1;
-			}
-
 			if (zone->all_unreclaimable)
 				continue;
-			if (!compaction && nr_slab == 0 &&
+			if (nr_slab == 0 &&
 			    !zone_reclaimable(zone))
 				zone->all_unreclaimable = 1;
 			/*