author     Andrea Arcangeli <aarcange@redhat.com>          2011-03-22 19:30:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 20:44:00 -0400
commit     d527caf22e48480b102c7c6ee5b9ba12170148f7
tree       7d53a2c430f8c020b6fa8390396dd2d1ce480b9a /mm/vmscan.c
parent     89699605fe7cfd8611900346f61cb6cbf179b10a
mm: compaction: prevent kswapd compacting memory to reduce CPU usage
This patch reverts 5a03b051 ("thp: use compaction in kswapd for GFP_ATOMIC
order > 0") following reports that kswapd CPU usage was higher and IRQs
were being disabled more frequently. The problem was reported at
http://www.spinics.net/linux/fedora/alsa-user/msg09885.html.
Without this patch applied, CPU usage by kswapd hovers around the 20% mark
according to the tester (Arthur Marsh:
http://www.spinics.net/linux/fedora/alsa-user/msg09899.html). With this
patch applied, it's around 2%.
The problem is not related to THP, which specifies __GFP_NO_KSWAPD, but is
triggered by high-order allocations hitting the low watermark for their
order and waking kswapd on kernels with CONFIG_COMPACTION set. The most
common trigger is network cards configured for jumbo frames, but
fork-heavy workloads (order-1) and some wireless cards that depend on
order-1 allocations can also trigger it.
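For illustration, the trigger can be sketched in isolation. This is a
minimal standalone sketch, not kernel code: the struct and helpers
(demo_zone, watermark_ok, wake_kswapd, alloc_pages_demo) are hypothetical
stand-ins for struct zone, zone_watermark_ok() and wakeup_kswapd().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for struct zone. */
struct demo_zone {
    unsigned long free_pages;
    unsigned long low_wmark;       /* low watermark, in pages */
    unsigned long free_blocks[11]; /* free blocks per order, as in the buddy lists */
};

/*
 * Simplified watermark check: enough free pages overall and at least one
 * free block at or above the requested order.  The real
 * zone_watermark_ok() also scales the watermark per order and honours
 * lowmem reserves; this only models the order-dependent failure.
 */
static bool watermark_ok(const struct demo_zone *z, int order)
{
    if (z->free_pages < z->low_wmark)
        return false;
    for (int o = order; o < 11; o++)
        if (z->free_blocks[o] > 0)
            return true;
    return false;
}

static void wake_kswapd(int order)
{
    printf("waking kswapd for an order-%d allocation\n", order);
}

/*
 * The trigger described above: a high-order request (e.g. order-3 for a
 * jumbo-frame skb) fails the watermark check for its order and wakes
 * kswapd, even though order-0 memory is plentiful.
 */
static void alloc_pages_demo(const struct demo_zone *z, int order)
{
    if (!watermark_ok(z, order))
        wake_kswapd(order);
}

int main(void)
{
    struct demo_zone z = { .free_pages = 4096, .low_wmark = 1024 };

    z.free_blocks[0] = 4096; /* plenty of single pages...              */
    alloc_pages_demo(&z, 3); /* ...but no order-3 blocks: wakes kswapd */
    return 0;
}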
The symptom for the user is high CPU usage by kswapd in low-memory
situations, which could be confused with a writeback problem. While a
patch like 5a03b051 may be reintroduced in the future, this patch plays
it safe for now and reverts it.
[mel@csn.ul.ie: Beefed up the changelog]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reported-by: Arthur Marsh <arthur.marsh@internode.on.net>
Tested-by: Arthur Marsh <arthur.marsh@internode.on.net>
Cc: <stable@kernel.org> [2.6.38.1]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 18 +-
 1 file changed, 1 insertion(+), 17 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6771ea70bfe7..3b4a41d72489 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2397,7 +2397,6 @@ loop_again:
 		 * cause too much scanning of the lower zones.
 		 */
 		for (i = 0; i <= end_zone; i++) {
-			int compaction;
 			struct zone *zone = pgdat->node_zones + i;
 			int nr_slab;
 
@@ -2428,24 +2427,9 @@ loop_again:
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 
-			compaction = 0;
-			if (order &&
-			    zone_watermark_ok(zone, 0,
-					high_wmark_pages(zone),
-					end_zone, 0) &&
-			    !zone_watermark_ok(zone, order,
-					high_wmark_pages(zone),
-					end_zone, 0)) {
-				compact_zone_order(zone,
-					order,
-					sc.gfp_mask, false,
-					COMPACT_MODE_KSWAPD);
-				compaction = 1;
-			}
-
 			if (zone->all_unreclaimable)
 				continue;
-			if (!compaction && nr_slab == 0 &&
+			if (nr_slab == 0 &&
 			    !zone_reclaimable(zone))
 				zone->all_unreclaimable = 1;
 			/*
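Read in isolation, the heuristic being removed treated "order-0 high
watermark met, order-N watermark not met" as pure fragmentation and
compacted the zone on every balance_pgdat() pass. Below is a hedged
standalone sketch of that decision, with hypothetical helpers (wmark_ok,
compact_zone_demo) standing in for zone_watermark_ok() and
compact_zone_order().

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for zone_watermark_ok(): order-0 always passes
 * here; higher orders fail when the zone is fragmented.
 */
static bool wmark_ok(int order, bool fragmented)
{
    return order == 0 || !fragmented;
}

/* Hypothetical stand-in for compact_zone_order(). */
static void compact_zone_demo(int order)
{
    printf("compacting zone for order-%d\n", order);
}

/*
 * The reverted decision from balance_pgdat(): if the zone meets the
 * order-0 high watermark but not the order-N one, the shortfall is
 * fragmentation rather than a lack of memory, so compact.  Running this
 * on every kswapd loop iteration is what pushed kswapd CPU usage to
 * around 20% and disabled IRQs more often.
 */
static bool maybe_compact(int order, bool fragmented)
{
    if (order && wmark_ok(0, fragmented) && !wmark_ok(order, fragmented)) {
        compact_zone_demo(order);
        return true; /* the removed code set compaction = 1 here */
    }
    return false;
}

int main(void)
{
    maybe_compact(3, true);  /* fragmented: compacts         */
    maybe_compact(3, false); /* not fragmented: does nothing */
    return 0;
}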