path: root/include/linux/compaction.h
author     Andrea Arcangeli <aarcange@redhat.com>  2011-03-22 19:30:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 20:44:00 -0400
commit     d527caf22e48480b102c7c6ee5b9ba12170148f7 (patch)
tree       7d53a2c430f8c020b6fa8390396dd2d1ce480b9a /include/linux/compaction.h
parent     89699605fe7cfd8611900346f61cb6cbf179b10a (diff)
mm: compaction: prevent kswapd compacting memory to reduce CPU usage
This patch reverts 5a03b051 ("thp: use compaction in kswapd for GFP_ATOMIC order > 0") due to reports stating that kswapd CPU usage was higher and IRQs were being disabled more frequently. This was reported at http://www.spinics.net/linux/fedora/alsa-user/msg09885.html.

Without this patch applied, CPU usage by kswapd hovers around the 20% mark according to the tester (Arthur Marsh: http://www.spinics.net/linux/fedora/alsa-user/msg09899.html). With this patch applied, it's around 2%.

The problem is not related to THP, which specifies __GFP_NO_KSWAPD, but is triggered by high-order allocations hitting the low watermark for their order and waking kswapd on kernels with CONFIG_COMPACTION set. The most common trigger for this is network cards configured for jumbo frames, but it's also possible it'll be triggered by fork-heavy workloads (order-1) and some wireless cards which depend on order-1 allocations. The symptoms for the user will be high CPU usage by kswapd in low-memory situations, which could be confused with another writeback problem.

While a patch like 5a03b051 may be reintroduced in the future, this patch plays it safe for now and reverts it.

[mel@csn.ul.ie: Beefed up the changelog]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reported-by: Arthur Marsh <arthur.marsh@internode.on.net>
Tested-by: Arthur Marsh <arthur.marsh@internode.on.net>
Cc: <stable@kernel.org> [2.6.38.1]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/compaction.h')
-rw-r--r--   include/linux/compaction.h   9
1 file changed, 2 insertions, 7 deletions
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index dfa2ed4c0d26..cc9f7a428649 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -11,9 +11,6 @@
 /* The full zone was compacted */
 #define COMPACT_COMPLETE	3
 
-#define COMPACT_MODE_DIRECT_RECLAIM	0
-#define COMPACT_MODE_KSWAPD		1
-
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -28,8 +25,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			bool sync);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
 extern unsigned long compact_zone_order(struct zone *zone, int order,
-			gfp_t gfp_mask, bool sync,
-			int compact_mode);
+			gfp_t gfp_mask, bool sync);
 
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
@@ -74,8 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
 }
 
 static inline unsigned long compact_zone_order(struct zone *zone, int order,
-			gfp_t gfp_mask, bool sync,
-			int compact_mode)
+			gfp_t gfp_mask, bool sync)
 {
 	return COMPACT_CONTINUE;
 }
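
For quick reference, a minimal sketch of how the declarations touched by this revert read afterwards, condensed from the hunks above. Only the affected interface is shown; the surrounding kernel types (struct zone, gfp_t, bool) and the COMPACT_CONTINUE constant are assumed to come from the usual kernel headers and are not reproduced here.

#ifdef CONFIG_COMPACTION
/* The compact_mode argument (and the COMPACT_MODE_* constants) are gone:
 * callers request compaction of a zone with only order, gfp_mask and sync. */
extern unsigned long compact_zone_order(struct zone *zone, int order,
					gfp_t gfp_mask, bool sync);
#else
/* Stub for !CONFIG_COMPACTION keeps callers compiling; it simply reports
 * COMPACT_CONTINUE, i.e. no compaction work was done. */
static inline unsigned long compact_zone_order(struct zone *zone, int order,
					       gfp_t gfp_mask, bool sync)
{
	return COMPACT_CONTINUE;
}
#endif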