diff options
author | Zlatko Calusic <zlatko.calusic@iskon.hr> | 2013-02-22 19:34:06 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 20:50:15 -0500 |
commit | 258401a60c4df39332f30ef57afbc6dbf29a7e84 (patch) | |
tree | a397beebf23182f77ca6e688681d79eb3181308a /mm | |
parent | 4db0e950c5b78586bea9e1b027be849631f89a17 (diff) |
mm: don't wait on congested zones in balance_pgdat()
From: Zlatko Calusic <zlatko.calusic@iskon.hr>
Commit 92df3a723f84 ("mm: vmscan: throttle reclaim if encountering too
many dirty pages under writeback") introduced waiting on congested zones
based on a sane algorithm in shrink_inactive_list().
What this means is that there's no more need for throttling and
additional heuristics in balance_pgdat(). So, let's remove it and tidy
up the code.
Signed-off-by: Zlatko Calusic <zlatko.calusic@iskon.hr>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmscan.c | 29 | ||||
-rw-r--r-- | mm/vmstat.c | 1 |
2 files changed, 1 insertion(+), 29 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c index 8fde2fc223d9..b93968b71dc6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -2617,7 +2617,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, | |||
2617 | int *classzone_idx) | 2617 | int *classzone_idx) |
2618 | { | 2618 | { |
2619 | bool pgdat_is_balanced = false; | 2619 | bool pgdat_is_balanced = false; |
2620 | struct zone *unbalanced_zone; | ||
2621 | int i; | 2620 | int i; |
2622 | int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ | 2621 | int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ |
2623 | unsigned long total_scanned; | 2622 | unsigned long total_scanned; |
@@ -2648,9 +2647,6 @@ loop_again: | |||
2648 | 2647 | ||
2649 | do { | 2648 | do { |
2650 | unsigned long lru_pages = 0; | 2649 | unsigned long lru_pages = 0; |
2651 | int has_under_min_watermark_zone = 0; | ||
2652 | |||
2653 | unbalanced_zone = NULL; | ||
2654 | 2650 | ||
2655 | /* | 2651 | /* |
2656 | * Scan in the highmem->dma direction for the highest | 2652 | * Scan in the highmem->dma direction for the highest |
@@ -2790,17 +2786,7 @@ loop_again: | |||
2790 | continue; | 2786 | continue; |
2791 | } | 2787 | } |
2792 | 2788 | ||
2793 | if (!zone_balanced(zone, testorder, 0, end_zone)) { | 2789 | if (zone_balanced(zone, testorder, 0, end_zone)) |
2794 | unbalanced_zone = zone; | ||
2795 | /* | ||
2796 | * We are still under min water mark. This | ||
2797 | * means that we have a GFP_ATOMIC allocation | ||
2798 | * failure risk. Hurry up! | ||
2799 | */ | ||
2800 | if (!zone_watermark_ok_safe(zone, order, | ||
2801 | min_wmark_pages(zone), end_zone, 0)) | ||
2802 | has_under_min_watermark_zone = 1; | ||
2803 | } else { | ||
2804 | /* | 2790 | /* |
2805 | * If a zone reaches its high watermark, | 2791 | * If a zone reaches its high watermark, |
2806 | * consider it to be no longer congested. It's | 2792 | * consider it to be no longer congested. It's |
@@ -2809,8 +2795,6 @@ loop_again: | |||
2809 | * speculatively avoid congestion waits | 2795 | * speculatively avoid congestion waits |
2810 | */ | 2796 | */ |
2811 | zone_clear_flag(zone, ZONE_CONGESTED); | 2797 | zone_clear_flag(zone, ZONE_CONGESTED); |
2812 | } | ||
2813 | |||
2814 | } | 2798 | } |
2815 | 2799 | ||
2816 | /* | 2800 | /* |
@@ -2828,17 +2812,6 @@ loop_again: | |||
2828 | } | 2812 | } |
2829 | 2813 | ||
2830 | /* | 2814 | /* |
2831 | * OK, kswapd is getting into trouble. Take a nap, then take | ||
2832 | * another pass across the zones. | ||
2833 | */ | ||
2834 | if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) { | ||
2835 | if (has_under_min_watermark_zone) | ||
2836 | count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); | ||
2837 | else if (unbalanced_zone) | ||
2838 | wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10); | ||
2839 | } | ||
2840 | |||
2841 | /* | ||
2842 | * We do this so kswapd doesn't build up large priorities for | 2815 | * We do this so kswapd doesn't build up large priorities for |
2843 | * example when it is freeing in parallel with allocators. It | 2816 | * example when it is freeing in parallel with allocators. It |
2844 | * matches the direct reclaim path behaviour in terms of impact | 2817 | * matches the direct reclaim path behaviour in terms of impact |
diff --git a/mm/vmstat.c b/mm/vmstat.c index c9d1f68120cd..57f02fd1768b 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -770,7 +770,6 @@ const char * const vmstat_text[] = { | |||
770 | "kswapd_inodesteal", | 770 | "kswapd_inodesteal", |
771 | "kswapd_low_wmark_hit_quickly", | 771 | "kswapd_low_wmark_hit_quickly", |
772 | "kswapd_high_wmark_hit_quickly", | 772 | "kswapd_high_wmark_hit_quickly", |
773 | "kswapd_skip_congestion_wait", | ||
774 | "pageoutrun", | 773 | "pageoutrun", |
775 | "allocstall", | 774 | "allocstall", |
776 | 775 | ||