about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorZlatko Calusic <zlatko.calusic@iskon.hr>2013-02-22 19:32:34 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 20:50:10 -0500
commitdafcb73e385e39b9a7ebd5c4ecbc4ae921862eb9 (patch)
tree407a7710a7c4c6382bb9d415cd8de05430821600 /mm
parent7103f16dbff20fa969c9500902d980d17f953fa6 (diff)
mm: avoid calling pgdat_balanced() needlessly
Now that balance_pgdat() is slightly tidied up, thanks to more capable pgdat_balanced(), it's become obvious that pgdat_balanced() is called to check the status, then break the loop if pgdat is balanced, just to be immediately called again. The second call is completely unnecessary, of course.

The patch introduces a pgdat_is_balanced boolean, which helps resolve the above suboptimal behavior, with the added benefit of slightly better documenting one other place in the function where we jump and skip lots of code.

Signed-off-by: Zlatko Calusic <zlatko.calusic@iskon.hr>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 463990941a78..4093b99044f6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2616,6 +2616,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 							int *classzone_idx)
 {
+	bool pgdat_is_balanced = false;
 	struct zone *unbalanced_zone;
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
@@ -2690,8 +2691,11 @@ loop_again:
 				zone_clear_flag(zone, ZONE_CONGESTED);
 		}
 	}
-	if (i < 0)
+
+	if (i < 0) {
+		pgdat_is_balanced = true;
 		goto out;
+	}
 
 	for (i = 0; i <= end_zone; i++) {
 		struct zone *zone = pgdat->node_zones + i;
@@ -2818,8 +2822,11 @@ loop_again:
 		    pfmemalloc_watermark_ok(pgdat))
 			wake_up(&pgdat->pfmemalloc_wait);
 
-		if (pgdat_balanced(pgdat, order, *classzone_idx))
+		if (pgdat_balanced(pgdat, order, *classzone_idx)) {
+			pgdat_is_balanced = true;
 			break;		/* kswapd: all done */
+		}
+
 		/*
 		 * OK, kswapd is getting into trouble.  Take a nap, then take
 		 * another pass across the zones.
@@ -2840,9 +2847,9 @@ loop_again:
 		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
 			break;
 	} while (--sc.priority >= 0);
-out:
 
-	if (!pgdat_balanced(pgdat, order, *classzone_idx)) {
+out:
+	if (!pgdat_is_balanced) {
 		cond_resched();
 
 		try_to_freeze();