Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  59
1 file changed, 35 insertions(+), 24 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4f49535d4cd3..5ed24b94c5e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2326,7 +2326,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
 		return true;
 
 	/* Check the watermark levels */
-	for (i = 0; i < pgdat->nr_zones; i++) {
+	for (i = 0; i <= classzone_idx; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
 		if (!populated_zone(zone))
@@ -2344,7 +2344,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
 		}
 
 		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-							classzone_idx, 0))
+							i, 0))
 			all_zones_ok = false;
 		else
 			balanced += zone->present_pages;
@@ -2451,7 +2451,6 @@ loop_again:
 			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), 0, 0)) {
 				end_zone = i;
-				*classzone_idx = i;
 				break;
 			}
 		}
@@ -2510,18 +2509,18 @@ loop_again:
 				KSWAPD_ZONE_BALANCE_GAP_RATIO);
 			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone) + balance_gap,
-					end_zone, 0))
+					end_zone, 0)) {
 				shrink_zone(priority, zone, &sc);
-			reclaim_state->reclaimed_slab = 0;
-			nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
-			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-			total_scanned += sc.nr_scanned;
+
+				reclaim_state->reclaimed_slab = 0;
+				nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
+				sc.nr_reclaimed += reclaim_state->reclaimed_slab;
+				total_scanned += sc.nr_scanned;
 
-			if (zone->all_unreclaimable)
-				continue;
-			if (nr_slab == 0 &&
-			    !zone_reclaimable(zone))
-				zone->all_unreclaimable = 1;
+				if (nr_slab == 0 && !zone_reclaimable(zone))
+					zone->all_unreclaimable = 1;
+			}
+
 			/*
 			 * If we've done a decent amount of scanning and
 			 * the reclaim ratio is low, start doing writepage
@@ -2531,6 +2530,12 @@ loop_again:
 			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
 				sc.may_writepage = 1;
 
+			if (zone->all_unreclaimable) {
+				if (end_zone && end_zone == i)
+					end_zone--;
+				continue;
+			}
+
 			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), end_zone, 0)) {
 				all_zones_ok = 0;
@@ -2709,8 +2714,8 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
  */
 static int kswapd(void *p)
 {
-	unsigned long order;
-	int classzone_idx;
+	unsigned long order, new_order;
+	int classzone_idx, new_classzone_idx;
 	pg_data_t *pgdat = (pg_data_t*)p;
 	struct task_struct *tsk = current;
 
@@ -2740,17 +2745,23 @@ static int kswapd(void *p)
 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
 	set_freezable();
 
-	order = 0;
-	classzone_idx = MAX_NR_ZONES - 1;
+	order = new_order = 0;
+	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
 	for ( ; ; ) {
-		unsigned long new_order;
-		int new_classzone_idx;
 		int ret;
 
-		new_order = pgdat->kswapd_max_order;
-		new_classzone_idx = pgdat->classzone_idx;
-		pgdat->kswapd_max_order = 0;
-		pgdat->classzone_idx = MAX_NR_ZONES - 1;
+		/*
+		 * If the last balance_pgdat was unsuccessful it's unlikely a
+		 * new request of a similar or harder type will succeed soon
+		 * so consider going to sleep on the basis we reclaimed at
+		 */
+		if (classzone_idx >= new_classzone_idx && order == new_order) {
+			new_order = pgdat->kswapd_max_order;
+			new_classzone_idx = pgdat->classzone_idx;
+			pgdat->kswapd_max_order = 0;
+			pgdat->classzone_idx = pgdat->nr_zones - 1;
+		}
+
 		if (order < new_order || classzone_idx > new_classzone_idx) {
 			/*
 			 * Don't sleep if someone wants a larger 'order'
@@ -2763,7 +2774,7 @@ static int kswapd(void *p)
 			order = pgdat->kswapd_max_order;
 			classzone_idx = pgdat->classzone_idx;
 			pgdat->kswapd_max_order = 0;
-			pgdat->classzone_idx = MAX_NR_ZONES - 1;
+			pgdat->classzone_idx = pgdat->nr_zones - 1;
 		}
 
 		ret = try_to_freeze();
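
The kswapd() change above is easier to follow outside the patch format. Below is a minimal, self-contained C sketch of the request-handling logic the new loop head implements; struct node and pick_next_request() are hypothetical stand-ins for pg_data_t and the open-coded loop body, not kernel APIs, and the sleep path and any synchronization are omitted.

/* Hypothetical stand-in for the pgdat fields the patch touches. */
struct node {
	unsigned long kswapd_max_order;	/* hardest order requested by wakers */
	int classzone_idx;		/* lowest classzone index requested */
	int nr_zones;
};

/*
 * Sketch of the new loop head in kswapd(): the pending request stored in
 * *node is re-read only when the previous pass kept up with the last
 * request (same order, classzone index at least as high). If it fell
 * short, a similar or harder request is unlikely to succeed soon, so
 * kswapd sticks with its current target instead of resetting it.
 */
static void pick_next_request(struct node *node,
			      unsigned long *order, int *classzone_idx,
			      unsigned long *new_order, int *new_classzone_idx)
{
	if (*classzone_idx >= *new_classzone_idx && *order == *new_order) {
		*new_order = node->kswapd_max_order;
		*new_classzone_idx = node->classzone_idx;
		node->kswapd_max_order = 0;
		node->classzone_idx = node->nr_zones - 1;
	}

	/* A harder request (larger order or lower classzone) is taken up now. */
	if (*order < *new_order || *classzone_idx > *new_classzone_idx) {
		*order = *new_order;
		*classzone_idx = *new_classzone_idx;
	}
}

In the patched kernel this sits at the top of kswapd()'s main loop; when no larger order or tighter classzone is pending, the thread instead goes on to consider sleeping, as the "Don't sleep if someone wants a larger 'order'" comment in the hunk above indicates.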