Diffstat (limited to 'mm/page-writeback.c')
 mm/page-writeback.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1eea4fa0d410..5f378dd58802 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -380,7 +380,8 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 		struct zone *z =
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-		x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
+		x += zone_page_state(z, NR_FREE_PAGES) +
+		     zone_reclaimable_pages(z);
 	}
 	/*
 	 * Make sure that the number of highmem pages is never larger
@@ -404,7 +405,7 @@ unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
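The first two hunks change how the writeback code sizes "dirtyable" memory: instead of adding up all LRU pages (zone_lru_pages()/global_lru_pages()), it now adds only pages that are actually reclaimable (zone_reclaimable_pages()/global_reclaimable_pages()), presumably so that pages the VM cannot reclaim no longer inflate the dirty thresholds. A minimal userspace sketch of the resulting accounting, with hypothetical parameters standing in for the kernel counters used above:

/*
 * Illustrative sketch only, not kernel code.  The parameters are
 * hypothetical stand-ins for global_page_state(NR_FREE_PAGES),
 * global_reclaimable_pages() and highmem_dirtyable_memory() from
 * the hunks above.
 */
#include <stdbool.h>

static unsigned long sketch_dirtyable_memory(unsigned long free_pages,
					     unsigned long reclaimable_pages,
					     unsigned long highmem_pages,
					     bool highmem_is_dirtyable)
{
	/* Pages that could plausibly hold dirty data: free + reclaimable. */
	unsigned long x = free_pages + reclaimable_pages;

	/* Mirror the !vm_highmem_is_dirtyable case: exclude highmem. */
	if (!highmem_is_dirtyable)
		x -= highmem_pages;

	return x;
}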
@@ -485,6 +486,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 	unsigned long bdi_thresh;
 	unsigned long pages_written = 0;
 	unsigned long write_chunk = sync_writeback_pages();
+	unsigned long pause = 1;
 
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 
@@ -561,7 +563,15 @@ static void balance_dirty_pages(struct address_space *mapping)
 		if (pages_written >= write_chunk)
 			break;		/* We've done our duty */
 
-		schedule_timeout(1);
+		schedule_timeout_interruptible(pause);
+
+		/*
+		 * Increase the delay for each loop, up to our previous
+		 * default of taking a 100ms nap.
+		 */
+		pause <<= 1;
+		if (pause > HZ / 10)
+			pause = HZ / 10;
 	}
 
 	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
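The last hunk replaces the fixed one-jiffy nap with an exponential backoff: the pause starts at one jiffy, doubles on every pass through the throttling loop, and is capped at HZ / 10, i.e. the previous default of roughly a 100ms nap. A minimal userspace sketch of the same pattern, where retry_throttled() and make_progress() are hypothetical placeholders for the writeback throttling loop:

/*
 * Illustrative sketch only, not kernel code: double the pause on each
 * retry and cap it at ~100ms, mirroring the capped backoff added above.
 * make_progress() is a hypothetical stand-in for the loop's exit checks.
 */
#include <stdbool.h>
#include <unistd.h>

#define MAX_PAUSE_US	100000	/* ~100ms, the analogue of HZ / 10 */

static void retry_throttled(bool (*make_progress)(void))
{
	unsigned long pause_us = 1000;	/* start with a short 1ms nap */

	while (!make_progress()) {
		usleep(pause_us);

		/* Exponential backoff, capped at the old fixed delay. */
		pause_us <<= 1;
		if (pause_us > MAX_PAUSE_US)
			pause_us = MAX_PAUSE_US;
	}
}

Starting small lets lightly throttled writers resume almost immediately, while writers that stay over their limit converge on the old 100ms behaviour.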