diff options
Diffstat (limited to 'mm/page-writeback.c')
| -rw-r--r-- | mm/page-writeback.c | 33 | 
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index d99664e8607e..2c5d79236ead 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c  | |||
| @@ -44,18 +44,21 @@ static long ratelimit_pages = 32; | |||
| 44 | /* | 44 | /* | 
| 45 | * When balance_dirty_pages decides that the caller needs to perform some | 45 | * When balance_dirty_pages decides that the caller needs to perform some | 
| 46 | * non-background writeback, this is how many pages it will attempt to write. | 46 | * non-background writeback, this is how many pages it will attempt to write. | 
| 47 | * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably | 47 | * It should be somewhat larger than dirtied pages to ensure that reasonably | 
| 48 | * large amounts of I/O are submitted. | 48 | * large amounts of I/O are submitted. | 
| 49 | */ | 49 | */ | 
| 50 | static inline long sync_writeback_pages(void) | 50 | static inline long sync_writeback_pages(unsigned long dirtied) | 
| 51 | { | 51 | { | 
| 52 | return ratelimit_pages + ratelimit_pages / 2; | 52 | if (dirtied < ratelimit_pages) | 
| 53 | dirtied = ratelimit_pages; | ||
| 54 | |||
| 55 | return dirtied + dirtied / 2; | ||
| 53 | } | 56 | } | 
| 54 | 57 | ||
| 55 | /* The following parameters are exported via /proc/sys/vm */ | 58 | /* The following parameters are exported via /proc/sys/vm */ | 
| 56 | 59 | ||
| 57 | /* | 60 | /* | 
| 58 | * Start background writeback (via pdflush) at this percentage | 61 | * Start background writeback (via writeback threads) at this percentage | 
| 59 | */ | 62 | */ | 
| 60 | int dirty_background_ratio = 10; | 63 | int dirty_background_ratio = 10; | 
| 61 | 64 | ||
| @@ -474,10 +477,11 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, | |||
| 474 | * balance_dirty_pages() must be called by processes which are generating dirty | 477 | * balance_dirty_pages() must be called by processes which are generating dirty | 
| 475 | * data. It looks at the number of dirty pages in the machine and will force | 478 | * data. It looks at the number of dirty pages in the machine and will force | 
| 476 | * the caller to perform writeback if the system is over `vm_dirty_ratio'. | 479 | * the caller to perform writeback if the system is over `vm_dirty_ratio'. | 
| 477 | * If we're over `background_thresh' then pdflush is woken to perform some | 480 | * If we're over `background_thresh' then the writeback threads are woken to | 
| 478 | * writeout. | 481 | * perform some writeout. | 
| 479 | */ | 482 | */ | 
| 480 | static void balance_dirty_pages(struct address_space *mapping) | 483 | static void balance_dirty_pages(struct address_space *mapping, | 
| 484 | unsigned long write_chunk) | ||
| 481 | { | 485 | { | 
| 482 | long nr_reclaimable, bdi_nr_reclaimable; | 486 | long nr_reclaimable, bdi_nr_reclaimable; | 
| 483 | long nr_writeback, bdi_nr_writeback; | 487 | long nr_writeback, bdi_nr_writeback; | 
| @@ -485,7 +489,6 @@ static void balance_dirty_pages(struct address_space *mapping) | |||
| 485 | unsigned long dirty_thresh; | 489 | unsigned long dirty_thresh; | 
| 486 | unsigned long bdi_thresh; | 490 | unsigned long bdi_thresh; | 
| 487 | unsigned long pages_written = 0; | 491 | unsigned long pages_written = 0; | 
| 488 | unsigned long write_chunk = sync_writeback_pages(); | ||
| 489 | unsigned long pause = 1; | 492 | unsigned long pause = 1; | 
| 490 | 493 | ||
| 491 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 494 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| @@ -563,7 +566,8 @@ static void balance_dirty_pages(struct address_space *mapping) | |||
| 563 | if (pages_written >= write_chunk) | 566 | if (pages_written >= write_chunk) | 
| 564 | break; /* We've done our duty */ | 567 | break; /* We've done our duty */ | 
| 565 | 568 | ||
| 566 | schedule_timeout_interruptible(pause); | 569 | __set_current_state(TASK_INTERRUPTIBLE); | 
| 570 | io_schedule_timeout(pause); | ||
| 567 | 571 | ||
| 568 | /* | 572 | /* | 
| 569 | * Increase the delay for each loop, up to our previous | 573 | * Increase the delay for each loop, up to our previous | 
| @@ -579,7 +583,7 @@ static void balance_dirty_pages(struct address_space *mapping) | |||
| 579 | bdi->dirty_exceeded = 0; | 583 | bdi->dirty_exceeded = 0; | 
| 580 | 584 | ||
| 581 | if (writeback_in_progress(bdi)) | 585 | if (writeback_in_progress(bdi)) | 
| 582 | return; /* pdflush is already working this queue */ | 586 | return; | 
| 583 | 587 | ||
| 584 | /* | 588 | /* | 
| 585 | * In laptop mode, we wait until hitting the higher threshold before | 589 | * In laptop mode, we wait until hitting the higher threshold before | 
| @@ -590,10 +594,10 @@ static void balance_dirty_pages(struct address_space *mapping) | |||
| 590 | * background_thresh, to keep the amount of dirty memory low. | 594 | * background_thresh, to keep the amount of dirty memory low. | 
| 591 | */ | 595 | */ | 
| 592 | if ((laptop_mode && pages_written) || | 596 | if ((laptop_mode && pages_written) || | 
| 593 | (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY) | 597 | (!laptop_mode && ((global_page_state(NR_FILE_DIRTY) | 
| 594 | + global_page_state(NR_UNSTABLE_NFS)) | 598 | + global_page_state(NR_UNSTABLE_NFS)) | 
| 595 | > background_thresh))) | 599 | > background_thresh))) | 
| 596 | bdi_start_writeback(bdi, nr_writeback); | 600 | bdi_start_writeback(bdi, NULL, 0); | 
| 597 | } | 601 | } | 
| 598 | 602 | ||
| 599 | void set_page_dirty_balance(struct page *page, int page_mkwrite) | 603 | void set_page_dirty_balance(struct page *page, int page_mkwrite) | 
| @@ -640,9 +644,10 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, | |||
| 640 | p = &__get_cpu_var(bdp_ratelimits); | 644 | p = &__get_cpu_var(bdp_ratelimits); | 
| 641 | *p += nr_pages_dirtied; | 645 | *p += nr_pages_dirtied; | 
| 642 | if (unlikely(*p >= ratelimit)) { | 646 | if (unlikely(*p >= ratelimit)) { | 
| 647 | ratelimit = sync_writeback_pages(*p); | ||
| 643 | *p = 0; | 648 | *p = 0; | 
| 644 | preempt_enable(); | 649 | preempt_enable(); | 
| 645 | balance_dirty_pages(mapping); | 650 | balance_dirty_pages(mapping, ratelimit); | 
| 646 | return; | 651 | return; | 
| 647 | } | 652 | } | 
| 648 | preempt_enable(); | 653 | preempt_enable(); | 
