about summary refs log tree commit diff stats
path: root/mm/page-writeback.c
diff options
context:
space:
mode:
author Linus Torvalds <torvalds@linux-foundation.org> 2009-09-25 12:27:30 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2009-09-25 12:27:30 -0400
commit6d7f18f6ea3a13af95bdf507fc54d42b165e1712 (patch)
tree8f6f3a6d46835aa767823fa7049609408a87afc2 /mm/page-writeback.c
parent53cddfcc0e760d2b364878b6dadbd0c6d087cfae (diff)
parent56a131dcf7ed36c3c6e36bea448b674ea85ed5bb (diff)
Merge branch 'writeback' of git://git.kernel.dk/linux-2.6-block
* 'writeback' of git://git.kernel.dk/linux-2.6-block:
  writeback: writeback_inodes_sb() should use bdi_start_writeback()
  writeback: don't delay inodes redirtied by a fast dirtier
  writeback: make the super_block pinning more efficient
  writeback: don't resort for a single super_block in move_expired_inodes()
  writeback: move inodes from one super_block together
  writeback: get rid of incorrect references to pdflush in comments
  writeback: improve readability of the wb_writeback() continue/break logic
  writeback: cleanup writeback_single_inode()
  writeback: kupdate writeback shall not stop when more io is possible
  writeback: stop background writeback when below background threshold
  writeback: balance_dirty_pages() shall write more than dirtied pages
  fs: Fix busyloop in wb_writeback()
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--mm/page-writeback.c30
1 file changed, 17 insertions, 13 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d99664e8607e..69b5fbabc8bd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -44,18 +44,21 @@ static long ratelimit_pages = 32;
44/* 44/*
45 * When balance_dirty_pages decides that the caller needs to perform some 45 * When balance_dirty_pages decides that the caller needs to perform some
46 * non-background writeback, this is how many pages it will attempt to write. 46 * non-background writeback, this is how many pages it will attempt to write.
47 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably 47 * It should be somewhat larger than dirtied pages to ensure that reasonably
48 * large amounts of I/O are submitted. 48 * large amounts of I/O are submitted.
49 */ 49 */
50static inline long sync_writeback_pages(void) 50static inline long sync_writeback_pages(unsigned long dirtied)
51{ 51{
52 return ratelimit_pages + ratelimit_pages / 2; 52 if (dirtied < ratelimit_pages)
53 dirtied = ratelimit_pages;
54
55 return dirtied + dirtied / 2;
53} 56}
54 57
55/* The following parameters are exported via /proc/sys/vm */ 58/* The following parameters are exported via /proc/sys/vm */
56 59
57/* 60/*
58 * Start background writeback (via pdflush) at this percentage 61 * Start background writeback (via writeback threads) at this percentage
59 */ 62 */
60int dirty_background_ratio = 10; 63int dirty_background_ratio = 10;
61 64
@@ -474,10 +477,11 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
474 * balance_dirty_pages() must be called by processes which are generating dirty 477 * balance_dirty_pages() must be called by processes which are generating dirty
475 * data. It looks at the number of dirty pages in the machine and will force 478 * data. It looks at the number of dirty pages in the machine and will force
476 * the caller to perform writeback if the system is over `vm_dirty_ratio'. 479 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
477 * If we're over `background_thresh' then pdflush is woken to perform some 480 * If we're over `background_thresh' then the writeback threads are woken to
478 * writeout. 481 * perform some writeout.
479 */ 482 */
480static void balance_dirty_pages(struct address_space *mapping) 483static void balance_dirty_pages(struct address_space *mapping,
484 unsigned long write_chunk)
481{ 485{
482 long nr_reclaimable, bdi_nr_reclaimable; 486 long nr_reclaimable, bdi_nr_reclaimable;
483 long nr_writeback, bdi_nr_writeback; 487 long nr_writeback, bdi_nr_writeback;
@@ -485,7 +489,6 @@ static void balance_dirty_pages(struct address_space *mapping)
485 unsigned long dirty_thresh; 489 unsigned long dirty_thresh;
486 unsigned long bdi_thresh; 490 unsigned long bdi_thresh;
487 unsigned long pages_written = 0; 491 unsigned long pages_written = 0;
488 unsigned long write_chunk = sync_writeback_pages();
489 unsigned long pause = 1; 492 unsigned long pause = 1;
490 493
491 struct backing_dev_info *bdi = mapping->backing_dev_info; 494 struct backing_dev_info *bdi = mapping->backing_dev_info;
@@ -579,7 +582,7 @@ static void balance_dirty_pages(struct address_space *mapping)
579 bdi->dirty_exceeded = 0; 582 bdi->dirty_exceeded = 0;
580 583
581 if (writeback_in_progress(bdi)) 584 if (writeback_in_progress(bdi))
582 return; /* pdflush is already working this queue */ 585 return;
583 586
584 /* 587 /*
585 * In laptop mode, we wait until hitting the higher threshold before 588 * In laptop mode, we wait until hitting the higher threshold before
@@ -590,10 +593,10 @@ static void balance_dirty_pages(struct address_space *mapping)
590 * background_thresh, to keep the amount of dirty memory low. 593 * background_thresh, to keep the amount of dirty memory low.
591 */ 594 */
592 if ((laptop_mode && pages_written) || 595 if ((laptop_mode && pages_written) ||
593 (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY) 596 (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
594 + global_page_state(NR_UNSTABLE_NFS)) 597 + global_page_state(NR_UNSTABLE_NFS))
595 > background_thresh))) 598 > background_thresh)))
596 bdi_start_writeback(bdi, nr_writeback); 599 bdi_start_writeback(bdi, 0);
597} 600}
598 601
599void set_page_dirty_balance(struct page *page, int page_mkwrite) 602void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -640,9 +643,10 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
640 p = &__get_cpu_var(bdp_ratelimits); 643 p = &__get_cpu_var(bdp_ratelimits);
641 *p += nr_pages_dirtied; 644 *p += nr_pages_dirtied;
642 if (unlikely(*p >= ratelimit)) { 645 if (unlikely(*p >= ratelimit)) {
646 ratelimit = sync_writeback_pages(*p);
643 *p = 0; 647 *p = 0;
644 preempt_enable(); 648 preempt_enable();
645 balance_dirty_pages(mapping); 649 balance_dirty_pages(mapping, ratelimit);
646 return; 650 return;
647 } 651 }
648 preempt_enable(); 652 preempt_enable();