path: root/mm
author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-25 12:27:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-25 12:27:30 -0400
commit     6d7f18f6ea3a13af95bdf507fc54d42b165e1712 (patch)
tree       8f6f3a6d46835aa767823fa7049609408a87afc2 /mm
parent     53cddfcc0e760d2b364878b6dadbd0c6d087cfae (diff)
parent     56a131dcf7ed36c3c6e36bea448b674ea85ed5bb (diff)
Merge branch 'writeback' of git://git.kernel.dk/linux-2.6-block
* 'writeback' of git://git.kernel.dk/linux-2.6-block:
  writeback: writeback_inodes_sb() should use bdi_start_writeback()
  writeback: don't delay inodes redirtied by a fast dirtier
  writeback: make the super_block pinning more efficient
  writeback: don't resort for a single super_block in move_expired_inodes()
  writeback: move inodes from one super_block together
  writeback: get rid to incorrect references to pdflush in comments
  writeback: improve readability of the wb_writeback() continue/break logic
  writeback: cleanup writeback_single_inode()
  writeback: kupdate writeback shall not stop when more io is possible
  writeback: stop background writeback when below background threshold
  writeback: balance_dirty_pages() shall write more than dirtied pages
  fs: Fix busyloop in wb_writeback()
Diffstat (limited to 'mm')
-rw-r--r--   mm/page-writeback.c   30
-rw-r--r--   mm/shmem.c             5
-rw-r--r--   mm/vmscan.c            8
3 files changed, 24 insertions(+), 19 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d99664e8607e..69b5fbabc8bd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -44,18 +44,21 @@ static long ratelimit_pages = 32;
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
  * non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
+ * It should be somewhat larger than dirtied pages to ensure that reasonably
  * large amounts of I/O are submitted.
  */
-static inline long sync_writeback_pages(void)
+static inline long sync_writeback_pages(unsigned long dirtied)
 {
-	return ratelimit_pages + ratelimit_pages / 2;
+	if (dirtied < ratelimit_pages)
+		dirtied = ratelimit_pages;
+
+	return dirtied + dirtied / 2;
 }
 
 /* The following parameters are exported via /proc/sys/vm */
 
 /*
- * Start background writeback (via pdflush) at this percentage
+ * Start background writeback (via writeback threads) at this percentage
  */
 int dirty_background_ratio = 10;
 
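The hunk above replaces the fixed write chunk with one that scales with how many pages the caller has actually dirtied ("balance_dirty_pages() shall write more than dirtied pages" in the merge summary). Below is a minimal user-space sketch of that arithmetic only, assuming the default ratelimit_pages of 32 shown in the hunk header; it is not kernel code, just the same calculation in isolation.

/* Stand-alone sketch of the new sync_writeback_pages() arithmetic.
 * Assumption: ratelimit_pages starts at 32, as in the hunk header above. */
#include <stdio.h>

static long ratelimit_pages = 32;

static long sync_writeback_pages(unsigned long dirtied)
{
	/* Never write less than the old floor of ratelimit_pages ... */
	if (dirtied < ratelimit_pages)
		dirtied = ratelimit_pages;

	/* ... and always try to write back more than was dirtied. */
	return dirtied + dirtied / 2;
}

int main(void)
{
	printf("dirtied=8    -> write chunk=%ld\n", sync_writeback_pages(8));    /* 48, the old fixed chunk */
	printf("dirtied=1024 -> write chunk=%ld\n", sync_writeback_pages(1024)); /* 1536 */
	return 0;
}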
@@ -474,10 +477,11 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data.  It looks at the number of dirty pages in the machine and will force
  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
- * If we're over `background_thresh' then pdflush is woken to perform some
- * writeout.
+ * If we're over `background_thresh' then the writeback threads are woken to
+ * perform some writeout.
  */
-static void balance_dirty_pages(struct address_space *mapping)
+static void balance_dirty_pages(struct address_space *mapping,
+				unsigned long write_chunk)
 {
 	long nr_reclaimable, bdi_nr_reclaimable;
 	long nr_writeback, bdi_nr_writeback;
@@ -485,7 +489,6 @@ static void balance_dirty_pages(struct address_space *mapping)
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
 	unsigned long pages_written = 0;
-	unsigned long write_chunk = sync_writeback_pages();
 	unsigned long pause = 1;
 
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
@@ -579,7 +582,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 		bdi->dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
-		return;		/* pdflush is already working this queue */
+		return;
 
 	/*
 	 * In laptop mode, we wait until hitting the higher threshold before
@@ -590,10 +593,10 @@ static void balance_dirty_pages(struct address_space *mapping)
 	 * background_thresh, to keep the amount of dirty memory low.
 	 */
 	if ((laptop_mode && pages_written) ||
-	    (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
+	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
 					  + global_page_state(NR_UNSTABLE_NFS))
 					  > background_thresh)))
-		bdi_start_writeback(bdi, nr_writeback);
+		bdi_start_writeback(bdi, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -640,9 +643,10 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	p =  &__get_cpu_var(bdp_ratelimits);
 	*p += nr_pages_dirtied;
 	if (unlikely(*p >= ratelimit)) {
+		ratelimit = sync_writeback_pages(*p);
 		*p = 0;
 		preempt_enable();
-		balance_dirty_pages(mapping);
+		balance_dirty_pages(mapping, ratelimit);
 		return;
 	}
 	preempt_enable();
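The last hunk shows where the new write_chunk argument comes from: balance_dirty_pages_ratelimited_nr() samples its per-CPU count of freshly dirtied pages before resetting it, and passes the derived chunk to balance_dirty_pages(). A rough user-space sketch of that ordering follows, with the per-CPU counter, preemption control and the real writeback replaced by simple stand-ins; only the order of operations from the hunk is illustrated.

/* Sketch of the new caller ordering in balance_dirty_pages_ratelimited_nr().
 * Everything kernel-specific is stubbed out; stand-in names only. */
#include <stdio.h>

static long ratelimit_pages = 32;
static unsigned long bdp_ratelimits;	/* stand-in for the per-CPU counter */

static unsigned long sync_writeback_pages(unsigned long dirtied)
{
	if (dirtied < ratelimit_pages)
		dirtied = ratelimit_pages;
	return dirtied + dirtied / 2;
}

/* Stand-in for balance_dirty_pages(mapping, write_chunk). */
static void balance_dirty_pages(unsigned long write_chunk)
{
	printf("balance_dirty_pages: may write up to %lu pages\n", write_chunk);
}

static void balance_dirty_pages_ratelimited_nr(unsigned long nr_pages_dirtied)
{
	unsigned long ratelimit = ratelimit_pages;

	bdp_ratelimits += nr_pages_dirtied;
	if (bdp_ratelimits >= ratelimit) {
		/* Sample the accumulated count before zeroing it, so the
		 * write chunk reflects what was actually dirtied. */
		ratelimit = sync_writeback_pages(bdp_ratelimits);
		bdp_ratelimits = 0;
		balance_dirty_pages(ratelimit);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		balance_dirty_pages_ratelimited_nr(16);
	return 0;
}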
diff --git a/mm/shmem.c b/mm/shmem.c
index 98631c26c200..ccf446a9faa1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1046,8 +1046,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	 * sync from ever calling shmem_writepage; but a stacking filesystem
 	 * may use the ->writepage of its underlying filesystem, in which case
 	 * tmpfs should write out to swap only in response to memory pressure,
-	 * and not for pdflush or sync.  However, in those cases, we do still
-	 * want to check if there's a redundant swappage to be discarded.
+	 * and not for the writeback threads or sync.  However, in those cases,
+	 * we do still want to check if there's a redundant swappage to be
+	 * discarded.
 	 */
 	if (wbc->for_reclaim)
 		swap = get_swap_page();
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1219ceb8a9b2..64e438898832 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1709,10 +1709,10 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
  *
  * If the caller is !__GFP_FS then the probability of a failure is reasonably
  * high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about.  We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written.  But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about.  We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written.  But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
  *
  * returns:	0, if no pages reclaimed
  * 		else, the number of pages reclaimed