about summary refs log tree commit diff stats
path: root/mm/page-writeback.c
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2009-01-06 17:39:06 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-01-06 18:58:59 -0500
commitbd19e012f6fd3b7309689165ea865cbb7bb88c1e (patch)
tree66ca4f5bfc683d2935e7fc5cab2f57a7682ad65a /mm/page-writeback.c
parent31a12666d8f0c22235297e1c1575f82061480029 (diff)
mm: write_cache_pages early loop termination
We'd like to break out of the loop early in many situations; however, the existing code has been setting mapping->writeback_index past the final page in the pagevec lookup for cyclic writeback. This is a problem if we don't process all pages up to the final page. Currently the code mostly keeps writeback_index reasonable and hacked around this by not breaking out of the loop or writing pages outside the range in these cases. Keep track of a real "done index" that enables us to terminate the loop in a much more flexible manner. This is needed by the subsequent patch to preserve writepage errors, and then by further patches to break out of the loop early for other reasons. However, there are no functional changes with this patch alone. Signed-off-by: Nick Piggin <npiggin@suse.de> Cc: Chris Mason <chris.mason@oracle.com> Cc: Dave Chinner <david@fromorbit.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r-- mm/page-writeback.c | 6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index eb277bdd4c5d..01b9cb8ccf68 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -871,6 +871,7 @@ int write_cache_pages(struct address_space *mapping,
 	pgoff_t uninitialized_var(writeback_index);
 	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
+	pgoff_t done_index;
 	int cycled;
 	int range_whole = 0;
 	long nr_to_write = wbc->nr_to_write;
@@ -897,6 +898,7 @@ int write_cache_pages(struct address_space *mapping,
 		cycled = 1; /* ignore range_cyclic tests */
 	}
 retry:
+	done_index = index;
 	while (!done && (index <= end) &&
 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
 			PAGECACHE_TAG_DIRTY,
@@ -906,6 +908,8 @@ retry:
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];

+			done_index = page->index + 1;
+
 			/*
 			 * At this point we hold neither mapping->tree_lock nor
 			 * lock on the page itself: the page may be truncated or
@@ -968,7 +972,7 @@ retry:
 	}
 	if (!wbc->no_nrwrite_index_update) {
 		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
-			mapping->writeback_index = index;
+			mapping->writeback_index = done_index;
 		wbc->nr_to_write = nr_to_write;
 	}
