path: root/mm
author		Nick Piggin <npiggin@suse.de>			2009-01-06 17:39:04 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-06 18:58:59 -0500
commit		31a12666d8f0c22235297e1c1575f82061480029 (patch)
tree		89106848d2072278cc9338b091ad059db460420f /mm
parent		38c8e6180939e5619140b2e9e479cb26029ff8b1 (diff)
mm: write_cache_pages cyclic fix
In write_cache_pages, scanned == 1 is supposed to mean that cyclic writeback has circled through zero, thus we should not circle again. However it gets set to 1 after the first successful pagevec lookup. This leads to cases where not enough data gets written.

Counterexample: file with first 10 pages dirty, writeback_index == 5, nr_to_write == 10. Then the 5 last pages will be found, and scanned will be set to 1; after writing those out, we will not cycle back to get the first 5.

Rework this logic: now we'll always cycle unless we started off from index 0. When cycling, only write out as far as 1 page before the start page from the first cycle (so we don't write parts of the file twice).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page-writeback.c	25
1 file changed, 18 insertions, 7 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2970e35fd03f..eb277bdd4c5d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -868,9 +868,10 @@ int write_cache_pages(struct address_space *mapping,
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
+       pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
-       int scanned = 0;
+       int cycled;
        int range_whole = 0;
        long nr_to_write = wbc->nr_to_write;
 
@@ -881,14 +882,19 @@ int write_cache_pages(struct address_space *mapping,
 
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
-               index = mapping->writeback_index; /* Start from prev offset */
+               writeback_index = mapping->writeback_index; /* prev offset */
+               index = writeback_index;
+               if (index == 0)
+                       cycled = 1;
+               else
+                       cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
-               scanned = 1;
+               cycled = 1; /* ignore range_cyclic tests */
        }
 retry:
        while (!done && (index <= end) &&
@@ -897,7 +903,6 @@ retry:
                      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                unsigned i;
 
-               scanned = 1;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
 
@@ -915,7 +920,11 @@ retry:
                                continue;
                        }
 
-                       if (!wbc->range_cyclic && page->index > end) {
+                       if (page->index > end) {
+                               /*
+                                * can't be range_cyclic (1st pass) because
+                                * end == -1 in that case.
+                                */
                                done = 1;
                                unlock_page(page);
                                continue;
@@ -946,13 +955,15 @@ retry:
                pagevec_release(&pvec);
                cond_resched();
        }
-       if (!scanned && !done) {
+       if (!cycled) {
                /*
+                * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
-               scanned = 1;
+               cycled = 1;
                index = 0;
+               end = writeback_index - 1;
                goto retry;
        }
        if (!wbc->no_nrwrite_index_update) {
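
For readers tracing the fix outside the kernel, below is a minimal user-space sketch of the reworked range_cyclic loop. It is not kernel code: NR_PAGES, dirty[], write_page() and scan_cyclic() are illustrative stand-ins for the page cache tag lookup and ->writepage(), but the cycled / writeback_index / end handling mirrors the new logic in the diff above. Run against the commit message's counterexample (first 10 pages dirty, writeback_index == 5, nr_to_write == 10), it writes pages 5..9 and then wraps to write 0..4, where the old scanned flag would have stopped after the first pass.

/*
 * Simplified user-space model of the reworked range_cyclic scan.
 * NR_PAGES, dirty[] and write_page() are stand-ins, not kernel interfaces.
 */
#include <stdio.h>

#define NR_PAGES 16                     /* model: the file is 16 pages long */

static int dirty[NR_PAGES];

static void write_page(unsigned long index)
{
        dirty[index] = 0;
        printf("wrote page %lu\n", index);
}

static void scan_cyclic(unsigned long writeback_index, long nr_to_write)
{
        unsigned long index = writeback_index;
        unsigned long end = (unsigned long)-1; /* inclusive, as in the kernel */
        int cycled = (index == 0);             /* started at 0: no need to wrap */
        int done = 0;

retry:
        while (!done && index <= end && index < NR_PAGES) {
                if (dirty[index]) {
                        write_page(index);
                        if (--nr_to_write <= 0)
                                done = 1;
                }
                index++;
        }
        if (!cycled) {
                /*
                 * Wrap back to the start of the file, but only as far as
                 * one page before where the first pass began.
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 10; i++)
                dirty[i] = 1;           /* first 10 pages dirty */

        scan_cyclic(5, 10);             /* writes 5..9, wraps, then writes 0..4 */
        return 0;
}

Compiled with any C compiler, this prints pages 5 through 9 followed by 0 through 4, i.e. all 10 dirty pages are written exactly once; with the old scanned logic the second pass would never have happened.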