about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorOGAWA Hirofumi <hirofumi@mail.parknet.co.jp>2006-06-23 05:03:26 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-23 10:42:49 -0400
commit111ebb6e6f7bd7de6d722c5848e95621f43700d9 (patch)
treebb00b13001db9be201e9b6d31468a79f4d1240bf
parent4c91c3648c620003cb7b21b8858f36cd6132e168 (diff)
[PATCH] writeback: fix range handling
When a writeback_control's `start' and `end' fields are used to indicate a one-byte range starting at file offset zero, the required values of .start=0,.end=0 give the ->writepages() implementation no way of telling that it is being asked to perform a range request, because (start == 0 && end == 0) is currently overloaded to mean "this is not a write-a-range request". To make this sane, the patch changes the range handling of writeback_control so that the caller behaves as follows: whenever it invokes ->writepages() to write pages, it always sets the range (either range_start/range_end or range_cyclic). If range_cyclic is true, ->writepages() treats the range as cyclic; otherwise it simply uses range_start and range_end. Specifically, this patch: - Adds LLONG_MAX, LLONG_MIN and ULLONG_MAX to include/linux/kernel.h. A value of -1 is usually acceptable for range_end (its type is long long), but if someone were to do `range_end += val;`, range_end would become "val - 1", and `u64val = range_end >> bits;` would make u64val "~0ULL" or similar — both wrong. So LLONG_MAX is added to avoid such problems, and is used as the value of range_end. - Makes all callers of ->writepages() set range_start/range_end or range_cyclic. - Fixes the updates of ->writeback_index, which already looked somewhat strange: if a scan starts at index 0 and is terminated by the nr_to_write check, recording that last index may reduce the chance of ever scanning the end of the file. So ->writeback_index is now updated only when range_cyclic is true or the whole file has been scanned. Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> Cc: Nathan Scott <nathans@sgi.com> Cc: Anton Altaparmakov <aia21@cantab.net> Cc: Steven French <sfrench@us.ibm.com> Cc: "Vladimir V. Saveliev" <vs@namesys.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--fs/cifs/file.c24
-rw-r--r--fs/fs-writeback.c4
-rw-r--r--fs/mpage.c22
-rw-r--r--fs/sync.c2
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/writeback.h5
-rw-r--r--mm/filemap.c6
-rw-r--r--mm/page-writeback.c3
-rw-r--r--mm/vmscan.c2
9 files changed, 40 insertions, 31 deletions
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e2b4ce1dad66..487ea8b3baaa 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1079,9 +1079,9 @@ static int cifs_writepages(struct address_space *mapping,
1079 unsigned int bytes_written; 1079 unsigned int bytes_written;
1080 struct cifs_sb_info *cifs_sb; 1080 struct cifs_sb_info *cifs_sb;
1081 int done = 0; 1081 int done = 0;
1082 pgoff_t end = -1; 1082 pgoff_t end;
1083 pgoff_t index; 1083 pgoff_t index;
1084 int is_range = 0; 1084 int range_whole = 0;
1085 struct kvec iov[32]; 1085 struct kvec iov[32];
1086 int len; 1086 int len;
1087 int n_iov = 0; 1087 int n_iov = 0;
@@ -1122,16 +1122,14 @@ static int cifs_writepages(struct address_space *mapping,
1122 xid = GetXid(); 1122 xid = GetXid();
1123 1123
1124 pagevec_init(&pvec, 0); 1124 pagevec_init(&pvec, 0);
1125 if (wbc->sync_mode == WB_SYNC_NONE) 1125 if (wbc->range_cyclic) {
1126 index = mapping->writeback_index; /* Start from prev offset */ 1126 index = mapping->writeback_index; /* Start from prev offset */
1127 else { 1127 end = -1;
1128 index = 0; 1128 } else {
1129 scanned = 1; 1129 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1130 } 1130 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1131 if (wbc->start || wbc->end) { 1131 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1132 index = wbc->start >> PAGE_CACHE_SHIFT; 1132 range_whole = 1;
1133 end = wbc->end >> PAGE_CACHE_SHIFT;
1134 is_range = 1;
1135 scanned = 1; 1133 scanned = 1;
1136 } 1134 }
1137retry: 1135retry:
@@ -1167,7 +1165,7 @@ retry:
1167 break; 1165 break;
1168 } 1166 }
1169 1167
1170 if (unlikely(is_range) && (page->index > end)) { 1168 if (!wbc->range_cyclic && page->index > end) {
1171 done = 1; 1169 done = 1;
1172 unlock_page(page); 1170 unlock_page(page);
1173 break; 1171 break;
@@ -1271,7 +1269,7 @@ retry:
1271 index = 0; 1269 index = 0;
1272 goto retry; 1270 goto retry;
1273 } 1271 }
1274 if (!is_range) 1272 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1275 mapping->writeback_index = index; 1273 mapping->writeback_index = index;
1276 1274
1277 FreeXid(xid); 1275 FreeXid(xid);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f3fbe2d030f4..6db95cf3aaa2 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -461,6 +461,8 @@ void sync_inodes_sb(struct super_block *sb, int wait)
461{ 461{
462 struct writeback_control wbc = { 462 struct writeback_control wbc = {
463 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD, 463 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
464 .range_start = 0,
465 .range_end = LLONG_MAX,
464 }; 466 };
465 unsigned long nr_dirty = read_page_state(nr_dirty); 467 unsigned long nr_dirty = read_page_state(nr_dirty);
466 unsigned long nr_unstable = read_page_state(nr_unstable); 468 unsigned long nr_unstable = read_page_state(nr_unstable);
@@ -559,6 +561,8 @@ int write_inode_now(struct inode *inode, int sync)
559 struct writeback_control wbc = { 561 struct writeback_control wbc = {
560 .nr_to_write = LONG_MAX, 562 .nr_to_write = LONG_MAX,
561 .sync_mode = WB_SYNC_ALL, 563 .sync_mode = WB_SYNC_ALL,
564 .range_start = 0,
565 .range_end = LLONG_MAX,
562 }; 566 };
563 567
564 if (!mapping_cap_writeback_dirty(inode->i_mapping)) 568 if (!mapping_cap_writeback_dirty(inode->i_mapping))
diff --git a/fs/mpage.c b/fs/mpage.c
index 9bf2eb30e6f4..1e4598247d0b 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -707,9 +707,9 @@ mpage_writepages(struct address_space *mapping,
707 struct pagevec pvec; 707 struct pagevec pvec;
708 int nr_pages; 708 int nr_pages;
709 pgoff_t index; 709 pgoff_t index;
710 pgoff_t end = -1; /* Inclusive */ 710 pgoff_t end; /* Inclusive */
711 int scanned = 0; 711 int scanned = 0;
712 int is_range = 0; 712 int range_whole = 0;
713 713
714 if (wbc->nonblocking && bdi_write_congested(bdi)) { 714 if (wbc->nonblocking && bdi_write_congested(bdi)) {
715 wbc->encountered_congestion = 1; 715 wbc->encountered_congestion = 1;
@@ -721,16 +721,14 @@ mpage_writepages(struct address_space *mapping,
721 writepage = mapping->a_ops->writepage; 721 writepage = mapping->a_ops->writepage;
722 722
723 pagevec_init(&pvec, 0); 723 pagevec_init(&pvec, 0);
724 if (wbc->sync_mode == WB_SYNC_NONE) { 724 if (wbc->range_cyclic) {
725 index = mapping->writeback_index; /* Start from prev offset */ 725 index = mapping->writeback_index; /* Start from prev offset */
726 end = -1;
726 } else { 727 } else {
727 index = 0; /* whole-file sweep */ 728 index = wbc->range_start >> PAGE_CACHE_SHIFT;
728 scanned = 1; 729 end = wbc->range_end >> PAGE_CACHE_SHIFT;
729 } 730 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
730 if (wbc->start || wbc->end) { 731 range_whole = 1;
731 index = wbc->start >> PAGE_CACHE_SHIFT;
732 end = wbc->end >> PAGE_CACHE_SHIFT;
733 is_range = 1;
734 scanned = 1; 732 scanned = 1;
735 } 733 }
736retry: 734retry:
@@ -759,7 +757,7 @@ retry:
759 continue; 757 continue;
760 } 758 }
761 759
762 if (unlikely(is_range) && page->index > end) { 760 if (!wbc->range_cyclic && page->index > end) {
763 done = 1; 761 done = 1;
764 unlock_page(page); 762 unlock_page(page);
765 continue; 763 continue;
@@ -810,7 +808,7 @@ retry:
810 index = 0; 808 index = 0;
811 goto retry; 809 goto retry;
812 } 810 }
813 if (!is_range) 811 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
814 mapping->writeback_index = index; 812 mapping->writeback_index = index;
815 if (bio) 813 if (bio)
816 mpage_bio_submit(WRITE, bio); 814 mpage_bio_submit(WRITE, bio);
diff --git a/fs/sync.c b/fs/sync.c
index aab5ffe77e9f..955aef04da28 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -100,7 +100,7 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
100 } 100 }
101 101
102 if (nbytes == 0) 102 if (nbytes == 0)
103 endbyte = -1; 103 endbyte = LLONG_MAX;
104 else 104 else
105 endbyte--; /* inclusive */ 105 endbyte--; /* inclusive */
106 106
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f4fc576ed4c4..25fccd859fbf 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -24,6 +24,9 @@ extern const char linux_banner[];
24#define LONG_MAX ((long)(~0UL>>1)) 24#define LONG_MAX ((long)(~0UL>>1))
25#define LONG_MIN (-LONG_MAX - 1) 25#define LONG_MIN (-LONG_MAX - 1)
26#define ULONG_MAX (~0UL) 26#define ULONG_MAX (~0UL)
27#define LLONG_MAX ((long long)(~0ULL>>1))
28#define LLONG_MIN (-LLONG_MAX - 1)
29#define ULLONG_MAX (~0ULL)
27 30
28#define STACK_MAGIC 0xdeadbeef 31#define STACK_MAGIC 0xdeadbeef
29 32
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 56f92fcbe94a..9e38b566d0e7 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -50,14 +50,15 @@ struct writeback_control {
50 * a hint that the filesystem need only write out the pages inside that 50 * a hint that the filesystem need only write out the pages inside that
51 * byterange. The byte at `end' is included in the writeout request. 51 * byterange. The byte at `end' is included in the writeout request.
52 */ 52 */
53 loff_t start; 53 loff_t range_start;
54 loff_t end; 54 loff_t range_end;
55 55
56 unsigned nonblocking:1; /* Don't get stuck on request queues */ 56 unsigned nonblocking:1; /* Don't get stuck on request queues */
57 unsigned encountered_congestion:1; /* An output: a queue is full */ 57 unsigned encountered_congestion:1; /* An output: a queue is full */
58 unsigned for_kupdate:1; /* A kupdate writeback */ 58 unsigned for_kupdate:1; /* A kupdate writeback */
59 unsigned for_reclaim:1; /* Invoked from the page allocator */ 59 unsigned for_reclaim:1; /* Invoked from the page allocator */
60 unsigned for_writepages:1; /* This is a writepages() call */ 60 unsigned for_writepages:1; /* This is a writepages() call */
61 unsigned range_cyclic:1; /* range_start is cyclic */
61}; 62};
62 63
63/* 64/*
diff --git a/mm/filemap.c b/mm/filemap.c
index fd57442186cb..3342067ca436 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -190,8 +190,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
190 struct writeback_control wbc = { 190 struct writeback_control wbc = {
191 .sync_mode = sync_mode, 191 .sync_mode = sync_mode,
192 .nr_to_write = mapping->nrpages * 2, 192 .nr_to_write = mapping->nrpages * 2,
193 .start = start, 193 .range_start = start,
194 .end = end, 194 .range_end = end,
195 }; 195 };
196 196
197 if (!mapping_cap_writeback_dirty(mapping)) 197 if (!mapping_cap_writeback_dirty(mapping))
@@ -204,7 +204,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
204static inline int __filemap_fdatawrite(struct address_space *mapping, 204static inline int __filemap_fdatawrite(struct address_space *mapping,
205 int sync_mode) 205 int sync_mode)
206{ 206{
207 return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode); 207 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
208} 208}
209 209
210int filemap_fdatawrite(struct address_space *mapping) 210int filemap_fdatawrite(struct address_space *mapping)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 75d7f48b79bb..8ccf6f1b1473 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -204,6 +204,7 @@ static void balance_dirty_pages(struct address_space *mapping)
204 .sync_mode = WB_SYNC_NONE, 204 .sync_mode = WB_SYNC_NONE,
205 .older_than_this = NULL, 205 .older_than_this = NULL,
206 .nr_to_write = write_chunk, 206 .nr_to_write = write_chunk,
207 .range_cyclic = 1,
207 }; 208 };
208 209
209 get_dirty_limits(&wbs, &background_thresh, 210 get_dirty_limits(&wbs, &background_thresh,
@@ -331,6 +332,7 @@ static void background_writeout(unsigned long _min_pages)
331 .older_than_this = NULL, 332 .older_than_this = NULL,
332 .nr_to_write = 0, 333 .nr_to_write = 0,
333 .nonblocking = 1, 334 .nonblocking = 1,
335 .range_cyclic = 1,
334 }; 336 };
335 337
336 for ( ; ; ) { 338 for ( ; ; ) {
@@ -407,6 +409,7 @@ static void wb_kupdate(unsigned long arg)
407 .nr_to_write = 0, 409 .nr_to_write = 0,
408 .nonblocking = 1, 410 .nonblocking = 1,
409 .for_kupdate = 1, 411 .for_kupdate = 1,
412 .range_cyclic = 1,
410 }; 413 };
411 414
412 sync_supers(); 415 sync_supers();
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 46be8a02280e..bc5d4f43036c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -339,6 +339,8 @@ pageout_t pageout(struct page *page, struct address_space *mapping)
339 struct writeback_control wbc = { 339 struct writeback_control wbc = {
340 .sync_mode = WB_SYNC_NONE, 340 .sync_mode = WB_SYNC_NONE,
341 .nr_to_write = SWAP_CLUSTER_MAX, 341 .nr_to_write = SWAP_CLUSTER_MAX,
342 .range_start = 0,
343 .range_end = LLONG_MAX,
342 .nonblocking = 1, 344 .nonblocking = 1,
343 .for_reclaim = 1, 345 .for_reclaim = 1,
344 }; 346 };