author		Chris Mason <chris.mason@oracle.com>	2009-08-07 09:28:20 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-09-11 13:31:04 -0400
commit		a97adc9fffb1707da4e97f91c801660c6be92aac (patch)
tree		9385c1ada9d5b86b75cd00865bacc6bc39deaa9d /fs/btrfs/extent_io.c
parent		4f878e8475a465ddbd951e06a23317303f1b5b30 (diff)
Btrfs: use larger nr_to_write for larger extents
When btrfs fills a large delayed allocation extent, it is a good idea to try
and convince the write_cache_pages caller to go ahead and write a good chunk
of that extent. The extra IO is basically free because we know it is
contiguous.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
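To make the heuristic concrete, here is a minimal userspace sketch of the
arithmetic the second hunk adds, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12).
The names bump_nr_to_write() and NR_TO_WRITE_CAP are illustrative only, not
part of the kernel sources: the sketch computes how many pages the contiguous
delalloc range covers, starting from the current page, and raises the
writeback budget to that count, capped at 8192 pages.

/*
 * Userspace sketch of the nr_to_write bump (not kernel code).
 * Assumes 4 KiB pages; helper and constant names are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12	/* assumed 4 KiB page size */
#define NR_TO_WRITE_CAP  8192	/* cap used by the patch */

static long bump_nr_to_write(long nr_to_write, uint64_t page_offset,
			     uint64_t delalloc_start, uint64_t delalloc_end)
{
	/* count from this page or the range start, whichever is later
	 * (mirrors the max_t(u64, ...) in the patch) */
	uint64_t start = page_offset > delalloc_start ?
			 page_offset : delalloc_start;

	/* pages covered by the contiguous delalloc range */
	uint64_t delalloc_to_write =
		(delalloc_end - start + 1) >> PAGE_CACHE_SHIFT;

	/* raise the budget, but never above the 8192-page cap */
	if ((uint64_t)nr_to_write < delalloc_to_write)
		nr_to_write = delalloc_to_write < NR_TO_WRITE_CAP ?
			      (long)delalloc_to_write : NR_TO_WRITE_CAP;
	return nr_to_write;
}

int main(void)
{
	/* a 16 MiB delayed allocation extent starting at this page */
	uint64_t page_off = 0;
	uint64_t delalloc_start = 0;
	uint64_t delalloc_end = (16ULL << 20) - 1;

	/* write_cache_pages handed us a budget of only 1024 pages */
	printf("nr_to_write: %ld\n",
	       bump_nr_to_write(1024, page_off, delalloc_start, delalloc_end));
	return 0;
}

With a 16 MiB extent and an initial budget of 1024 pages, the sketch raises
the budget to 4096 pages, so write_cache_pages keeps writing the contiguous
extent instead of stopping at the original nr_to_write.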
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	14
1 file changed, 9 insertions, 5 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7e5c5a0749e2..8d7a152a90c6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2135,6 +2135,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	delalloc_end = 0;
 	page_started = 0;
 	if (!epd->extent_locked) {
+		u64 delalloc_to_write;
 		/*
 		 * make sure the wbc mapping index is at least updated
 		 * to this page.
@@ -2154,6 +2155,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 			tree->ops->fill_delalloc(inode, page, delalloc_start,
 						 delalloc_end, &page_started,
 						 &nr_written);
+			delalloc_to_write = (delalloc_end -
+					max_t(u64, page_offset(page),
+					      delalloc_start) + 1) >>
+					PAGE_CACHE_SHIFT;
+			if (wbc->nr_to_write < delalloc_to_write) {
+				wbc->nr_to_write = min_t(long, 8192,
+							 delalloc_to_write);
+			}
 			delalloc_start = delalloc_end + 1;
 		}
 
@@ -2350,7 +2359,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 				     writepage_t writepage, void *data,
 				     void (*flush_fn)(void *))
 {
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
 	int ret = 0;
 	int done = 0;
 	struct pagevec pvec;
@@ -2425,10 +2433,6 @@ retry:
 			}
 			if (ret || wbc->nr_to_write <= 0)
 				done = 1;
-			if (wbc->nonblocking && bdi_write_congested(bdi)) {
-				wbc->encountered_congestion = 1;
-				done = 1;
-			}
 		}
 		pagevec_release(&pvec);
 		cond_resched();