author		Paul Mundt <lethal@linux-sh.org>	2009-04-21 20:27:47 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-04-21 20:27:47 -0400
commit		99ce567ba912109c78762246c964327f3f81f27d (patch)
tree		685265d60792c11d386db6c005ca8b8e714ecc23 /fs/btrfs/extent_io.c
parent		8fb2bae4b41eb64f6e233e9bd3f3a789fbb04a06 (diff)
parent		ccc5ff94c66e628d3c501b26ace5d4339667715d (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	86
1 file changed, 63 insertions(+), 23 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eb2bee8b7fbf..05a1c42e25bf 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -50,7 +50,10 @@ struct extent_page_data {
 	/* tells writepage not to lock the state bits for this range
 	 * it still does the unlocking
 	 */
-	int extent_locked;
+	unsigned int extent_locked:1;
+
+	/* tells the submit_bio code to use a WRITE_SYNC */
+	unsigned int sync_io:1;
 };
 
 int __init extent_io_init(void)
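The two flags now pack into single-bit fields. For reference, a minimal sketch of how a caller populates them, with the field values copied from the extent_write_full_page() hunk further down (unlisted members are zeroed by the designated initializer):

	struct extent_page_data epd = {
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
	};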
@@ -2101,6 +2104,16 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 	return ret;
 }
 
+static noinline void update_nr_written(struct page *page,
+				       struct writeback_control *wbc,
+				       unsigned long nr_written)
+{
+	wbc->nr_to_write -= nr_written;
+	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
+	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
+		page->mapping->writeback_index = page->index + nr_written;
+}
+
 /*
  * the writepage semantics are similar to regular writepage.  extent
  * records are inserted to lock ranges in the tree, and as dirty areas
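The resume-cursor bookkeeping above is the heart of this hunk. A self-contained sketch of the same logic, using hypothetical stand-in types (wbc_lite and update_nr_written_lite are illustrations, not kernel code):

#include <limits.h>	/* LLONG_MAX */

struct wbc_lite {
	long nr_to_write;			/* remaining page budget */
	int range_cyclic;			/* cyclic whole-mapping sweep? */
	long long range_start, range_end;	/* requested byte range */
};

static void update_nr_written_lite(struct wbc_lite *wbc,
				   unsigned long page_index,
				   unsigned long *writeback_index,
				   unsigned long nr_written)
{
	/* charge the pages just written against the caller's budget */
	wbc->nr_to_write -= nr_written;

	/* advance the mapping's resume cursor for cyclic sweeps, or for
	 * whole-file sweeps (0..LLONG_MAX) that still have budget left;
	 * a bounded-range writeback must not disturb where the next
	 * background pass resumes */
	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
		*writeback_index = page_index + nr_written;
}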
@@ -2136,8 +2149,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	u64 delalloc_end;
 	int page_started;
 	int compressed;
+	int write_flags;
 	unsigned long nr_written = 0;
 
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		write_flags = WRITE_SYNC_PLUG;
+	else
+		write_flags = WRITE;
+
 	WARN_ON(!PageLocked(page));
 	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
 	if (page->index > end_index ||
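For background on the flag choice above, the block-layer semantics of this kernel generation are roughly as follows (stated from general knowledge, not from this patch):

	/*
	 * WRITE           - ordinary asynchronous write
	 * WRITE_SYNC      - synchronous priority; unplugs the queue at submit
	 * WRITE_SYNC_PLUG - synchronous priority, but leaves the queue plugged
	 *                   so later pages in the same pass can still merge
	 */

This is why the per-page path picks WRITE_SYNC_PLUG (more pages may follow) while the final flush added later in this diff uses plain WRITE_SYNC.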
@@ -2164,6 +2183,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	delalloc_end = 0;
 	page_started = 0;
 	if (!epd->extent_locked) {
+		/*
+		 * make sure the wbc mapping index is at least updated
+		 * to this page.
+		 */
+		update_nr_written(page, wbc, 0);
+
 		while (delalloc_end < page_end) {
 			nr_delalloc = find_lock_delalloc_range(inode, tree,
 							       page,
@@ -2185,7 +2210,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		 */
 		if (page_started) {
 			ret = 0;
-			goto update_nr_written;
+			/*
+			 * we've unlocked the page, so we can't update
+			 * the mapping's writeback index, just update
+			 * nr_to_write.
+			 */
+			wbc->nr_to_write -= nr_written;
+			goto done_unlocked;
 		}
 	}
 	lock_extent(tree, start, page_end, GFP_NOFS);
@@ -2198,13 +2229,18 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		if (ret == -EAGAIN) {
 			unlock_extent(tree, start, page_end, GFP_NOFS);
 			redirty_page_for_writepage(wbc, page);
+			update_nr_written(page, wbc, nr_written);
 			unlock_page(page);
 			ret = 0;
-			goto update_nr_written;
+			goto done_unlocked;
 		}
 	}
 
-	nr_written++;
+	/*
+	 * we don't want to touch the inode after unlocking the page,
+	 * so we update the mapping writeback index now
+	 */
+	update_nr_written(page, wbc, nr_written + 1);
 
 	end = page_end;
 	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
@@ -2314,9 +2350,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 			       (unsigned long long)end);
 		}
 
-		ret = submit_extent_page(WRITE, tree, page, sector,
-					 iosize, pg_offset, bdev,
-					 &epd->bio, max_nr,
+		ret = submit_extent_page(write_flags, tree, page,
+					 sector, iosize, pg_offset,
+					 bdev, &epd->bio, max_nr,
 					 end_bio_extent_writepage,
 					 0, 0, 0);
 		if (ret)
@@ -2336,11 +2372,8 @@ done:
 	unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
 	unlock_page(page);
 
-update_nr_written:
-	wbc->nr_to_write -= nr_written;
-	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
-	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
-		page->mapping->writeback_index = page->index + nr_written;
+done_unlocked:
+
 	return 0;
 }
 
@@ -2460,15 +2493,23 @@ retry:
 	return ret;
 }
 
-static noinline void flush_write_bio(void *data)
+static void flush_epd_write_bio(struct extent_page_data *epd)
 {
-	struct extent_page_data *epd = data;
 	if (epd->bio) {
-		submit_one_bio(WRITE, epd->bio, 0, 0);
+		if (epd->sync_io)
+			submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
+		else
+			submit_one_bio(WRITE, epd->bio, 0, 0);
 		epd->bio = NULL;
 	}
 }
 
+static noinline void flush_write_bio(void *data)
+{
+	struct extent_page_data *epd = data;
+	flush_epd_write_bio(epd);
+}
+
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 			   get_extent_t *get_extent,
 			   struct writeback_control *wbc)
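The split leaves flush_write_bio() as a thin adapter: extent_write_cache_pages() takes an untyped flush callback (its signature is inferred from the call sites in this diff), while exit paths that hold a typed pointer call the helper directly, as the callers below do:

	extent_write_cache_pages(tree, mapping, &wbc_writepages,
				 __extent_writepage, &epd, flush_write_bio);
	flush_epd_write_bio(&epd);	/* typed flush; honours epd.sync_io */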
@@ -2480,23 +2521,22 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 		.tree = tree,
 		.get_extent = get_extent,
 		.extent_locked = 0,
+		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
 	struct writeback_control wbc_writepages = {
 		.bdi = wbc->bdi,
-		.sync_mode = WB_SYNC_NONE,
+		.sync_mode = wbc->sync_mode,
 		.older_than_this = NULL,
 		.nr_to_write = 64,
 		.range_start = page_offset(page) + PAGE_CACHE_SIZE,
 		.range_end = (loff_t)-1,
 	};
 
-
 	ret = __extent_writepage(page, wbc, &epd);
 
 	extent_write_cache_pages(tree, mapping, &wbc_writepages,
 				 __extent_writepage, &epd, flush_write_bio);
-	if (epd.bio)
-		submit_one_bio(WRITE, epd.bio, 0, 0);
+	flush_epd_write_bio(&epd);
 	return ret;
 }
 
@@ -2515,6 +2555,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		.tree = tree,
 		.get_extent = get_extent,
 		.extent_locked = 1,
+		.sync_io = mode == WB_SYNC_ALL,
 	};
 	struct writeback_control wbc_writepages = {
 		.bdi = inode->i_mapping->backing_dev_info,
@@ -2540,8 +2581,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		start += PAGE_CACHE_SIZE;
 	}
 
-	if (epd.bio)
-		submit_one_bio(WRITE, epd.bio, 0, 0);
+	flush_epd_write_bio(&epd);
 	return ret;
 }
 
@@ -2556,13 +2596,13 @@ int extent_writepages(struct extent_io_tree *tree,
 		.tree = tree,
 		.get_extent = get_extent,
 		.extent_locked = 0,
+		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
 
 	ret = extent_write_cache_pages(tree, mapping, wbc,
 				       __extent_writepage, &epd,
 				       flush_write_bio);
-	if (epd.bio)
-		submit_one_bio(WRITE, epd.bio, 0, 0);
+	flush_epd_write_bio(&epd);
 	return ret;
 }
 
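Taken together, a hypothetical fsync-style caller (a sketch only; the extent_writepages() argument order and btrfs_get_extent are assumed from surrounding btrfs code, not shown in this diff) now gets synchronous bios end to end:

	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_ALL,	/* every epd above sets sync_io = 1 */
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end   = LLONG_MAX,
	};
	ret = extent_writepages(tree, inode->i_mapping, btrfs_get_extent, &wbc);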