Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	44
1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eb2bee8b7fb..483b6727aaa 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -50,7 +50,10 @@ struct extent_page_data {
 	/* tells writepage not to lock the state bits for this range
 	 * it still does the unlocking
 	 */
-	int extent_locked;
+	unsigned int extent_locked:1;
+
+	/* tells the submit_bio code to use a WRITE_SYNC */
+	unsigned int sync_io:1;
 };
 
 int __init extent_io_init(void)
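
The flags in struct extent_page_data become single-bit fields, and the new sync_io bit records whether the caller is doing synchronous writeback (per the comment, it tells the submit_bio path to use WRITE_SYNC). Below is a minimal standalone model of the field layout and of the designated-initializer pattern the later call-site hunks use; this is plain userspace C, not kernel code, and the enum only stands in for the kernel's wbc->sync_mode values.

	#include <stdio.h>

	/* Stand-in for the kernel's writeback sync modes. */
	enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

	/* Model of the patched flag fields in extent_page_data. */
	struct epd_flags {
		unsigned int extent_locked:1;	/* range already locked; writepage skips locking state bits */
		unsigned int sync_io:1;		/* submit the bio with a sync hint */
	};

	int main(void)
	{
		enum sync_mode mode = WB_SYNC_ALL;
		struct epd_flags epd = {
			.extent_locked = 0,
			.sync_io = mode == WB_SYNC_ALL,	/* mirrors the call-site initializers below */
		};

		printf("extent_locked=%u sync_io=%u\n", epd.extent_locked, epd.sync_io);
		return 0;
	}
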
@@ -2136,8 +2139,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	u64 delalloc_end;
 	int page_started;
 	int compressed;
+	int write_flags;
 	unsigned long nr_written = 0;
 
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		write_flags = WRITE_SYNC_PLUG;
+	else
+		write_flags = WRITE;
+
 	WARN_ON(!PageLocked(page));
 	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
 	if (page->index > end_index ||
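
__extent_writepage() now picks the bio write flags once per page: WRITE_SYNC_PLUG when the writeback control asks for WB_SYNC_ALL, plain WRITE otherwise, and the submit_extent_page() call in the next hunk passes write_flags through instead of a hard-coded WRITE. A compilable sketch of just that selection follows; the block-layer flag values are modeled as arbitrary constants, since only the choice between them matters for illustration.

	#include <stdio.h>

	enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

	/* Modeled values; the real WRITE/WRITE_SYNC_PLUG come from the block layer. */
	#define MODEL_WRITE		1
	#define MODEL_WRITE_SYNC_PLUG	2

	/* WB_SYNC_ALL selects the sync+plug flags, everything else a plain write. */
	static int pick_write_flags(enum sync_mode mode)
	{
		if (mode == WB_SYNC_ALL)
			return MODEL_WRITE_SYNC_PLUG;
		return MODEL_WRITE;
	}

	int main(void)
	{
		printf("sync: %d, async: %d\n",
		       pick_write_flags(WB_SYNC_ALL), pick_write_flags(WB_SYNC_NONE));
		return 0;
	}
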
@@ -2314,9 +2323,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 			       (unsigned long long)end);
 		}
 
-		ret = submit_extent_page(WRITE, tree, page, sector,
-					 iosize, pg_offset, bdev,
-					 &epd->bio, max_nr,
+		ret = submit_extent_page(write_flags, tree, page,
+					 sector, iosize, pg_offset,
+					 bdev, &epd->bio, max_nr,
 					 end_bio_extent_writepage,
 					 0, 0, 0);
 		if (ret)
@@ -2460,15 +2469,23 @@ retry:
 	return ret;
 }
 
-static noinline void flush_write_bio(void *data)
+static void flush_epd_write_bio(struct extent_page_data *epd)
 {
-	struct extent_page_data *epd = data;
 	if (epd->bio) {
-		submit_one_bio(WRITE, epd->bio, 0, 0);
+		if (epd->sync_io)
+			submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
+		else
+			submit_one_bio(WRITE, epd->bio, 0, 0);
 		epd->bio = NULL;
 	}
 }
 
+static noinline void flush_write_bio(void *data)
+{
+	struct extent_page_data *epd = data;
+	flush_epd_write_bio(epd);
+}
+
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 			  get_extent_t *get_extent,
 			  struct writeback_control *wbc)
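
The final-flush logic moves into flush_epd_write_bio(), which submits any bio still held in the epd with WRITE_SYNC or WRITE depending on the sync_io bit, while the old flush_write_bio() name survives as a thin void * adapter for the extent_write_cache_pages() callback. A self-contained model of that wrapper-plus-adapter shape is sketched below; the stub types and the printing submit function stand in for the kernel pieces and are assumptions for illustration only.

	#include <stdio.h>

	struct bio { int dummy; };		/* stub */

	struct extent_page_data {
		struct bio *bio;		/* bio being built up across pages */
		unsigned int extent_locked:1;
		unsigned int sync_io:1;		/* submit the final flush as a sync write */
	};

	#define MODEL_WRITE		1
	#define MODEL_WRITE_SYNC	2

	static void submit_one_bio(int rw, struct bio *bio)
	{
		(void)bio;
		printf("submit_one_bio(rw=%d)\n", rw);
	}

	/* Submit whatever bio is still pending, honoring the sync_io hint. */
	static void flush_epd_write_bio(struct extent_page_data *epd)
	{
		if (epd->bio) {
			submit_one_bio(epd->sync_io ? MODEL_WRITE_SYNC : MODEL_WRITE,
				       epd->bio);
			epd->bio = NULL;
		}
	}

	/* void * adapter kept for the write_cache_pages-style callback interface. */
	static void flush_write_bio(void *data)
	{
		flush_epd_write_bio(data);
	}

	int main(void)
	{
		struct bio b;
		struct extent_page_data epd = { .bio = &b, .sync_io = 1 };

		flush_write_bio(&epd);		/* prints the sync variant */
		flush_epd_write_bio(&epd);	/* bio already consumed: no output */
		return 0;
	}

With this helper in place, the remaining hunks make all three entry points (extent_write_full_page, extent_write_locked_range, extent_writepages) set sync_io in their epd initializer and end with flush_epd_write_bio(&epd), instead of open-coding an "if (epd.bio) submit_one_bio(WRITE, ...)" tail that always submitted the trailing bio without the sync hint.
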
@@ -2480,6 +2497,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 		.tree = tree,
 		.get_extent = get_extent,
 		.extent_locked = 0,
+		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
 	struct writeback_control wbc_writepages = {
 		.bdi = wbc->bdi,
@@ -2490,13 +2508,11 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 		.range_end = (loff_t)-1,
 	};
 
-
 	ret = __extent_writepage(page, wbc, &epd);
 
 	extent_write_cache_pages(tree, mapping, &wbc_writepages,
 				 __extent_writepage, &epd, flush_write_bio);
-	if (epd.bio)
-		submit_one_bio(WRITE, epd.bio, 0, 0);
+	flush_epd_write_bio(&epd);
 	return ret;
 }
 
@@ -2515,6 +2531,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		.tree = tree,
 		.get_extent = get_extent,
 		.extent_locked = 1,
+		.sync_io = mode == WB_SYNC_ALL,
 	};
 	struct writeback_control wbc_writepages = {
 		.bdi = inode->i_mapping->backing_dev_info,
@@ -2540,8 +2557,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		start += PAGE_CACHE_SIZE;
 	}
 
-	if (epd.bio)
-		submit_one_bio(WRITE, epd.bio, 0, 0);
+	flush_epd_write_bio(&epd);
 	return ret;
 }
 
@@ -2556,13 +2572,13 @@ int extent_writepages(struct extent_io_tree *tree,
 		.tree = tree,
 		.get_extent = get_extent,
 		.extent_locked = 0,
+		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
 
 	ret = extent_write_cache_pages(tree, mapping, wbc,
 				       __extent_writepage, &epd,
 				       flush_write_bio);
-	if (epd.bio)
-		submit_one_bio(WRITE, epd.bio, 0, 0);
+	flush_epd_write_bio(&epd);
 	return ret;
 }
 