aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/ordered-data.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2008-11-06 22:02:51 -0500
committerChris Mason <chris.mason@oracle.com>2008-11-06 22:02:51 -0500
commit771ed689d2cd53439e28e095bc38fbe40a71429e (patch)
tree518801f7141928e398d40c2b5955720d4346ce1a /fs/btrfs/ordered-data.c
parent4a69a41009c4ac691f7d9c289f5f37fabeddce46 (diff)
Btrfs: Optimize compressed writeback and reads
When reading compressed extents, try to put pages into the page cache for any pages covered by the compressed extent that readpages didn't already preload.

Add an async work queue to handle transformations at delayed allocation processing time. Right now this is just compression. The workflow is:

1) Find offsets in the file marked for delayed allocation
2) Lock the pages
3) Lock the state bits
4) Call the async delalloc code

The async delalloc code clears the state lock bits and delalloc bits. It is important this happens before the range goes into the work queue because otherwise it might deadlock with other work queue items that try to lock those extent bits.

The file pages are compressed, and if the compression doesn't work the pages are written back directly.

An ordered work queue is used to make sure the inodes are written in the same order that pdflush or writepages sent them down.

This changes extent_write_cache_pages to let the writepage function update the wbc nr_written count.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/ordered-data.c')
-rw-r--r--fs/btrfs/ordered-data.c13
1 file changed, 8 insertions, 5 deletions
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 370bb4285597..027ad6b3839e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -390,7 +390,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
390 * start IO on any dirty ones so the wait doesn't stall waiting 390 * start IO on any dirty ones so the wait doesn't stall waiting
391 * for pdflush to find them 391 * for pdflush to find them
392 */ 392 */
393 btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE); 393 btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
394 if (wait) { 394 if (wait) {
395 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, 395 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
396 &entry->flags)); 396 &entry->flags));
@@ -421,6 +421,12 @@ again:
421 */ 421 */
422 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE); 422 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE);
423 423
424 /* The compression code will leave pages locked but return from
425 * writepage without setting the page writeback. Starting again
426 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
427 */
428 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
429
424 btrfs_wait_on_page_writeback_range(inode->i_mapping, 430 btrfs_wait_on_page_writeback_range(inode->i_mapping,
425 start >> PAGE_CACHE_SHIFT, 431 start >> PAGE_CACHE_SHIFT,
426 orig_end >> PAGE_CACHE_SHIFT); 432 orig_end >> PAGE_CACHE_SHIFT);
@@ -448,10 +454,7 @@ again:
448 } 454 }
449 if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end, 455 if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
450 EXTENT_ORDERED | EXTENT_DELALLOC, 0)) { 456 EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
451 printk("inode %lu still ordered or delalloc after wait " 457 schedule_timeout(1);
452 "%llu %llu\n", inode->i_ino,
453 (unsigned long long)start,
454 (unsigned long long)orig_end);
455 goto again; 458 goto again;
456 } 459 }
457 return 0; 460 return 0;