author    Josef Bacik <josef@redhat.com>    2009-11-10 21:23:48 -0500
committer Chris Mason <chris.mason@oracle.com>    2009-11-11 14:20:20 -0500
commit    f5a84ee3cdd88d96b7bcede10af58598ad8d52a7 (patch)
tree      5f69342df0c457aca31176234a801bf3607978c5 /fs
parent    ccf0e72537a9f68611ca575121afd08e2b4d0fb0 (diff)
Btrfs: fallback on uncompressed io if compressed io fails
Currently compressed IO does not deal with failing to allocate its entire extent. If we have enough free space for the extent but it is not contiguous, the write fails spectacularly. This patch fixes that by falling back on uncompressed IO, which lets us spread the delalloc extent across multiple extents. I tested this by making the code randomly pretend the reservation had failed, so that it fell back on the uncompressed IO path, and it seemed to work fine. Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
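For illustration only, here is a minimal sketch of the retry-on-failure pattern the patch introduces. The helpers reserve_extent(), free_compressed_pages(), and write_uncompressed_range() are hypothetical stand-ins for btrfs_reserve_extent(), the page_cache_release() loop, and the cow_file_range()/extent_write_locked_range() path in the real code; this is not the btrfs implementation itself.

#include <stddef.h>

struct async_extent {
	unsigned long start;      /* file offset of the range */
	unsigned long ram_size;   /* uncompressed length */
	void **pages;             /* compressed pages, or NULL */
	int nr_pages;
};

/* hypothetical helpers standing in for the real btrfs routines */
int reserve_extent(unsigned long len);                 /* 0 on success */
void free_compressed_pages(struct async_extent *ae);
int write_uncompressed_range(unsigned long start, unsigned long len);

static int submit_one_extent(struct async_extent *ae)
{
	int ret;

retry:
	/*
	 * No compressed pages: take the plain uncompressed IO path,
	 * which may spread the range across several smaller extents.
	 */
	if (!ae->pages)
		return write_uncompressed_range(ae->start, ae->ram_size);

	/* compressed IO needs one contiguous on-disk reservation */
	ret = reserve_extent(ae->ram_size);
	if (ret) {
		/*
		 * Before the patch this was BUG_ON(ret); now we drop
		 * the compressed pages and retry as uncompressed IO.
		 */
		free_compressed_pages(ae);
		ae->pages = NULL;
		ae->nr_pages = 0;
		goto retry;
	}

	/* ... submit the compressed bios against the reservation ... */
	return 0;
}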
Diffstat (limited to 'fs')
-rw-r--r--    fs/btrfs/inode.c    31
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index bb7fd8072802..d3d7d46a6af2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -538,7 +538,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_io_tree *io_tree;
-	int ret;
+	int ret = 0;
 
 	if (list_empty(&async_cow->extents))
 		return 0;
@@ -552,6 +552,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 
 	io_tree = &BTRFS_I(inode)->io_tree;
 
+retry:
 	/* did the compression code fall back to uncompressed IO? */
 	if (!async_extent->pages) {
 		int page_started = 0;
@@ -562,11 +563,11 @@ static noinline int submit_compressed_extents(struct inode *inode,
 			 async_extent->ram_size - 1, GFP_NOFS);
 
 		/* allocate blocks */
-		cow_file_range(inode, async_cow->locked_page,
-			       async_extent->start,
-			       async_extent->start +
-			       async_extent->ram_size - 1,
-			       &page_started, &nr_written, 0);
+		ret = cow_file_range(inode, async_cow->locked_page,
+				     async_extent->start,
+				     async_extent->start +
+				     async_extent->ram_size - 1,
+				     &page_started, &nr_written, 0);
 
 		/*
 		 * if page_started, cow_file_range inserted an
@@ -574,7 +575,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 		 * and IO for us. Otherwise, we need to submit
 		 * all those pages down to the drive.
 		 */
-		if (!page_started)
+		if (!page_started && !ret)
 			extent_write_locked_range(io_tree,
 				inode, async_extent->start,
 				async_extent->start +
@@ -602,7 +603,21 @@ static noinline int submit_compressed_extents(struct inode *inode,
 					   async_extent->compressed_size,
 					   0, alloc_hint,
 					   (u64)-1, &ins, 1);
-		BUG_ON(ret);
+		if (ret) {
+			int i;
+			for (i = 0; i < async_extent->nr_pages; i++) {
+				WARN_ON(async_extent->pages[i]->mapping);
+				page_cache_release(async_extent->pages[i]);
+			}
+			kfree(async_extent->pages);
+			async_extent->nr_pages = 0;
+			async_extent->pages = NULL;
+			unlock_extent(io_tree, async_extent->start,
+				      async_extent->start +
+				      async_extent->ram_size - 1, GFP_NOFS);
+			goto retry;
+		}
+
 		em = alloc_extent_map(GFP_NOFS);
 		em->start = async_extent->start;
 		em->len = async_extent->ram_size;