author     Chris Mason <chris.mason@oracle.com>   2008-08-20 08:51:50 -0400
committer  Chris Mason <chris.mason@oracle.com>   2008-09-25 11:04:06 -0400
commit     7c2fe32a238eb12422beca5cbd5194a594baa559
tree       a95bada9d991780e0611e94529e63c8178f4f3be
parent     902b22f341efa00be802418a0a8c57bddcd269a6
Btrfs: Fix add_extent_mapping to check for duplicates across the whole range
add_extent_mapping was allowing the insertion of overlapping extents.
This never used to happen because it only inserted the extents from disk
and those were never overlapping.
But, with the data=ordered code, the disk and memory representations of the
file are not the same. add_extent_mapping needs to ensure a new extent
does not overlap before it inserts.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
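
As a hypothetical, self-contained illustration of that check (plain userspace C, not btrfs code): an insert has to be rejected when the new range overlaps any existing range anywhere in [start, start + len), not only when another entry starts at the same offset.

#include <stdio.h>
#include <errno.h>

struct range {
	unsigned long long start;
	unsigned long long len;
};

/* Nonzero if [a_start, a_start + a_len) overlaps [b_start, b_start + b_len). */
static int ranges_overlap(unsigned long long a_start, unsigned long long a_len,
			  unsigned long long b_start, unsigned long long b_len)
{
	return a_start < b_start + b_len && b_start < a_start + a_len;
}

/* Insert a range only if it overlaps nothing already in the table. */
static int add_range(struct range *table, int *count, int max,
		     unsigned long long start, unsigned long long len)
{
	int i;

	for (i = 0; i < *count; i++)
		if (ranges_overlap(start, len, table[i].start, table[i].len))
			return -EEXIST;	/* overlaps an existing range */
	if (*count >= max)
		return -ENOSPC;
	table[*count].start = start;
	table[*count].len = len;
	(*count)++;
	return 0;
}

int main(void)
{
	struct range table[8];
	int count = 0;

	printf("first insert:  %d\n", add_range(table, &count, 8, 0, 4096));
	/* Starts at a different offset but still overlaps, so it must fail. */
	printf("second insert: %d\n", add_range(table, &count, 8, 2048, 4096));
	return 0;
}

The patch below does the equivalent inside add_extent_mapping by calling lookup_extent_mapping() over the whole target range and returning -EEXIST if anything comes back.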
 fs/btrfs/extent_map.c |  7 +++++++
 fs/btrfs/inode.c      | 21 +++++++++++++++++----
 2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 954b047639ab..78ced11d18c7 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -207,7 +207,14 @@ int add_extent_mapping(struct extent_map_tree *tree,
 	int ret = 0;
 	struct extent_map *merge = NULL;
 	struct rb_node *rb;
+	struct extent_map *exist;
 
+	exist = lookup_extent_mapping(tree, em->start, em->len);
+	if (exist) {
+		free_extent_map(exist);
+		ret = -EEXIST;
+		goto out;
+	}
 	assert_spin_locked(&tree->lock);
 	rb = tree_insert(&tree->map, em->start, &em->rb_node);
 	if (rb) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 65107894a5bb..6c778043207d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -641,8 +641,9 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
 		if (ret == -ENOENT || ret == -EFBIG)
 			ret = 0;
 		csum = 0;
-		printk("no csum found for inode %lu start %Lu\n", inode->i_ino,
-		       start);
+		if (printk_ratelimit())
+			printk("no csum found for inode %lu start %Lu\n", inode->i_ino,
+			       start);
 		goto out;
 	}
 	read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
@@ -1653,8 +1654,20 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
 		btrfs_truncate_page(inode->i_mapping, inode->i_size);
 
 		hole_size = block_end - hole_start;
-		btrfs_wait_ordered_range(inode, hole_start, hole_size);
-		lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+		while(1) {
+			struct btrfs_ordered_extent *ordered;
+			btrfs_wait_ordered_range(inode, hole_start, hole_size);
+
+			lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+			ordered = btrfs_lookup_ordered_extent(inode, hole_start);
+			if (ordered) {
+				unlock_extent(io_tree, hole_start,
+					      block_end - 1, GFP_NOFS);
+				btrfs_put_ordered_extent(ordered);
+			} else {
+				break;
+			}
+		}
 
 		trans = btrfs_start_transaction(root, 1);
 		btrfs_set_trans_block_group(trans, inode);
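
The btrfs_setattr hunk above follows a wait -> lock -> recheck -> retry pattern: a new ordered extent can be queued against the hole between the wait and the extent lock, so the range is re-checked after locking and the whole sequence retried until it comes up clean. A rough, hypothetical userspace analogue of that pattern (the names and the pthreads locking are illustrative, not btrfs code):

#include <pthread.h>
#include <stdbool.h>

/*
 * Illustration only: pending work can be queued against the region between
 * the wait and the lock, so the state is re-validated after locking and the
 * whole sequence is retried until nothing has slipped in.
 */
struct region {
	pthread_mutex_t lock;
	bool pending_io;	/* stands in for an ordered extent in flight */
};

/* Placeholder: flush and wait for outstanding work on the region. */
static void wait_for_pending(struct region *r)
{
	(void)r;
}

/* Returns with r->lock held and no pending work left on the region. */
static void lock_region_when_idle(struct region *r)
{
	while (1) {
		wait_for_pending(r);		/* may race with new submissions */
		pthread_mutex_lock(&r->lock);
		if (!r->pending_io)		/* nothing snuck in: done */
			break;
		pthread_mutex_unlock(&r->lock);	/* drop the lock and retry */
	}
}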