author    | Chris Mason <chris.mason@oracle.com> | 2007-11-01 11:28:41 -0400
committer | Chris Mason <chris.mason@oracle.com> | 2008-09-25 11:03:57 -0400
commit    | 179e29e488cc74f1e9bd67bc45f70b832740e9ec (patch)
tree      | aa055d77b8d3f2b4bc59b1bb8a5b98ec0b223fb5 /fs/btrfs/extent_map.c
parent    | 35ebb934bd7fcc7ca991b155b7980c3c4ff9f1a5 (diff)
Btrfs: Fix a number of inline extent problems that Yan Zheng reported.
The fixes do a number of things:
1) Most btrfs_drop_extent callers will try to leave inline extents in
place; btrfs_drop_extent can truncate bytes off the beginning of an
inline extent if required.
2) writepage can now update the inline extent, allowing mmap writes to
go directly into the inline extent.
3) btrfs_truncate_in_transaction truncates inline extents.
4) extent_map.c is fixed to not merge inline extent mappings and hole
mappings together (see the sketch below).
Signed-off-by: Chris Mason <chris.mason@oracle.com>
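Point 4 is the part of this change that lands in fs/btrfs/extent_map.c. As an editor's sketch (not code from the patch), the new merge rule from the first hunk can be read as a standalone predicate. The helper name can_merge_em() is hypothetical; struct extent_map, its block_start/block_end fields, and the EXTENT_MAP_HOLE / EXTENT_MAP_INLINE / EXTENT_MAP_DELALLOC sentinels are taken from the hunk below and the surrounding kernel code.

/*
 * Editor's sketch, not part of the patch: two adjacent extent mappings
 * may only be merged when they describe the same kind of space.  Holes
 * merge with holes, inline extents with inline extents, delalloc with
 * delalloc, and real on-disk extents only when they are physically
 * contiguous (block_start below the sentinel range, which the patch
 * tests as < EXTENT_MAP_DELALLOC - 1).
 */
static int can_merge_em(struct extent_map *prev, struct extent_map *em)
{
	if (!prev || prev->end + 1 != em->start)
		return 0;
	if (em->block_start == EXTENT_MAP_HOLE &&
	    prev->block_start == EXTENT_MAP_HOLE)
		return 1;
	if (em->block_start == EXTENT_MAP_INLINE &&
	    prev->block_start == EXTENT_MAP_INLINE)
		return 1;
	if (em->block_start == EXTENT_MAP_DELALLOC &&
	    prev->block_start == EXTENT_MAP_DELALLOC)
		return 1;
	if (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
	    em->block_start == prev->block_end + 1)
		return 1;
	return 0;
}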
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r-- | fs/btrfs/extent_map.c | 11
1 file changed, 8 insertions, 3 deletions
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 238cb1d81d56..44be9cfd30ee 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -263,7 +263,12 @@ int add_extent_mapping(struct extent_map_tree *tree,
 	if (prev && prev->end + 1 == em->start &&
 	    ((em->block_start == EXTENT_MAP_HOLE &&
 	      prev->block_start == EXTENT_MAP_HOLE) ||
-	     (em->block_start == prev->block_end + 1))) {
+	     (em->block_start == EXTENT_MAP_INLINE &&
+	      prev->block_start == EXTENT_MAP_INLINE) ||
+	     (em->block_start == EXTENT_MAP_DELALLOC &&
+	      prev->block_start == EXTENT_MAP_DELALLOC) ||
+	     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
+	      em->block_start == prev->block_end + 1))) {
 		em->start = prev->start;
 		em->block_start = prev->block_start;
 		rb_erase(&prev->rb_node, &tree->map);
@@ -1618,13 +1623,13 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 	u64 extent_offset;
 	u64 last_byte = i_size_read(inode);
 	u64 block_start;
+	u64 iosize;
 	sector_t sector;
 	struct extent_map *em;
 	struct block_device *bdev;
 	int ret;
 	int nr = 0;
 	size_t page_offset = 0;
-	size_t iosize;
 	size_t blocksize;
 	loff_t i_size = i_size_read(inode);
 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
@@ -1684,7 +1689,7 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
 			break;
 		}
-		em = get_extent(inode, page, page_offset, cur, end, 0);
+		em = get_extent(inode, page, page_offset, cur, end, 1);
 		if (IS_ERR(em) || !em) {
 			SetPageError(page);
 			break;
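One detail outside the inline-extent logic: the second hunk changes iosize in extent_write_full_page() from size_t to u64 and moves its declaration up with the other u64 locals. A plausible reading (ours, not stated in the commit) is that size_t is only 32 bits on 32-bit kernels, so a byte count derived from 64-bit extent boundaries could silently truncate. The standalone userspace example below only illustrates that truncation; it is not code from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t extent_end = 5ULL << 30;	/* extent ends at 5 GiB */
	uint64_t cur        = 1ULL << 30;	/* current write position: 1 GiB */

	/* A 32-bit size_t (as on 32-bit kernels) cannot hold the 4 GiB length. */
	uint32_t len32 = (uint32_t)(extent_end - cur);	/* truncates to 0 */
	uint64_t len64 = extent_end - cur;		/* keeps the full value */

	printf("32-bit length: %u bytes\n", len32);
	printf("64-bit length: %llu bytes\n", (unsigned long long)len64);
	return 0;
}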