Diffstat (limited to 'fs')
 -rw-r--r--  fs/btrfs/extent_io.c   | 27
 -rw-r--r--  fs/btrfs/extent_map.c  |  4
 -rw-r--r--  fs/btrfs/extent_map.h  |  3
 -rw-r--r--  fs/btrfs/file-item.c   |  5
 -rw-r--r--  fs/btrfs/file.c        | 14
 -rw-r--r--  fs/btrfs/inode.c       | 15
6 files changed, 48 insertions, 20 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 3153b4fdd0b3..d4a63ae7ed1b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2000,7 +2000,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         struct block_device *bdev;
         int ret;
         int nr = 0;
-        size_t page_offset = 0;
+        size_t pg_offset = 0;
         size_t blocksize;
         loff_t i_size = i_size_read(inode);
         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
@@ -2008,9 +2008,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         u64 delalloc_end;
 
         WARN_ON(!PageLocked(page));
-        page_offset = i_size & (PAGE_CACHE_SIZE - 1);
+        pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
         if (page->index > end_index ||
-           (page->index == end_index && !page_offset)) {
+           (page->index == end_index && !pg_offset)) {
                 page->mapping->a_ops->invalidatepage(page, 0);
                 unlock_page(page);
                 return 0;
@@ -2020,12 +2020,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 char *userpage;
 
                 userpage = kmap_atomic(page, KM_USER0);
-                memset(userpage + page_offset, 0,
-                       PAGE_CACHE_SIZE - page_offset);
+                memset(userpage + pg_offset, 0,
+                       PAGE_CACHE_SIZE - pg_offset);
                 kunmap_atomic(userpage, KM_USER0);
                 flush_dcache_page(page);
         }
-        page_offset = 0;
+        pg_offset = 0;
 
         set_page_extent_mapped(page);
 
@@ -2088,7 +2088,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                         unlock_start = page_end + 1;
                         break;
                 }
-                em = epd->get_extent(inode, page, page_offset, cur,
+                em = epd->get_extent(inode, page, pg_offset, cur,
                                      end - cur + 1, 1);
                 if (IS_ERR(em) || !em) {
                         SetPageError(page);
@@ -2113,12 +2113,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
                         unlock_extent(tree, unlock_start, cur + iosize -1,
                                       GFP_NOFS);
+
                         if (tree->ops && tree->ops->writepage_end_io_hook)
                                 tree->ops->writepage_end_io_hook(page, cur,
                                                          cur + iosize - 1,
                                                          NULL, 1);
                         cur = cur + iosize;
-                        page_offset += iosize;
+                        pg_offset += iosize;
                         unlock_start = cur;
                         continue;
                 }
@@ -2127,7 +2128,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
                                          EXTENT_DIRTY, 0)) {
                         cur = cur + iosize;
-                        page_offset += iosize;
+                        pg_offset += iosize;
                         continue;
                 }
                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
@@ -2141,6 +2142,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                         SetPageError(page);
                 } else {
                         unsigned long max_nr = end_index + 1;
+
                         set_range_writeback(tree, cur, cur + iosize - 1);
                         if (!PageWriteback(page)) {
                                 printk("warning page %lu not writeback, "
@@ -2150,14 +2152,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                         }
 
                         ret = submit_extent_page(WRITE, tree, page, sector,
-                                                 iosize, page_offset, bdev,
+                                                 iosize, pg_offset, bdev,
                                                  &epd->bio, max_nr,
                                                  end_bio_extent_writepage, 0);
                         if (ret)
                                 SetPageError(page);
                 }
                 cur = cur + iosize;
-                page_offset += iosize;
+                pg_offset += iosize;
                 nr++;
         }
 done:
@@ -2579,7 +2581,8 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                         spin_unlock(&map->lock);
                         break;
                 }
-                if (em->start != start) {
+                if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
+                    em->start != start) {
                         spin_unlock(&map->lock);
                         free_extent_map(em);
                         break;
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 81123277c2b8..71b1ac155355 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -173,6 +173,9 @@ static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
 
 static int mergable_maps(struct extent_map *prev, struct extent_map *next)
 {
+        if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
+                return 0;
+
         if (extent_map_end(prev) == next->start &&
             prev->flags == next->flags &&
             prev->bdev == next->bdev &&
@@ -320,6 +323,7 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
 {
         int ret = 0;
 
+        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
         BUG_ON(spin_trylock(&tree->lock));
         rb_erase(&em->rb_node, &tree->map);
         em->in_tree = 0;
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 56314217cfc0..a3978ec27846 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -8,6 +8,9 @@
 #define EXTENT_MAP_INLINE (u64)-2
 #define EXTENT_MAP_DELALLOC (u64)-1
 
+/* bits for the flags field */
+#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
+
 struct extent_map {
         struct rb_node rb_node;
 
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index e02f1e5acb0a..d9c69e16d368 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -192,7 +192,6 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                                        (char *)&sector_sum->sum);
                 sector_sum->offset = page_offset(bvec->bv_page) +
                                      bvec->bv_offset;
-
                 sector_sum++;
                 bio_index++;
                 total_bytes += bvec->bv_len;
@@ -201,9 +200,6 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
         }
         btrfs_add_ordered_sum(inode, ordered, sums);
         btrfs_put_ordered_extent(ordered);
-        if (total_bytes != bio->bi_size) {
-                printk("warning, total bytes %lu bio size %u\n", total_bytes, bio->bi_size);
-        }
         return 0;
 }
 
@@ -372,6 +368,7 @@ next_sector:
                 write_extent_buffer(leaf, &sector_sum->sum,
                                     (unsigned long)item, BTRFS_CRC32_SIZE);
         }
+
         total_bytes += root->sectorsize;
         sector_sum++;
         if (total_bytes < sums->len) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 40ad1b2958cb..eccdb9562ba8 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -358,9 +358,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
         struct extent_map *split = NULL;
         struct extent_map *split2 = NULL;
         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-        struct extent_map *tmp;
         u64 len = end - start + 1;
-        u64 next_start;
         int ret;
         int testend = 1;
 
@@ -381,8 +379,16 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
                         spin_unlock(&em_tree->lock);
                         break;
                 }
-                tmp = rb_entry(&em->rb_node, struct extent_map, rb_node);
-                next_start = tmp->start;
+                if (test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
+                        start = em->start + em->len;
+                        free_extent_map(em);
+                        spin_unlock(&em_tree->lock);
+                        if (start < end) {
+                                len = end - start + 1;
+                                continue;
+                        }
+                        break;
+                }
                 remove_extent_mapping(em_tree, em);
 
                 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8803abc89bb8..08dbe738b512 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -144,6 +144,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
                 em->len = ins.offset;
                 em->block_start = ins.objectid;
                 em->bdev = root->fs_info->fs_devices->latest_bdev;
+                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                 while(1) {
                         spin_lock(&em_tree->lock);
                         ret = add_extent_mapping(em_tree, em);
@@ -483,6 +484,8 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
         struct btrfs_trans_handle *trans;
         struct btrfs_ordered_extent *ordered_extent;
         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+        struct extent_map *em;
         u64 alloc_hint = 0;
         struct list_head list;
         struct btrfs_key ins;
@@ -524,6 +527,17 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                                        ordered_extent->len,
                                        ordered_extent->len, 0);
         BUG_ON(ret);
+
+
+        spin_lock(&em_tree->lock);
+        em = lookup_extent_mapping(em_tree, ordered_extent->file_offset,
+                                   ordered_extent->len);
+        if (em) {
+                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+                free_extent_map(em);
+        }
+        spin_unlock(&em_tree->lock);
+
         btrfs_drop_extent_cache(inode, ordered_extent->file_offset,
                                 ordered_extent->file_offset +
                                 ordered_extent->len - 1);
@@ -538,6 +552,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 
         btrfs_ordered_update_i_size(inode, ordered_extent);
         btrfs_remove_ordered_extent(inode, ordered_extent);
+
         /* once for us */
         btrfs_put_ordered_extent(ordered_extent);
         /* once for the tree */