Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	61
1 file changed, 44 insertions(+), 17 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 01c21b6c6d43..45c81bb4ac82 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -929,7 +929,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
 
 
 /**
- * convert_extent - convert all bits in a given range from one bit to another
+ * convert_extent_bit - convert all bits in a given range from one bit to
+ * another
  * @tree:	the io tree to search
  * @start:	the start offset in bytes
  * @end:	the end offset in bytes (inclusive)
@@ -1918,7 +1919,7 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
 		return -EIO;
 	}
 
-	printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
+	printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
 		      "(dev %s sector %llu)\n", page->mapping->host->i_ino,
 		      start, rcu_str_deref(dev->name), sector);
 
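Switching to printk_ratelimited_in_rcu() keeps a burst of corrected read errors from flooding the log. Below is a minimal userspace model of the ratelimit idea; the names and constants are illustrative, not the kernel's actual ratelimit API.

#include <stdio.h>
#include <time.h>

/* Allow at most `burst` messages per `interval` seconds; drop the rest. */
struct ratelimit_state {
	time_t begin;	/* start of the current window */
	int interval;	/* window length in seconds */
	int burst;	/* messages allowed per window */
	int printed;	/* messages emitted so far in this window */
};

static int ratelimit_ok(struct ratelimit_state *rs)
{
	time_t now = time(NULL);

	if (now - rs->begin >= rs->interval) {
		rs->begin = now;	/* new window: reset the budget */
		rs->printed = 0;
	}
	if (rs->printed >= rs->burst)
		return 0;		/* over budget: suppress */
	rs->printed++;
	return 1;
}

int main(void)
{
	struct ratelimit_state rs = { 0, 5, 10, 0 };

	for (int i = 0; i < 1000; i++)
		if (ratelimit_ok(&rs))
			printf("btrfs read error corrected: off %d\n", i);
	return 0;	/* only the first 10 lines appear in this window */
}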
@@ -3077,8 +3078,15 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
 		}
 	}
 
+	/*
+	 * We need to do this to prevent races in people who check if the eb is
+	 * under IO since we can end up having no IO bits set for a short period
+	 * of time.
+	 */
+	spin_lock(&eb->refs_lock);
 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+		spin_unlock(&eb->refs_lock);
 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
 		spin_lock(&fs_info->delalloc_lock);
 		if (fs_info->dirty_metadata_bytes >= eb->len)
@@ -3087,6 +3095,8 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
 			WARN_ON(1);
 		spin_unlock(&fs_info->delalloc_lock);
 		ret = 1;
+	} else {
+		spin_unlock(&eb->refs_lock);
 	}
 
 	btrfs_tree_unlock(eb);
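The new refs_lock critical section matters because EXTENT_BUFFER_DIRTY is cleared before EXTENT_BUFFER_WRITEBACK is set, so for a moment neither bit is set and a concurrent reader could wrongly conclude the buffer is idle. A userspace model of that race and the fix, with illustrative names rather than kernel API:

#include <pthread.h>
#include <stdbool.h>

#define EB_DIRTY	(1u << 0)
#define EB_WRITEBACK	(1u << 1)

struct ebuf {
	pthread_mutex_t refs_lock;
	unsigned long flags;
};

/* Writer: move DIRTY -> WRITEBACK without exposing a no-bits-set window. */
static void start_writeback(struct ebuf *eb)
{
	pthread_mutex_lock(&eb->refs_lock);
	if (eb->flags & EB_DIRTY) {
		eb->flags &= ~EB_DIRTY;
		eb->flags |= EB_WRITEBACK;
	}
	pthread_mutex_unlock(&eb->refs_lock);
}

/* Observer: under the same lock, "no bits set" now really means idle. */
static bool ebuf_is_idle(struct ebuf *eb)
{
	pthread_mutex_lock(&eb->refs_lock);
	bool idle = !(eb->flags & (EB_DIRTY | EB_WRITEBACK));
	pthread_mutex_unlock(&eb->refs_lock);
	return idle;
}

int main(void)
{
	struct ebuf eb = { PTHREAD_MUTEX_INITIALIZER, EB_DIRTY };

	start_writeback(&eb);
	return ebuf_is_idle(&eb);	/* 0: WRITEBACK is set, not idle */
}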
@@ -3557,19 +3567,38 @@ int extent_readpages(struct extent_io_tree *tree,
 	struct bio *bio = NULL;
 	unsigned page_idx;
 	unsigned long bio_flags = 0;
+	struct page *pagepool[16];
+	struct page *page;
+	int i = 0;
+	int nr = 0;
 
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-		struct page *page = list_entry(pages->prev, struct page, lru);
+		page = list_entry(pages->prev, struct page, lru);
 
 		prefetchw(&page->flags);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
+		if (add_to_page_cache_lru(page, mapping,
 					page->index, GFP_NOFS)) {
-			__extent_read_full_page(tree, page, get_extent,
-						&bio, 0, &bio_flags);
+			page_cache_release(page);
+			continue;
 		}
-		page_cache_release(page);
+
+		pagepool[nr++] = page;
+		if (nr < ARRAY_SIZE(pagepool))
+			continue;
+		for (i = 0; i < nr; i++) {
+			__extent_read_full_page(tree, pagepool[i], get_extent,
+					&bio, 0, &bio_flags);
+			page_cache_release(pagepool[i]);
+		}
+		nr = 0;
+	}
+	for (i = 0; i < nr; i++) {
+		__extent_read_full_page(tree, pagepool[i], get_extent,
+				&bio, 0, &bio_flags);
+		page_cache_release(pagepool[i]);
 	}
+
 	BUG_ON(!list_empty(pages));
 	if (bio)
 		return submit_one_bio(READ, bio, 0, bio_flags);
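extent_readpages() now gathers pages into a fixed on-stack pool and submits them 16 at a time instead of one by one, and a page that fails page-cache insertion is released immediately. A generic sketch of the same batch-and-flush pattern, using illustrative stand-ins rather than the kernel calls:

#include <stdio.h>

#define POOL_SIZE 16

/* Stand-in for running __extent_read_full_page() on each pooled page. */
static void process_batch(int *items, int nr)
{
	for (int i = 0; i < nr; i++)
		printf("processing item %d\n", items[i]);
}

int main(void)
{
	int pool[POOL_SIZE];
	int nr = 0;

	for (int item = 0; item < 50; item++) {
		pool[nr++] = item;
		if (nr < POOL_SIZE)
			continue;	/* keep filling the pool */
		process_batch(pool, nr);
		nr = 0;
	}
	process_batch(pool, nr);	/* flush the final partial batch */
	return 0;
}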
@@ -4123,11 +4152,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 * So bump the ref count first, then set the bit. If someone
 	 * beat us to it, drop the ref we added.
 	 */
-	if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
+	spin_lock(&eb->refs_lock);
+	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
 		atomic_inc(&eb->refs);
-		if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-			atomic_dec(&eb->refs);
-	}
+	spin_unlock(&eb->refs_lock);
 }
 
 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
@@ -4239,9 +4267,7 @@ again:
 		goto free_eb;
 	}
 	/* add one reference for the tree */
-	spin_lock(&eb->refs_lock);
 	check_buffer_tree_ref(eb);
-	spin_unlock(&eb->refs_lock);
 	spin_unlock(&tree->buffer_lock);
 	radix_tree_preload_end();
 
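Taking eb->refs_lock inside check_buffer_tree_ref() lets a single test-and-set decide whether to take the tree's reference, replacing the optimistic inc-then-maybe-dec dance, and it is why the caller above no longer wraps the call in its own lock/unlock pair. A userspace model of the simplified form (illustrative, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct ebuf {
	pthread_mutex_t refs_lock;
	atomic_int refs;
	bool tree_ref;	/* models EXTENT_BUFFER_TREE_REF */
};

static void check_tree_ref(struct ebuf *eb)
{
	pthread_mutex_lock(&eb->refs_lock);
	if (!eb->tree_ref) {	/* test_and_set_bit, made atomic by the lock */
		eb->tree_ref = true;
		atomic_fetch_add(&eb->refs, 1);	/* one ref for the tree */
	}
	pthread_mutex_unlock(&eb->refs_lock);
}

int main(void)
{
	struct ebuf eb = { PTHREAD_MUTEX_INITIALIZER, 1, false };

	check_tree_ref(&eb);	/* refs: 1 -> 2, tree_ref now set */
	check_tree_ref(&eb);	/* no-op: the bit is already set */
	return atomic_load(&eb.refs) == 2 ? 0 : 1;
}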
@@ -4300,7 +4326,7 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
 }
 
 /* Expects to have eb->eb_lock already held */
-static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
+static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
 {
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	if (atomic_dec_and_test(&eb->refs)) {
@@ -4321,9 +4347,11 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
 		btrfs_release_extent_buffer_page(eb, 0);
 
 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
-		return;
+		return 1;
 	}
 	spin_unlock(&eb->refs_lock);
+
+	return 0;
 }
 
 void free_extent_buffer(struct extent_buffer *eb)
@@ -4962,7 +4990,6 @@ int try_release_extent_buffer(struct page *page, gfp_t mask)
 		spin_unlock(&eb->refs_lock);
 		return 0;
 	}
-	release_extent_buffer(eb, mask);
 
-	return 1;
+	return release_extent_buffer(eb, mask);
 }
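With release_extent_buffer() now reporting whether the final reference was dropped, try_release_extent_buffer() can forward that result instead of unconditionally returning 1. A refcount sketch of the new contract, with illustrative types rather than the kernel's:

#include <stdatomic.h>

struct ebuf {
	atomic_int refs;
};

/* Returns 1 only if this call dropped the last reference. */
static int release_ebuf(struct ebuf *eb)
{
	if (atomic_fetch_sub(&eb->refs, 1) == 1) {
		/* last ref: tear down (free pages, RCU-free eb, ...) */
		return 1;
	}
	return 0;
}

/* The caller reports exactly what happened instead of assuming success. */
static int try_release_ebuf(struct ebuf *eb)
{
	return release_ebuf(eb);
}

int main(void)
{
	struct ebuf eb = { 2 };

	release_ebuf(&eb);		/* drops to 1, returns 0 */
	return try_release_ebuf(&eb);	/* drops to 0, exits 1: freed */
}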