Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/compression.c   33
-rw-r--r--  fs/btrfs/compression.h    4
-rw-r--r--  fs/btrfs/ctree.c         14
-rw-r--r--  fs/btrfs/ctree.h          2
-rw-r--r--  fs/btrfs/disk-io.c       43
-rw-r--r--  fs/btrfs/extent-tree.c   18
-rw-r--r--  fs/btrfs/file-item.c      2
-rw-r--r--  fs/btrfs/ioctl.c         20
-rw-r--r--  fs/btrfs/locking.c       24
-rw-r--r--  fs/btrfs/locking.h        2
-rw-r--r--  fs/btrfs/lzo.c           15
-rw-r--r--  fs/btrfs/super.c          1
-rw-r--r--  fs/btrfs/tree-log.c       2
-rw-r--r--  fs/btrfs/zlib.c          20
14 files changed, 121 insertions(+), 79 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d3220d31d3cb..dcd9be32ac57 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 		bytes = min(bytes, working_bytes);
 		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-		if (*pg_index == (vcnt - 1) && *pg_offset == 0)
-			memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
 		kunmap_atomic(kaddr);
 		flush_dcache_page(page_out);
 
@@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
 	return 1;
 }
+
+/*
+ * When uncompressing data, we need to make sure and zero any parts of
+ * the biovec that were not filled in by the decompression code.  pg_index
+ * and pg_offset indicate the last page and the last offset of that page
+ * that have been filled in.  This will zero everything remaining in the
+ * biovec.
+ */
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+				   unsigned long pg_index,
+				   unsigned long pg_offset)
+{
+	while (pg_index < vcnt) {
+		struct page *page = bvec[pg_index].bv_page;
+		unsigned long off = bvec[pg_index].bv_offset;
+		unsigned long len = bvec[pg_index].bv_len;
+
+		if (pg_offset < off)
+			pg_offset = off;
+		if (pg_offset < off + len) {
+			unsigned long bytes = off + len - pg_offset;
+			char *kaddr;
+
+			kaddr = kmap_atomic(page);
+			memset(kaddr + pg_offset, 0, bytes);
+			kunmap_atomic(kaddr);
+		}
+		pg_index++;
+		pg_offset = 0;
+	}
+}
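
The new helper walks the rest of the biovec and zeroes whatever the decompression code did not fill, clamping to each entry's bv_offset/bv_len window; the removed hunk above only zeroed the tail of the last page, and only when it started at offset zero. A minimal userspace sketch of the same walk, with plain buffers standing in for struct page and a hypothetical struct vec in place of struct bio_vec:

#include <stdio.h>
#include <string.h>

/* hypothetical stand-in for struct bio_vec: a buffer plus offset/len */
struct vec {
	char *buf;		/* stands in for bv_page */
	unsigned long off;	/* bv_offset */
	unsigned long len;	/* bv_len */
};

/* same walk as btrfs_clear_biovec_end, minus the kmap_atomic */
static void clear_vec_end(struct vec *vec, int vcnt,
			  unsigned long idx, unsigned long off)
{
	while (idx < (unsigned long)vcnt) {
		unsigned long start = vec[idx].off;
		unsigned long end = start + vec[idx].len;

		if (off < start)
			off = start;
		if (off < end)
			memset(vec[idx].buf + off, 0, end - off);
		idx++;
		off = 0;	/* later entries are cleared from bv_offset on */
	}
}

int main(void)
{
	char a[8], b[8];
	struct vec v[2] = { { a, 0, 8 }, { b, 2, 4 } };
	int i;

	memset(a, 'A', sizeof(a));
	memset(b, 'B', sizeof(b));
	/* pretend decompression filled entry 0 up to byte 5 */
	clear_vec_end(v, 2, 0, 5);
	for (i = 0; i < 8; i++)
		putchar(a[i] ? a[i] : '.');
	putchar(' ');
	for (i = 0; i < 8; i++)
		putchar(b[i] ? b[i] : '.');
	putchar('\n');	/* prints: AAAAA... BB....BB */
	return 0;
}
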
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 0c803b4fbf93..d181f70caae0 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			      unsigned long nr_pages);
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags);
-
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+				   unsigned long pg_index,
+				   unsigned long pg_offset);
 struct btrfs_compress_op {
 	struct list_head *(*alloc_workspace)(void);
 
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 19bc6162fb8e..150822ee0a0b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 {
 	int i;
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/* lockdep really cares that we take all of these spinlocks
-	 * in the right order.  If any of the locks in the path are not
-	 * currently blocking, it is going to complain.  So, make really
-	 * really sure by forcing the path to blocking before we clear
-	 * the path blocking.
-	 */
 	if (held) {
 		btrfs_set_lock_blocking_rw(held, held_rw);
 		if (held_rw == BTRFS_WRITE_LOCK)
@@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 			held_rw = BTRFS_READ_LOCK_BLOCKING;
 	}
 	btrfs_set_path_blocking(p);
-#endif
 
 	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
 		if (p->nodes[i] && p->locks[i]) {
@@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 		}
 	}
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	if (held)
 		btrfs_clear_lock_blocking_rw(held, held_rw);
-#endif
 }
 
 /* this also releases the path */
@@ -2893,7 +2883,7 @@ cow_done:
 			}
 			p->locks[level] = BTRFS_WRITE_LOCK;
 		} else {
-			err = btrfs_try_tree_read_lock(b);
+			err = btrfs_tree_read_lock_atomic(b);
 			if (!err) {
 				btrfs_set_path_blocking(p);
 				btrfs_tree_read_lock(b);
@@ -3025,7 +3015,7 @@ again:
 		}
 
 		level = btrfs_header_level(b);
-		err = btrfs_try_tree_read_lock(b);
+		err = btrfs_tree_read_lock_atomic(b);
 		if (!err) {
 			btrfs_set_path_blocking(p);
 			btrfs_tree_read_lock(b);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index d557264ee974..fe69edda11fb 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3276,7 +3276,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, unsigned long count);
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
 				 unsigned long count, int wait);
-int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root, u64 bytenr,
 			     u64 offset, int metadata, u64 *refs, u64 *flags);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1ad0f47ac850..1bf9f897065d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3817,19 +3817,19 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 	struct btrfs_super_block *sb = fs_info->super_copy;
 	int ret = 0;
 
-	if (sb->root_level > BTRFS_MAX_LEVEL) {
-		printk(KERN_ERR "BTRFS: tree_root level too big: %d > %d\n",
-				sb->root_level, BTRFS_MAX_LEVEL);
+	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
+		printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
+				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
 		ret = -EINVAL;
 	}
-	if (sb->chunk_root_level > BTRFS_MAX_LEVEL) {
-		printk(KERN_ERR "BTRFS: chunk_root level too big: %d > %d\n",
-				sb->chunk_root_level, BTRFS_MAX_LEVEL);
+	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
+		printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
+				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
 		ret = -EINVAL;
 	}
-	if (sb->log_root_level > BTRFS_MAX_LEVEL) {
-		printk(KERN_ERR "BTRFS: log_root level too big: %d > %d\n",
-				sb->log_root_level, BTRFS_MAX_LEVEL);
+	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
+		printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
+				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
 		ret = -EINVAL;
 	}
 
@@ -3837,15 +3837,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 	 * The common minimum, we don't know if we can trust the nodesize/sectorsize
 	 * items yet, they'll be verified later. Issue just a warning.
 	 */
-	if (!IS_ALIGNED(sb->root, 4096))
+	if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
 		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
 				sb->root);
-	if (!IS_ALIGNED(sb->chunk_root, 4096))
+	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
 		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
 				sb->chunk_root);
-	if (!IS_ALIGNED(sb->log_root, 4096))
+	if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
 		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
-				sb->log_root);
+				btrfs_super_log_root(sb));
 
 	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
 		printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
@@ -3857,13 +3857,13 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 	 * Hint to catch really bogus numbers, bitflips or so, more exact checks are
 	 * done later
 	 */
-	if (sb->num_devices > (1UL << 31))
+	if (btrfs_super_num_devices(sb) > (1UL << 31))
 		printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
-				sb->num_devices);
+				btrfs_super_num_devices(sb));
 
-	if (sb->bytenr != BTRFS_SUPER_INFO_OFFSET) {
+	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
 		printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
-				sb->bytenr, BTRFS_SUPER_INFO_OFFSET);
+				btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
 		ret = -EINVAL;
 	}
 
@@ -3871,14 +3871,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 	 * The generation is a global counter, we'll trust it more than the others
 	 * but it's still possible that it's the one that's wrong.
 	 */
-	if (sb->generation < sb->chunk_root_generation)
+	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
 		printk(KERN_WARNING
 			"BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
-			sb->generation, sb->chunk_root_generation);
-	if (sb->generation < sb->cache_generation && sb->cache_generation != (u64)-1)
+			btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
+	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
+	    && btrfs_super_cache_generation(sb) != (u64)-1)
 		printk(KERN_WARNING
 			"BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
-			sb->generation, sb->cache_generation);
+			btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
 
 	return ret;
 }
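
Two separate fixes are folded into these hunks: the comparisons now go through the btrfs_super_*() accessors rather than raw struct fields, and the level checks use >= because a level equal to BTRFS_MAX_LEVEL already indexes one past the per-level arrays. The raw superblock fields are little-endian on disk, so reading them directly is only correct on little-endian hosts. A self-contained sketch of the accessor idea, with a hypothetical le64get() standing in for the le64_to_cpu() conversion that the generated btrfs_super_root() helper performs:

#include <stdint.h>
#include <stdio.h>

/* hypothetical miniature of the on-disk superblock: multi-byte
 * fields are stored little-endian, single bytes are endian-safe */
struct ondisk_super {
	unsigned char root[8];		/* __le64 root, as raw bytes */
	unsigned char root_level;	/* u8 */
};

/* stand-in for le64_to_cpu(): assemble host-order value from LE bytes */
static uint64_t le64get(const unsigned char b[8])
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];
	return v;
}

int main(void)
{
	struct ondisk_super sb;
	uint64_t root = 65536;		/* block address, host order */
	int i;

	for (i = 0; i < 8; i++)		/* store little-endian */
		sb.root[i] = (root >> (8 * i)) & 0xff;
	sb.root_level = 1;

	/* compare and print only the converted value, never the raw field */
	printf("root=%llu aligned=%d level_ok=%d\n",
	       (unsigned long long)le64get(sb.root),
	       le64get(sb.root) % 4096 == 0,
	       sb.root_level < 8 /* BTRFS_MAX_LEVEL */);
	return 0;
}
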
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d56589571012..47c1ba141082 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -710,8 +710,8 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
 	rcu_read_unlock();
 }
 
-/* simple helper to search for an existing extent at a given offset */
-int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
+/* simple helper to search for an existing data extent at a given offset */
+int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
 {
 	int ret;
 	struct btrfs_key key;
@@ -726,12 +726,6 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
 	key.type = BTRFS_EXTENT_ITEM_KEY;
 	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
 				0, 0);
-	if (ret > 0) {
-		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
-		if (key.objectid == start &&
-		    key.type == BTRFS_METADATA_ITEM_KEY)
-			ret = 0;
-	}
 	btrfs_free_path(path);
 	return ret;
 }
@@ -786,7 +780,6 @@ search_again:
 	else
 		key.type = BTRFS_EXTENT_ITEM_KEY;
 
-again:
 	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
 				&key, path, 0, 0);
 	if (ret < 0)
@@ -802,13 +795,6 @@ again:
 			    key.offset == root->nodesize)
 				ret = 0;
 		}
-		if (ret) {
-			key.objectid = bytenr;
-			key.type = BTRFS_EXTENT_ITEM_KEY;
-			key.offset = root->nodesize;
-			btrfs_release_path(path);
-			goto again;
-		}
 	}
 
 	if (ret == 0) {
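
With the metadata fallback gone, btrfs_lookup_data_extent() only ever looks up BTRFS_EXTENT_ITEM_KEY and can hand back btrfs_search_slot()'s result directly: 0 on an exact match, positive when the key is absent. A runnable sketch of that return convention over a sorted array (search_slot here is illustrative, not the btree walk):

#include <stdio.h>

/* return 0 and set *slot on an exact match; return 1 with *slot at
 * the would-be insert position otherwise */
static int search_slot(const int *keys, int nr, int key, int *slot)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (keys[i] == key) {
			*slot = i;
			return 0;
		}
		if (keys[i] > key)
			break;
	}
	*slot = i;
	return 1;
}

int main(void)
{
	int keys[] = { 10, 20, 30 };
	int slot;

	printf("%d\n", search_slot(keys, 3, 20, &slot));	/* 0: found */
	printf("%d\n", search_slot(keys, 3, 25, &slot));	/* 1: absent */
	return 0;
}
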
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 783a94355efd..84a2d1868271 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -413,7 +413,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 	ret = 0;
 fail:
 	while (ret < 0 && !list_empty(&tmplist)) {
-		sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
+		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
 		list_del(&sums->list);
 		kfree(sums);
 	}
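
The old code passed the address of the list head itself to list_entry(), so container_of() produced a pointer into the on-stack tmplist rather than into a btrfs_ordered_sum, and the kfree() that followed freed a bogus pointer; the first real element hangs off tmplist.next. A runnable demonstration of the difference with a minimal intrusive list (all names hypothetical):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* minimal intrusive list, same shape as the kernel's list_head */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct sum {
	int csum;
	struct list_head list;
};

int main(void)
{
	struct list_head tmplist = { &tmplist, &tmplist };
	struct sum *s = malloc(sizeof(*s));

	s->csum = 42;
	/* insert at head */
	s->list.next = tmplist.next;
	s->list.prev = &tmplist;
	tmplist.next->prev = &s->list;
	tmplist.next = &s->list;

	/*
	 * list_entry(&tmplist, ...) would apply container_of to the
	 * stack-allocated head itself and free() a pointer into the
	 * stack; the first real element is tmplist.next.
	 */
	struct sum *ok = list_entry(tmplist.next, struct sum, list);
	printf("%d\n", ok->csum);	/* 42 */
	free(ok);
	return 0;
}
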
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 8d2b76e29d3b..4399f0c3a4ce 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -765,23 +765,6 @@ out:
 	return ret;
 }
 
-/* copy of check_sticky in fs/namei.c()
-* It's inline, so penalty for filesystems that don't use sticky bit is
-* minimal.
-*/
-static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode)
-{
-	kuid_t fsuid = current_fsuid();
-
-	if (!(dir->i_mode & S_ISVTX))
-		return 0;
-	if (uid_eq(inode->i_uid, fsuid))
-		return 0;
-	if (uid_eq(dir->i_uid, fsuid))
-		return 0;
-	return !capable(CAP_FOWNER);
-}
-
 /* copy of may_delete in fs/namei.c()
  *	Check whether we can remove a link victim from directory dir, check
  *	whether the type of victim is right.
@@ -817,8 +800,7 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
 		return error;
 	if (IS_APPEND(dir))
 		return -EPERM;
-	if (btrfs_check_sticky(dir, victim->d_inode)||
-	    IS_APPEND(victim->d_inode)||
+	if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) ||
 	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
 		return -EPERM;
 	if (isdir) {
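
The helper was a verbatim copy of the VFS sticky-bit check, and the call site now uses check_sticky() directly. The rule being enforced: in a sticky (+t) directory, deletion requires owning the file, owning the directory, or CAP_FOWNER. A userspace sketch of just that predicate (hypothetical names, the capability reduced to a flag):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* true if the sticky bit blocks this deletion */
static bool sticky_forbids_delete(mode_t dir_mode, uid_t dir_uid,
				  uid_t inode_uid, uid_t fsuid,
				  bool has_cap_fowner)
{
	if (!(dir_mode & S_ISVTX))
		return false;
	if (inode_uid == fsuid || dir_uid == fsuid)
		return false;
	return !has_cap_fowner;
}

int main(void)
{
	/* someone else's file in someone else's +t dir, no CAP_FOWNER */
	printf("%d\n", sticky_forbids_delete(S_ISVTX | 0777, 0, 0,
					     1000, false));	/* 1 */
	return 0;
}
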
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5665d2149249..f8229ef1b46d 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -128,6 +128,26 @@ again:
 }
 
 /*
+ * take a spinning read lock.
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
+ */
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
+{
+	if (atomic_read(&eb->blocking_writers))
+		return 0;
+
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers)) {
+		read_unlock(&eb->lock);
+		return 0;
+	}
+	atomic_inc(&eb->read_locks);
+	atomic_inc(&eb->spinning_readers);
+	return 1;
+}
+
+/*
  * returns 1 if we get the read lock and 0 if we don't
  * this won't wait for blocking writers
  */
@@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 	    atomic_read(&eb->blocking_readers))
 		return 0;
 
-	if (!write_trylock(&eb->lock))
-		return 0;
-
+	write_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers) ||
 	    atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
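
btrfs_tree_read_lock_atomic() is willing to spin on a contended rwlock and only backs off once a blocking writer is registered; the write_trylock() removal in btrfs_try_tree_write_lock shows the same idea on the write side, and the ctree.c hunks switch the search paths over from btrfs_try_tree_read_lock(), whose trylock also fails on plain lock contention and needlessly pushed callers into the expensive blocking path. Note the flag is re-checked after taking the lock, since a writer can go blocking between the first test and the acquisition. A runnable pthread sketch of the pattern, with a hypothetical struct eb_lock standing in for the extent_buffer lock state:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* hypothetical stand-in for the extent_buffer lock state */
struct eb_lock {
	pthread_rwlock_t lock;
	atomic_int blocking_writers;
};

/* same shape as btrfs_tree_read_lock_atomic: spin on the rwlock,
 * but refuse to wait for a writer that has gone blocking */
static int read_lock_atomic(struct eb_lock *eb)
{
	if (atomic_load(&eb->blocking_writers))
		return 0;

	pthread_rwlock_rdlock(&eb->lock);
	if (atomic_load(&eb->blocking_writers)) {
		/* recheck under the lock: a writer may have gone
		 * blocking between the test above and the rdlock */
		pthread_rwlock_unlock(&eb->lock);
		return 0;
	}
	return 1;	/* caller now holds the read lock */
}

int main(void)
{
	struct eb_lock eb = { PTHREAD_RWLOCK_INITIALIZER, 0 };

	if (read_lock_atomic(&eb)) {
		puts("read lock taken");
		pthread_rwlock_unlock(&eb.lock);
	}
	return 0;
}
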
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index b81e0e9a4894..c44a9d5f5362 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+
 
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 78285f30909e..617553cdb7d3 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -373,6 +373,8 @@ cont:
 	}
 done:
 	kunmap(pages_in[page_in_index]);
+	if (!ret)
+		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
 	return ret;
 }
 
@@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
 		goto out;
 	}
 
+	/*
+	 * the caller is already checking against PAGE_SIZE, but lets
+	 * move this check closer to the memcpy/memset
+	 */
+	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
 	bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
 	kaddr = kmap_atomic(dest_page);
 	memcpy(kaddr, workspace->buf + start_byte, bytes);
+
+	/*
+	 * btrfs_getblock is doing a zero on the tail of the page too,
+	 * but this will cover anything missing from the decompressed
+	 * data.
+	 */
+	if (bytes < destlen)
+		memset(kaddr+bytes, 0, destlen-bytes);
 	kunmap_atomic(kaddr);
 out:
 	return ret;
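
lzo_decompress() now clamps destlen to PAGE_SIZE immediately before the copy and zeroes any tail the decompressor left unfilled, so a short or corrupt stream can neither overrun the mapped page nor leave stale bytes behind the data. The clamp-copy-zero shape in isolation, as a runnable sketch (PAGE_SZ and copy_clamped are illustrative names):

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096	/* stand-in for PAGE_SIZE */

/* copy up to destlen bytes into a page and zero the remainder */
static void copy_clamped(char *page, const char *src, size_t srclen,
			 size_t destlen)
{
	size_t bytes;

	if (destlen > PAGE_SZ)		/* never trust destlen blindly */
		destlen = PAGE_SZ;
	bytes = srclen < destlen ? srclen : destlen;
	memcpy(page, src, bytes);
	if (bytes < destlen)		/* decompressor came up short */
		memset(page + bytes, 0, destlen - bytes);
}

int main(void)
{
	static char page[PAGE_SZ];

	memset(page, 0xff, sizeof(page));	/* stale contents */
	copy_clamped(page, "abc", 3, 16);
	printf("%d %d\n", page[2], page[15]);	/* 99 0 */
	return 0;
}
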
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a2b97ef10317..54bd91ece35b 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2151,6 +2151,7 @@ static void __exit exit_btrfs_fs(void)
 	extent_map_exit();
 	extent_io_exit();
 	btrfs_interface_exit();
+	btrfs_end_io_wq_exit();
 	unregister_filesystem(&btrfs_fs_type);
 	btrfs_exit_sysfs();
 	btrfs_cleanup_fs_uuids();
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1475979e5718..286213cec861 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -672,7 +672,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 			 * is this extent already allocated in the extent
 			 * allocation tree?  If so, just add a reference
 			 */
-			ret = btrfs_lookup_extent(root, ins.objectid,
+			ret = btrfs_lookup_data_extent(root, ins.objectid,
 						ins.offset);
 			if (ret == 0) {
 				ret = btrfs_inc_extent_ref(trans, root,
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 759fa4e2de8f..fb22fd8d8fb8 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -299,6 +299,8 @@ done:
 	zlib_inflateEnd(&workspace->strm);
 	if (data_in)
 		kunmap(pages_in[page_in_index]);
+	if (!ret)
+		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
 	return ret;
 }
 
@@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0;
 	int wbits = MAX_WBITS;
-	unsigned long bytes_left = destlen;
+	unsigned long bytes_left;
 	unsigned long total_out = 0;
+	unsigned long pg_offset = 0;
 	char *kaddr;
 
+	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
+	bytes_left = destlen;
+
 	workspace->strm.next_in = data_in;
 	workspace->strm.avail_in = srclen;
 	workspace->strm.total_in = 0;
@@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		unsigned long buf_start;
 		unsigned long buf_offset;
 		unsigned long bytes;
-		unsigned long pg_offset = 0;
 
 		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
 		if (ret != Z_OK && ret != Z_STREAM_END)
@@ -384,6 +389,17 @@ next:
 		ret = 0;
 
 	zlib_inflateEnd(&workspace->strm);
+
+	/*
+	 * this should only happen if zlib returned fewer bytes than we
+	 * expected.  btrfs_get_block is responsible for zeroing from the
+	 * end of the inline extent (destlen) to the end of the page
+	 */
+	if (pg_offset < destlen) {
+		kaddr = kmap_atomic(dest_page);
+		memset(kaddr + pg_offset, 0, destlen - pg_offset);
+		kunmap_atomic(kaddr);
+	}
 	return ret;
 }
 
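
zlib_decompress() gets the same treatment as the lzo path: destlen is clamped to PAGE_SIZE up front, pg_offset is hoisted out of the loop so it survives as a running count of bytes actually produced, and everything between pg_offset and destlen is zeroed once inflate is done. A self-contained round trip showing a short output being zero-filled (buffer sizes and names are illustrative; link with -lz):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define DEST_PAGE 64	/* tiny stand-in for a page */

int main(void)
{
	const char msg[] = "btrfs inline extent payload";
	unsigned char packed[128];
	char page[DEST_PAGE];
	uLongf packed_len = sizeof(packed);
	unsigned long pg_offset;
	z_stream strm;

	/* produce a zlib stream to inflate */
	if (compress(packed, &packed_len,
		     (const unsigned char *)msg, sizeof(msg)) != Z_OK)
		return 1;

	memset(&strm, 0, sizeof(strm));
	if (inflateInit2(&strm, MAX_WBITS) != Z_OK)
		return 1;
	strm.next_in = packed;
	strm.avail_in = packed_len;
	strm.next_out = (unsigned char *)page;
	strm.avail_out = sizeof(page);
	inflate(&strm, Z_FINISH);
	pg_offset = sizeof(page) - strm.avail_out;	/* bytes produced */
	inflateEnd(&strm);

	/* zero whatever inflate did not fill, as the patch now does */
	if (pg_offset < sizeof(page))
		memset(page + pg_offset, 0, sizeof(page) - pg_offset);

	printf("%s (%lu bytes)\n", page, pg_offset);
	return 0;
}
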