Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/compression.c | 33
-rw-r--r--  fs/btrfs/compression.h |  4
-rw-r--r--  fs/btrfs/ctree.c       | 14
-rw-r--r--  fs/btrfs/locking.c     | 24
-rw-r--r--  fs/btrfs/locking.h     |  2
-rw-r--r--  fs/btrfs/lzo.c         | 15
-rw-r--r--  fs/btrfs/zlib.c        | 20
7 files changed, 92 insertions(+), 20 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d3220d31d3cb..dcd9be32ac57 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 		bytes = min(bytes, working_bytes);
 		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-		if (*pg_index == (vcnt - 1) && *pg_offset == 0)
-			memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
 		kunmap_atomic(kaddr);
 		flush_dcache_page(page_out);
 
@@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
 	return 1;
 }
+
+/*
+ * When uncompressing data, we need to make sure and zero any parts of
+ * the biovec that were not filled in by the decompression code. pg_index
+ * and pg_offset indicate the last page and the last offset of that page
+ * that have been filled in. This will zero everything remaining in the
+ * biovec.
+ */
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+				   unsigned long pg_index,
+				   unsigned long pg_offset)
+{
+	while (pg_index < vcnt) {
+		struct page *page = bvec[pg_index].bv_page;
+		unsigned long off = bvec[pg_index].bv_offset;
+		unsigned long len = bvec[pg_index].bv_len;
+
+		if (pg_offset < off)
+			pg_offset = off;
+		if (pg_offset < off + len) {
+			unsigned long bytes = off + len - pg_offset;
+			char *kaddr;
+
+			kaddr = kmap_atomic(page);
+			memset(kaddr + pg_offset, 0, bytes);
+			kunmap_atomic(kaddr);
+		}
+		pg_index++;
+		pg_offset = 0;
+	}
+}
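
Note: btrfs_clear_biovec_end() above is easy to sanity-check outside the kernel. The following is a minimal user-space sketch, not part of the patch: mock_bio_vec and the 4096-byte MOCK_PAGE_SIZE stand in for struct bio_vec and PAGE_SIZE, and plain pointers replace the kmap_atomic()/kunmap_atomic() pair, since user-space buffers are always mapped.

/* Minimal user-space model of the tail-zeroing loop above.
 * Assumptions: mocked bio_vec, 4096-byte "pages", direct addressing
 * instead of kmap_atomic()/kunmap_atomic(). Not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MOCK_PAGE_SIZE 4096UL

struct mock_bio_vec {
	unsigned char *bv_page;		/* stands in for struct page * */
	unsigned long bv_offset;
	unsigned long bv_len;
};

/* Same logic as btrfs_clear_biovec_end(): zero everything in the
 * biovec from (pg_index, pg_offset) to the end. */
static void clear_biovec_end(struct mock_bio_vec *bvec, int vcnt,
			     unsigned long pg_index, unsigned long pg_offset)
{
	while (pg_index < (unsigned long)vcnt) {
		unsigned long off = bvec[pg_index].bv_offset;
		unsigned long len = bvec[pg_index].bv_len;

		if (pg_offset < off)
			pg_offset = off;
		if (pg_offset < off + len)
			memset(bvec[pg_index].bv_page + pg_offset, 0,
			       off + len - pg_offset);
		pg_index++;
		pg_offset = 0;	/* later pages are cleared from bv_offset */
	}
}

int main(void)
{
	struct mock_bio_vec bvec[2];
	int i;

	for (i = 0; i < 2; i++) {
		bvec[i].bv_page = malloc(MOCK_PAGE_SIZE);
		memset(bvec[i].bv_page, 0xaa, MOCK_PAGE_SIZE);
		bvec[i].bv_offset = 0;
		bvec[i].bv_len = MOCK_PAGE_SIZE;
	}
	/* pretend decompression filled page 0 only up to byte 100 */
	clear_biovec_end(bvec, 2, 0, 100);
	printf("page0[99]=%#x page0[100]=%#x page1[0]=%#x\n",
	       bvec[0].bv_page[99], bvec[0].bv_page[100],
	       bvec[1].bv_page[0]);	/* prints 0xaa 0 0 */
	for (i = 0; i < 2; i++)
		free(bvec[i].bv_page);
	return 0;
}

Running it zeroes page 0 from byte 100 onward and all of page 1, which is exactly the guarantee the lzo and zlib paths below rely on after a short stream.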
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 0c803b4fbf93..d181f70caae0 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 				  unsigned long nr_pages);
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags);
-
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+				   unsigned long pg_index,
+				   unsigned long pg_offset);
 struct btrfs_compress_op {
 	struct list_head *(*alloc_workspace)(void);
 
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 19bc6162fb8e..150822ee0a0b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 {
 	int i;
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/* lockdep really cares that we take all of these spinlocks
-	 * in the right order. If any of the locks in the path are not
-	 * currently blocking, it is going to complain. So, make really
-	 * really sure by forcing the path to blocking before we clear
-	 * the path blocking.
-	 */
 	if (held) {
 		btrfs_set_lock_blocking_rw(held, held_rw);
 		if (held_rw == BTRFS_WRITE_LOCK)
@@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 			held_rw = BTRFS_READ_LOCK_BLOCKING;
 	}
 	btrfs_set_path_blocking(p);
-#endif
 
 	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
 		if (p->nodes[i] && p->locks[i]) {
@@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 		}
 	}
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	if (held)
 		btrfs_clear_lock_blocking_rw(held, held_rw);
-#endif
 }
 
 /* this also releases the path */
@@ -2893,7 +2883,7 @@ cow_done:
 			}
 			p->locks[level] = BTRFS_WRITE_LOCK;
 		} else {
-			err = btrfs_try_tree_read_lock(b);
+			err = btrfs_tree_read_lock_atomic(b);
 			if (!err) {
 				btrfs_set_path_blocking(p);
 				btrfs_tree_read_lock(b);
@@ -3025,7 +3015,7 @@ again:
 		}
 
 		level = btrfs_header_level(b);
-		err = btrfs_try_tree_read_lock(b);
+		err = btrfs_tree_read_lock_atomic(b);
 		if (!err) {
 			btrfs_set_path_blocking(p);
 			btrfs_tree_read_lock(b);
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5665d2149249..f8229ef1b46d 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -128,6 +128,26 @@ again:
 }
 
 /*
+ * take a spinning read lock.
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
+ */
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
+{
+	if (atomic_read(&eb->blocking_writers))
+		return 0;
+
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers)) {
+		read_unlock(&eb->lock);
+		return 0;
+	}
+	atomic_inc(&eb->read_locks);
+	atomic_inc(&eb->spinning_readers);
+	return 1;
+}
+
+/*
  * returns 1 if we get the read lock and 0 if we don't
  * this won't wait for blocking writers
  */
@@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 	    atomic_read(&eb->blocking_readers))
 		return 0;
 
-	if (!write_trylock(&eb->lock))
-		return 0;
-
+	write_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers) ||
 	    atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
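
Note: the new btrfs_tree_read_lock_atomic() differs from btrfs_try_tree_read_lock() in that it takes the rwlock unconditionally (read_lock() rather than read_trylock()), so it only fails when a blocking writer actually holds the extent buffer, not on transient rwlock contention. A user-space analogue, not part of the patch, with a mocked extent buffer and pthread/C11 primitives standing in for the kernel's rwlock_t and atomic_t:

#include <pthread.h>
#include <stdatomic.h>

/* Mocked extent buffer; field names mirror the kernel's but the types
 * are user-space stand-ins. Not kernel code. */
struct mock_eb {
	pthread_rwlock_t lock;
	atomic_int blocking_writers;
	atomic_int read_locks;
	atomic_int spinning_readers;
};

/*
 * Analogue of btrfs_tree_read_lock_atomic(): acquire the rwlock
 * unconditionally (contrast read_trylock() in btrfs_try_tree_read_lock),
 * but still return 0 rather than wait out a "blocking" writer.
 */
static int mock_tree_read_lock_atomic(struct mock_eb *eb)
{
	if (atomic_load(&eb->blocking_writers))
		return 0;

	pthread_rwlock_rdlock(&eb->lock);
	/* recheck: a writer may have gone blocking while we acquired */
	if (atomic_load(&eb->blocking_writers)) {
		pthread_rwlock_unlock(&eb->lock);
		return 0;
	}
	atomic_fetch_add(&eb->read_locks, 1);
	atomic_fetch_add(&eb->spinning_readers, 1);
	return 1;
}

The practical effect in btrfs_search_slot() is that brief contention on eb->lock no longer kicks the caller onto the heavyweight set_path_blocking + blocking read lock path.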
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index b81e0e9a4894..c44a9d5f5362 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+
 
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 78285f30909e..617553cdb7d3 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -373,6 +373,8 @@ cont:
 	}
 done:
 	kunmap(pages_in[page_in_index]);
+	if (!ret)
+		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
 	return ret;
 }
 
@@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
 		goto out;
 	}
 
+	/*
+	 * the caller is already checking against PAGE_SIZE, but lets
+	 * move this check closer to the memcpy/memset
+	 */
+	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
 	bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
 	kaddr = kmap_atomic(dest_page);
 	memcpy(kaddr, workspace->buf + start_byte, bytes);
+
+	/*
+	 * btrfs_getblock is doing a zero on the tail of the page too,
+	 * but this will cover anything missing from the decompressed
+	 * data.
+	 */
+	if (bytes < destlen)
+		memset(kaddr+bytes, 0, destlen-bytes);
 	kunmap_atomic(kaddr);
 out:
 	return ret;
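
Note: the lzo_decompress() change pairs a defensive clamp of destlen to PAGE_SIZE with explicit zeroing of whatever the decompressor left unwritten, so stale page contents cannot leak into the inline-extent tail. The same clamp-then-zero shape in isolation, as a self-contained user-space sketch (not part of the patch; fill_page(), min_ul() and the 4096-byte page size are illustrative):

#include <string.h>

#define MOCK_PAGE_SIZE 4096UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Fill dest (one page) from an already-decompressed buffer, then zero
 * the tail so a short decompression never exposes stale bytes.
 * avail plays the role of (out_len - start_byte) in the patch. */
static void fill_page(char *dest, const char *decompressed,
		      unsigned long avail, unsigned long destlen)
{
	unsigned long bytes;

	destlen = min_ul(destlen, MOCK_PAGE_SIZE);	/* clamp, as in the patch */
	bytes = min_ul(destlen, avail);
	memcpy(dest, decompressed, bytes);
	if (bytes < destlen)
		memset(dest + bytes, 0, destlen - bytes);
}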
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 759fa4e2de8f..fb22fd8d8fb8 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -299,6 +299,8 @@ done:
 	zlib_inflateEnd(&workspace->strm);
 	if (data_in)
 		kunmap(pages_in[page_in_index]);
+	if (!ret)
+		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
 	return ret;
 }
 
@@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0;
 	int wbits = MAX_WBITS;
-	unsigned long bytes_left = destlen;
+	unsigned long bytes_left;
 	unsigned long total_out = 0;
+	unsigned long pg_offset = 0;
 	char *kaddr;
 
+	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
+	bytes_left = destlen;
+
 	workspace->strm.next_in = data_in;
 	workspace->strm.avail_in = srclen;
 	workspace->strm.total_in = 0;
@@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		unsigned long buf_start;
 		unsigned long buf_offset;
 		unsigned long bytes;
-		unsigned long pg_offset = 0;
 
 		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
 		if (ret != Z_OK && ret != Z_STREAM_END)
@@ -384,6 +389,17 @@ next:
 	ret = 0;
 
 	zlib_inflateEnd(&workspace->strm);
+
+	/*
+	 * this should only happen if zlib returned fewer bytes than we
+	 * expected. btrfs_get_block is responsible for zeroing from the
+	 * end of the inline extent (destlen) to the end of the page
+	 */
+	if (pg_offset < destlen) {
+		kaddr = kmap_atomic(dest_page);
+		memset(kaddr + pg_offset, 0, destlen - pg_offset);
+		kunmap_atomic(kaddr);
+	}
 	return ret;
 }
 
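
Note: zlib_decompress() now applies the same discipline: clamp destlen up front, hoist pg_offset out of the loop so the final position survives it, and zero from there to destlen before returning, trusting how far inflate actually got rather than how far it was asked to go. A user-space sketch of that guard against the stock zlib API (not part of the patch; build with -lz; decompress_page() and MOCK_PAGE_SIZE are illustrative):

/* Decompress into one page-sized buffer and zero whatever inflate()
 * did not produce. Simplified single-call sketch, not kernel code. */
#include <string.h>
#include <zlib.h>

#define MOCK_PAGE_SIZE 4096UL

static int decompress_page(const unsigned char *src, unsigned long srclen,
			   unsigned char *page, unsigned long destlen)
{
	z_stream strm;
	unsigned long pg_offset;
	int ret;

	if (destlen > MOCK_PAGE_SIZE)	/* clamp, as in the patch */
		destlen = MOCK_PAGE_SIZE;

	memset(&strm, 0, sizeof(strm));	/* zalloc/zfree/opaque = Z_NULL */
	if (inflateInit(&strm) != Z_OK)
		return -1;
	strm.next_in = (unsigned char *)src;
	strm.avail_in = (uInt)srclen;
	strm.next_out = page;
	strm.avail_out = (uInt)destlen;

	ret = inflate(&strm, Z_FINISH);
	pg_offset = strm.total_out;	/* bytes actually produced */
	inflateEnd(&strm);

	/* the guard: never leave stale bytes between a short output
	 * and the end of the requested range */
	if (pg_offset < destlen)
		memset(page + pg_offset, 0, destlen - pg_offset);

	/* a truncated stream still yields zero-filled, safe output */
	return (ret == Z_STREAM_END || ret == Z_OK ||
		ret == Z_BUF_ERROR) ? 0 : -1;
}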