Diffstat (limited to 'fs/btrfs')
-rw-r--r--   fs/btrfs/Makefile        |   4
-rw-r--r--   fs/btrfs/acl.c           |  27
-rw-r--r--   fs/btrfs/compression.c   |  14
-rw-r--r--   fs/btrfs/ctree.h         |  30
-rw-r--r--   fs/btrfs/dir-item.c      |  30
-rw-r--r--   fs/btrfs/extent-tree.c   |  45
-rw-r--r--   fs/btrfs/extent_io.c     | 139
-rw-r--r--   fs/btrfs/extent_io.h     |  20
-rw-r--r--   fs/btrfs/extent_map.c    | 155
-rw-r--r--   fs/btrfs/file-item.c     |   7
-rw-r--r--   fs/btrfs/file.c          |  21
-rw-r--r--   fs/btrfs/inode.c         | 145
-rw-r--r--   fs/btrfs/ioctl.c         |   3
-rw-r--r--   fs/btrfs/ref-cache.c     |  68
-rw-r--r--   fs/btrfs/ref-cache.h     |  52
-rw-r--r--   fs/btrfs/root-tree.c     |   5
-rw-r--r--   fs/btrfs/transaction.c   |  65
-rw-r--r--   fs/btrfs/tree-log.c      |  12
-rw-r--r--   fs/btrfs/volumes.c       |  12
19 files changed, 316 insertions, 538 deletions
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 9b72dcf1cd25..40e6ac08c21f 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -6,5 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
     transaction.o inode.o file.o tree-defrag.o \
     extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
     extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
-    export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
+    export.o tree-log.o free-space-cache.o zlib.o lzo.o \
     compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
+
+btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 65a735d8f6e4..eb159aaa5a11 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -28,8 +28,6 @@
 #include "btrfs_inode.h"
 #include "xattr.h"
 
-#ifdef CONFIG_BTRFS_FS_POSIX_ACL
-
 struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 {
     int size;
@@ -111,7 +109,6 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
     int ret, size = 0;
     const char *name;
     char *value = NULL;
-    mode_t mode;
 
     if (acl) {
         ret = posix_acl_valid(acl);
@@ -122,13 +119,11 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
 
     switch (type) {
     case ACL_TYPE_ACCESS:
-        mode = inode->i_mode;
         name = POSIX_ACL_XATTR_ACCESS;
         if (acl) {
-            ret = posix_acl_equiv_mode(acl, &mode);
+            ret = posix_acl_equiv_mode(acl, &inode->i_mode);
             if (ret < 0)
                 return ret;
-            inode->i_mode = mode;
         }
         ret = 0;
         break;
@@ -222,19 +217,16 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
     }
 
     if (IS_POSIXACL(dir) && acl) {
-        mode_t mode = inode->i_mode;
-
         if (S_ISDIR(inode->i_mode)) {
             ret = btrfs_set_acl(trans, inode, acl,
                                 ACL_TYPE_DEFAULT);
             if (ret)
                 goto failed;
         }
-        ret = posix_acl_create(&acl, GFP_NOFS, &mode);
+        ret = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
         if (ret < 0)
             return ret;
 
-        inode->i_mode = mode;
         if (ret > 0) {
             /* we need an acl */
             ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
@@ -282,18 +274,3 @@ const struct xattr_handler btrfs_xattr_acl_access_handler = {
     .get = btrfs_xattr_acl_get,
     .set = btrfs_xattr_acl_set,
 };
-
-#else /* CONFIG_BTRFS_FS_POSIX_ACL */
-
-int btrfs_acl_chmod(struct inode *inode)
-{
-    return 0;
-}
-
-int btrfs_init_acl(struct btrfs_trans_handle *trans,
-                   struct inode *inode, struct inode *dir)
-{
-    return 0;
-}
-
-#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index bfe42b03eaf9..8ec5d86f1734 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -338,6 +338,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
     u64 first_byte = disk_start;
     struct block_device *bdev;
     int ret;
+    int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
     WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
     cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
@@ -392,8 +393,11 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
     ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
     BUG_ON(ret);
 
-    ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
-    BUG_ON(ret);
+    if (!skip_sum) {
+        ret = btrfs_csum_one_bio(root, inode, bio,
+                                 start, 1);
+        BUG_ON(ret);
+    }
 
     ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
     BUG_ON(ret);
@@ -418,8 +422,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
     ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
     BUG_ON(ret);
 
-    ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
-    BUG_ON(ret);
+    if (!skip_sum) {
+        ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+        BUG_ON(ret);
+    }
 
     ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
     BUG_ON(ret);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 365c4e1dde04..0469263e327e 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2406,8 +2406,8 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
             btrfs_root_item *item, struct btrfs_key *key);
 int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
 int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
-int btrfs_set_root_node(struct btrfs_root_item *item,
+void btrfs_set_root_node(struct btrfs_root_item *item,
                         struct extent_buffer *node);
 void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
 
 /* dir-item.c */
@@ -2523,6 +2523,14 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
 #define PageChecked PageFsMisc
 #endif
 
+/* This forces readahead on a given range of bytes in an inode */
+static inline void btrfs_force_ra(struct address_space *mapping,
+                                  struct file_ra_state *ra, struct file *file,
+                                  pgoff_t offset, unsigned long req_size)
+{
+    page_cache_sync_readahead(mapping, ra, file, offset, req_size);
+}
+
 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
 int btrfs_set_inode_index(struct inode *dir, u64 *index);
 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
@@ -2551,9 +2559,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
                          size_t size, struct bio *bio, unsigned long bio_flags);
 
-unsigned long btrfs_force_ra(struct address_space *mapping,
-                             struct file_ra_state *ra, struct file *file,
-                             pgoff_t offset, pgoff_t last_index);
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
@@ -2648,12 +2653,21 @@ do { \
 /* acl.c */
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
 struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
-#else
-#define btrfs_get_acl NULL
-#endif
 int btrfs_init_acl(struct btrfs_trans_handle *trans,
                    struct inode *inode, struct inode *dir);
 int btrfs_acl_chmod(struct inode *inode);
+#else
+#define btrfs_get_acl NULL
+static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
+                                 struct inode *inode, struct inode *dir)
+{
+    return 0;
+}
+static inline int btrfs_acl_chmod(struct inode *inode)
+{
+    return 0;
+}
+#endif
 
 /* relocation.c */
 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index c360a848d97f..31d84e78129b 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -198,8 +198,6 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
     struct btrfs_key key;
     int ins_len = mod < 0 ? -1 : 0;
     int cow = mod != 0;
-    struct btrfs_key found_key;
-    struct extent_buffer *leaf;
 
     key.objectid = dir;
     btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
@@ -209,18 +207,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
     ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
     if (ret < 0)
         return ERR_PTR(ret);
-    if (ret > 0) {
-        if (path->slots[0] == 0)
-            return NULL;
-        path->slots[0]--;
-    }
-
-    leaf = path->nodes[0];
-    btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
-    if (found_key.objectid != dir ||
-        btrfs_key_type(&found_key) != BTRFS_DIR_ITEM_KEY ||
-        found_key.offset != key.offset)
+    if (ret > 0)
         return NULL;
 
     return btrfs_match_dir_item_name(root, path, name, name_len);
@@ -315,8 +302,6 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
     struct btrfs_key key;
     int ins_len = mod < 0 ? -1 : 0;
     int cow = mod != 0;
-    struct btrfs_key found_key;
-    struct extent_buffer *leaf;
 
     key.objectid = dir;
     btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
@@ -324,18 +309,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
     ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
     if (ret < 0)
         return ERR_PTR(ret);
-    if (ret > 0) {
-        if (path->slots[0] == 0)
-            return NULL;
-        path->slots[0]--;
-    }
-
-    leaf = path->nodes[0];
-    btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
-    if (found_key.objectid != dir ||
-        btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY ||
-        found_key.offset != key.offset)
+    if (ret > 0)
         return NULL;
 
     return btrfs_match_dir_item_name(root, path, name, name_len);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4d08ed79405d..66bac226944e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -663,7 +663,9 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
663 struct btrfs_path *path; 663 struct btrfs_path *path;
664 664
665 path = btrfs_alloc_path(); 665 path = btrfs_alloc_path();
666 BUG_ON(!path); 666 if (!path)
667 return -ENOMEM;
668
667 key.objectid = start; 669 key.objectid = start;
668 key.offset = len; 670 key.offset = len;
669 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); 671 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
@@ -3272,6 +3274,9 @@ again:
3272 } 3274 }
3273 3275
3274 ret = btrfs_alloc_chunk(trans, extent_root, flags); 3276 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3277 if (ret < 0 && ret != -ENOSPC)
3278 goto out;
3279
3275 spin_lock(&space_info->lock); 3280 spin_lock(&space_info->lock);
3276 if (ret) 3281 if (ret)
3277 space_info->full = 1; 3282 space_info->full = 1;
@@ -3281,6 +3286,7 @@ again:
3281 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 3286 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3282 space_info->chunk_alloc = 0; 3287 space_info->chunk_alloc = 0;
3283 spin_unlock(&space_info->lock); 3288 spin_unlock(&space_info->lock);
3289out:
3284 mutex_unlock(&extent_root->fs_info->chunk_mutex); 3290 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3285 return ret; 3291 return ret;
3286} 3292}
@@ -4456,7 +4462,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4456 printk(KERN_ERR "umm, got %d back from search" 4462 printk(KERN_ERR "umm, got %d back from search"
4457 ", was looking for %llu\n", ret, 4463 ", was looking for %llu\n", ret,
4458 (unsigned long long)bytenr); 4464 (unsigned long long)bytenr);
4459 btrfs_print_leaf(extent_root, path->nodes[0]); 4465 if (ret > 0)
4466 btrfs_print_leaf(extent_root,
4467 path->nodes[0]);
4460 } 4468 }
4461 BUG_ON(ret); 4469 BUG_ON(ret);
4462 extent_slot = path->slots[0]; 4470 extent_slot = path->slots[0];
@@ -5073,7 +5081,9 @@ have_block_group:
5073 * group is does point to and try again 5081 * group is does point to and try again
5074 */ 5082 */
5075 if (!last_ptr_loop && last_ptr->block_group && 5083 if (!last_ptr_loop && last_ptr->block_group &&
5076 last_ptr->block_group != block_group) { 5084 last_ptr->block_group != block_group &&
5085 index <=
5086 get_block_group_index(last_ptr->block_group)) {
5077 5087
5078 btrfs_put_block_group(block_group); 5088 btrfs_put_block_group(block_group);
5079 block_group = last_ptr->block_group; 5089 block_group = last_ptr->block_group;
@@ -5501,7 +5511,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5501 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref); 5511 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5502 5512
5503 path = btrfs_alloc_path(); 5513 path = btrfs_alloc_path();
5504 BUG_ON(!path); 5514 if (!path)
5515 return -ENOMEM;
5505 5516
5506 path->leave_spinning = 1; 5517 path->leave_spinning = 1;
5507 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 5518 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
@@ -6272,10 +6283,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
6272 int level; 6283 int level;
6273 6284
6274 path = btrfs_alloc_path(); 6285 path = btrfs_alloc_path();
6275 BUG_ON(!path); 6286 if (!path)
6287 return -ENOMEM;
6276 6288
6277 wc = kzalloc(sizeof(*wc), GFP_NOFS); 6289 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6278 BUG_ON(!wc); 6290 if (!wc) {
6291 btrfs_free_path(path);
6292 return -ENOMEM;
6293 }
6279 6294
6280 trans = btrfs_start_transaction(tree_root, 0); 6295 trans = btrfs_start_transaction(tree_root, 0);
6281 BUG_ON(IS_ERR(trans)); 6296 BUG_ON(IS_ERR(trans));
@@ -6538,8 +6553,6 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
6538 u64 min_allocable_bytes; 6553 u64 min_allocable_bytes;
6539 int ret = -ENOSPC; 6554 int ret = -ENOSPC;
6540 6555
6541 if (cache->ro)
6542 return 0;
6543 6556
6544 /* 6557 /*
6545 * We need some metadata space and system metadata space for 6558 * We need some metadata space and system metadata space for
@@ -6555,6 +6568,12 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
6555 6568
6556 spin_lock(&sinfo->lock); 6569 spin_lock(&sinfo->lock);
6557 spin_lock(&cache->lock); 6570 spin_lock(&cache->lock);
6571
6572 if (cache->ro) {
6573 ret = 0;
6574 goto out;
6575 }
6576
6558 num_bytes = cache->key.offset - cache->reserved - cache->pinned - 6577 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6559 cache->bytes_super - btrfs_block_group_used(&cache->item); 6578 cache->bytes_super - btrfs_block_group_used(&cache->item);
6560 6579
@@ -6568,7 +6587,7 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
6568 cache->ro = 1; 6587 cache->ro = 1;
6569 ret = 0; 6588 ret = 0;
6570 } 6589 }
6571 6590out:
6572 spin_unlock(&cache->lock); 6591 spin_unlock(&cache->lock);
6573 spin_unlock(&sinfo->lock); 6592 spin_unlock(&sinfo->lock);
6574 return ret; 6593 return ret;
@@ -7183,11 +7202,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7183 spin_unlock(&cluster->refill_lock); 7202 spin_unlock(&cluster->refill_lock);
7184 7203
7185 path = btrfs_alloc_path(); 7204 path = btrfs_alloc_path();
7186 BUG_ON(!path); 7205 if (!path) {
7206 ret = -ENOMEM;
7207 goto out;
7208 }
7187 7209
7188 inode = lookup_free_space_inode(root, block_group, path); 7210 inode = lookup_free_space_inode(root, block_group, path);
7189 if (!IS_ERR(inode)) { 7211 if (!IS_ERR(inode)) {
7190 btrfs_orphan_add(trans, inode); 7212 ret = btrfs_orphan_add(trans, inode);
7213 BUG_ON(ret);
7191 clear_nlink(inode); 7214 clear_nlink(inode);
7192 /* One for the block groups ref */ 7215 /* One for the block groups ref */
7193 spin_lock(&block_group->lock); 7216 spin_lock(&block_group->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 067b1747421b..d418164a35f1 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -254,14 +254,14 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
254 * 254 *
255 * This should be called with the tree lock held. 255 * This should be called with the tree lock held.
256 */ 256 */
257static int merge_state(struct extent_io_tree *tree, 257static void merge_state(struct extent_io_tree *tree,
258 struct extent_state *state) 258 struct extent_state *state)
259{ 259{
260 struct extent_state *other; 260 struct extent_state *other;
261 struct rb_node *other_node; 261 struct rb_node *other_node;
262 262
263 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) 263 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
264 return 0; 264 return;
265 265
266 other_node = rb_prev(&state->rb_node); 266 other_node = rb_prev(&state->rb_node);
267 if (other_node) { 267 if (other_node) {
@@ -287,19 +287,13 @@ static int merge_state(struct extent_io_tree *tree,
287 free_extent_state(other); 287 free_extent_state(other);
288 } 288 }
289 } 289 }
290
291 return 0;
292} 290}
293 291
294static int set_state_cb(struct extent_io_tree *tree, 292static void set_state_cb(struct extent_io_tree *tree,
295 struct extent_state *state, int *bits) 293 struct extent_state *state, int *bits)
296{ 294{
297 if (tree->ops && tree->ops->set_bit_hook) { 295 if (tree->ops && tree->ops->set_bit_hook)
298 return tree->ops->set_bit_hook(tree->mapping->host, 296 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
299 state, bits);
300 }
301
302 return 0;
303} 297}
304 298
305static void clear_state_cb(struct extent_io_tree *tree, 299static void clear_state_cb(struct extent_io_tree *tree,
@@ -309,6 +303,9 @@ static void clear_state_cb(struct extent_io_tree *tree,
309 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); 303 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
310} 304}
311 305
306static void set_state_bits(struct extent_io_tree *tree,
307 struct extent_state *state, int *bits);
308
312/* 309/*
313 * insert an extent_state struct into the tree. 'bits' are set on the 310 * insert an extent_state struct into the tree. 'bits' are set on the
314 * struct before it is inserted. 311 * struct before it is inserted.
@@ -324,8 +321,6 @@ static int insert_state(struct extent_io_tree *tree,
324 int *bits) 321 int *bits)
325{ 322{
326 struct rb_node *node; 323 struct rb_node *node;
327 int bits_to_set = *bits & ~EXTENT_CTLBITS;
328 int ret;
329 324
330 if (end < start) { 325 if (end < start) {
331 printk(KERN_ERR "btrfs end < start %llu %llu\n", 326 printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -335,13 +330,9 @@ static int insert_state(struct extent_io_tree *tree,
335 } 330 }
336 state->start = start; 331 state->start = start;
337 state->end = end; 332 state->end = end;
338 ret = set_state_cb(tree, state, bits);
339 if (ret)
340 return ret;
341 333
342 if (bits_to_set & EXTENT_DIRTY) 334 set_state_bits(tree, state, bits);
343 tree->dirty_bytes += end - start + 1; 335
344 state->state |= bits_to_set;
345 node = tree_insert(&tree->state, end, &state->rb_node); 336 node = tree_insert(&tree->state, end, &state->rb_node);
346 if (node) { 337 if (node) {
347 struct extent_state *found; 338 struct extent_state *found;
@@ -357,13 +348,11 @@ static int insert_state(struct extent_io_tree *tree,
357 return 0; 348 return 0;
358} 349}
359 350
360static int split_cb(struct extent_io_tree *tree, struct extent_state *orig, 351static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
361 u64 split) 352 u64 split)
362{ 353{
363 if (tree->ops && tree->ops->split_extent_hook) 354 if (tree->ops && tree->ops->split_extent_hook)
364 return tree->ops->split_extent_hook(tree->mapping->host, 355 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
365 orig, split);
366 return 0;
367} 356}
368 357
369/* 358/*
@@ -659,34 +648,25 @@ again:
659 if (start > end) 648 if (start > end)
660 break; 649 break;
661 650
662 if (need_resched()) { 651 cond_resched_lock(&tree->lock);
663 spin_unlock(&tree->lock);
664 cond_resched();
665 spin_lock(&tree->lock);
666 }
667 } 652 }
668out: 653out:
669 spin_unlock(&tree->lock); 654 spin_unlock(&tree->lock);
670 return 0; 655 return 0;
671} 656}
672 657
673static int set_state_bits(struct extent_io_tree *tree, 658static void set_state_bits(struct extent_io_tree *tree,
674 struct extent_state *state, 659 struct extent_state *state,
675 int *bits) 660 int *bits)
676{ 661{
677 int ret;
678 int bits_to_set = *bits & ~EXTENT_CTLBITS; 662 int bits_to_set = *bits & ~EXTENT_CTLBITS;
679 663
680 ret = set_state_cb(tree, state, bits); 664 set_state_cb(tree, state, bits);
681 if (ret)
682 return ret;
683 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { 665 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
684 u64 range = state->end - state->start + 1; 666 u64 range = state->end - state->start + 1;
685 tree->dirty_bytes += range; 667 tree->dirty_bytes += range;
686 } 668 }
687 state->state |= bits_to_set; 669 state->state |= bits_to_set;
688
689 return 0;
690} 670}
691 671
692static void cache_state(struct extent_state *state, 672static void cache_state(struct extent_state *state,
@@ -779,9 +759,7 @@ hit_next:
779 goto out; 759 goto out;
780 } 760 }
781 761
782 err = set_state_bits(tree, state, &bits); 762 set_state_bits(tree, state, &bits);
783 if (err)
784 goto out;
785 763
786 cache_state(state, cached_state); 764 cache_state(state, cached_state);
787 merge_state(tree, state); 765 merge_state(tree, state);
@@ -830,9 +808,7 @@ hit_next:
830 if (err) 808 if (err)
831 goto out; 809 goto out;
832 if (state->end <= end) { 810 if (state->end <= end) {
833 err = set_state_bits(tree, state, &bits); 811 set_state_bits(tree, state, &bits);
834 if (err)
835 goto out;
836 cache_state(state, cached_state); 812 cache_state(state, cached_state);
837 merge_state(tree, state); 813 merge_state(tree, state);
838 if (last_end == (u64)-1) 814 if (last_end == (u64)-1)
@@ -893,11 +869,7 @@ hit_next:
893 err = split_state(tree, state, prealloc, end + 1); 869 err = split_state(tree, state, prealloc, end + 1);
894 BUG_ON(err == -EEXIST); 870 BUG_ON(err == -EEXIST);
895 871
896 err = set_state_bits(tree, prealloc, &bits); 872 set_state_bits(tree, prealloc, &bits);
897 if (err) {
898 prealloc = NULL;
899 goto out;
900 }
901 cache_state(prealloc, cached_state); 873 cache_state(prealloc, cached_state);
902 merge_state(tree, prealloc); 874 merge_state(tree, prealloc);
903 prealloc = NULL; 875 prealloc = NULL;
@@ -1059,46 +1031,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1059 return 0; 1031 return 0;
1060} 1032}
1061 1033
1062/*
1063 * find the first offset in the io tree with 'bits' set. zero is
1064 * returned if we find something, and *start_ret and *end_ret are
1065 * set to reflect the state struct that was found.
1066 *
1067 * If nothing was found, 1 is returned, < 0 on error
1068 */
1069int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1070 u64 *start_ret, u64 *end_ret, int bits)
1071{
1072 struct rb_node *node;
1073 struct extent_state *state;
1074 int ret = 1;
1075
1076 spin_lock(&tree->lock);
1077 /*
1078 * this search will find all the extents that end after
1079 * our range starts.
1080 */
1081 node = tree_search(tree, start);
1082 if (!node)
1083 goto out;
1084
1085 while (1) {
1086 state = rb_entry(node, struct extent_state, rb_node);
1087 if (state->end >= start && (state->state & bits)) {
1088 *start_ret = state->start;
1089 *end_ret = state->end;
1090 ret = 0;
1091 break;
1092 }
1093 node = rb_next(node);
1094 if (!node)
1095 break;
1096 }
1097out:
1098 spin_unlock(&tree->lock);
1099 return ret;
1100}
1101
1102/* find the first state struct with 'bits' set after 'start', and 1034/* find the first state struct with 'bits' set after 'start', and
1103 * return it. tree->lock must be held. NULL will returned if 1035 * return it. tree->lock must be held. NULL will returned if
1104 * nothing was found after 'start' 1036 * nothing was found after 'start'
@@ -1131,6 +1063,30 @@ out:
1131} 1063}
1132 1064
1133/* 1065/*
1066 * find the first offset in the io tree with 'bits' set. zero is
1067 * returned if we find something, and *start_ret and *end_ret are
1068 * set to reflect the state struct that was found.
1069 *
1070 * If nothing was found, 1 is returned, < 0 on error
1071 */
1072int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1073 u64 *start_ret, u64 *end_ret, int bits)
1074{
1075 struct extent_state *state;
1076 int ret = 1;
1077
1078 spin_lock(&tree->lock);
1079 state = find_first_extent_bit_state(tree, start, bits);
1080 if (state) {
1081 *start_ret = state->start;
1082 *end_ret = state->end;
1083 ret = 0;
1084 }
1085 spin_unlock(&tree->lock);
1086 return ret;
1087}
1088
1089/*
1134 * find a contiguous range of bytes in the file marked as delalloc, not 1090 * find a contiguous range of bytes in the file marked as delalloc, not
1135 * more than 'max_bytes'. start and end are used to return the range, 1091 * more than 'max_bytes'. start and end are used to return the range,
1136 * 1092 *
@@ -2546,7 +2502,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2546 struct writeback_control *wbc) 2502 struct writeback_control *wbc)
2547{ 2503{
2548 int ret; 2504 int ret;
2549 struct address_space *mapping = page->mapping;
2550 struct extent_page_data epd = { 2505 struct extent_page_data epd = {
2551 .bio = NULL, 2506 .bio = NULL,
2552 .tree = tree, 2507 .tree = tree,
@@ -2554,17 +2509,9 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2554 .extent_locked = 0, 2509 .extent_locked = 0,
2555 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 2510 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2556 }; 2511 };
2557 struct writeback_control wbc_writepages = {
2558 .sync_mode = wbc->sync_mode,
2559 .nr_to_write = 64,
2560 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2561 .range_end = (loff_t)-1,
2562 };
2563 2512
2564 ret = __extent_writepage(page, wbc, &epd); 2513 ret = __extent_writepage(page, wbc, &epd);
2565 2514
2566 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2567 __extent_writepage, &epd, flush_write_bio);
2568 flush_epd_write_bio(&epd); 2515 flush_epd_write_bio(&epd);
2569 return ret; 2516 return ret;
2570} 2517}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 21a7ca9e7282..7b2f0c3e7929 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -76,15 +76,15 @@ struct extent_io_ops {
                   struct extent_state *state);
     int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
                   struct extent_state *state, int uptodate);
-    int (*set_bit_hook)(struct inode *inode, struct extent_state *state,
+    void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
                   int *bits);
-    int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
+    void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
                   int *bits);
-    int (*merge_extent_hook)(struct inode *inode,
+    void (*merge_extent_hook)(struct inode *inode,
                   struct extent_state *new,
                   struct extent_state *other);
-    int (*split_extent_hook)(struct inode *inode,
+    void (*split_extent_hook)(struct inode *inode,
                   struct extent_state *orig, u64 split);
     int (*write_cache_pages_lock_hook)(struct page *page);
 };
 
@@ -108,8 +108,6 @@ struct extent_state {
     wait_queue_head_t wq;
     atomic_t refs;
     unsigned long state;
-    u64 split_start;
-    u64 split_end;
 
     /* for use by the FS */
     u64 private;
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2d0410344ea3..7c97b3301459 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -183,22 +183,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
183 return 0; 183 return 0;
184} 184}
185 185
186int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len) 186static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
187{ 187{
188 int ret = 0;
189 struct extent_map *merge = NULL; 188 struct extent_map *merge = NULL;
190 struct rb_node *rb; 189 struct rb_node *rb;
191 struct extent_map *em;
192
193 write_lock(&tree->lock);
194 em = lookup_extent_mapping(tree, start, len);
195
196 WARN_ON(!em || em->start != start);
197
198 if (!em)
199 goto out;
200
201 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
202 190
203 if (em->start != 0) { 191 if (em->start != 0) {
204 rb = rb_prev(&em->rb_node); 192 rb = rb_prev(&em->rb_node);
@@ -225,6 +213,24 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
225 merge->in_tree = 0; 213 merge->in_tree = 0;
226 free_extent_map(merge); 214 free_extent_map(merge);
227 } 215 }
216}
217
218int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
219{
220 int ret = 0;
221 struct extent_map *em;
222
223 write_lock(&tree->lock);
224 em = lookup_extent_mapping(tree, start, len);
225
226 WARN_ON(!em || em->start != start);
227
228 if (!em)
229 goto out;
230
231 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
232
233 try_merge_map(tree, em);
228 234
229 free_extent_map(em); 235 free_extent_map(em);
230out: 236out:
@@ -247,7 +253,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
247 struct extent_map *em) 253 struct extent_map *em)
248{ 254{
249 int ret = 0; 255 int ret = 0;
250 struct extent_map *merge = NULL;
251 struct rb_node *rb; 256 struct rb_node *rb;
252 struct extent_map *exist; 257 struct extent_map *exist;
253 258
@@ -263,30 +268,8 @@ int add_extent_mapping(struct extent_map_tree *tree,
263 goto out; 268 goto out;
264 } 269 }
265 atomic_inc(&em->refs); 270 atomic_inc(&em->refs);
266 if (em->start != 0) { 271
267 rb = rb_prev(&em->rb_node); 272 try_merge_map(tree, em);
268 if (rb)
269 merge = rb_entry(rb, struct extent_map, rb_node);
270 if (rb && mergable_maps(merge, em)) {
271 em->start = merge->start;
272 em->len += merge->len;
273 em->block_len += merge->block_len;
274 em->block_start = merge->block_start;
275 merge->in_tree = 0;
276 rb_erase(&merge->rb_node, &tree->map);
277 free_extent_map(merge);
278 }
279 }
280 rb = rb_next(&em->rb_node);
281 if (rb)
282 merge = rb_entry(rb, struct extent_map, rb_node);
283 if (rb && mergable_maps(em, merge)) {
284 em->len += merge->len;
285 em->block_len += merge->len;
286 rb_erase(&merge->rb_node, &tree->map);
287 merge->in_tree = 0;
288 free_extent_map(merge);
289 }
290out: 273out:
291 return ret; 274 return ret;
292} 275}
@@ -299,19 +282,8 @@ static u64 range_end(u64 start, u64 len)
299 return start + len; 282 return start + len;
300} 283}
301 284
302/** 285struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
303 * lookup_extent_mapping - lookup extent_map 286 u64 start, u64 len, int strict)
304 * @tree: tree to lookup in
305 * @start: byte offset to start the search
306 * @len: length of the lookup range
307 *
308 * Find and return the first extent_map struct in @tree that intersects the
309 * [start, len] range. There may be additional objects in the tree that
310 * intersect, so check the object returned carefully to make sure that no
311 * additional lookups are needed.
312 */
313struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
314 u64 start, u64 len)
315{ 287{
316 struct extent_map *em; 288 struct extent_map *em;
317 struct rb_node *rb_node; 289 struct rb_node *rb_node;
@@ -320,38 +292,42 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
320 u64 end = range_end(start, len); 292 u64 end = range_end(start, len);
321 293
322 rb_node = __tree_search(&tree->map, start, &prev, &next); 294 rb_node = __tree_search(&tree->map, start, &prev, &next);
323 if (!rb_node && prev) {
324 em = rb_entry(prev, struct extent_map, rb_node);
325 if (end > em->start && start < extent_map_end(em))
326 goto found;
327 }
328 if (!rb_node && next) {
329 em = rb_entry(next, struct extent_map, rb_node);
330 if (end > em->start && start < extent_map_end(em))
331 goto found;
332 }
333 if (!rb_node) { 295 if (!rb_node) {
334 em = NULL; 296 if (prev)
335 goto out; 297 rb_node = prev;
336 } 298 else if (next)
337 if (IS_ERR(rb_node)) { 299 rb_node = next;
338 em = ERR_CAST(rb_node); 300 else
339 goto out; 301 return NULL;
340 } 302 }
303
341 em = rb_entry(rb_node, struct extent_map, rb_node); 304 em = rb_entry(rb_node, struct extent_map, rb_node);
342 if (end > em->start && start < extent_map_end(em))
343 goto found;
344 305
345 em = NULL; 306 if (strict && !(end > em->start && start < extent_map_end(em)))
346 goto out; 307 return NULL;
347 308
348found:
349 atomic_inc(&em->refs); 309 atomic_inc(&em->refs);
350out:
351 return em; 310 return em;
352} 311}
353 312
354/** 313/**
314 * lookup_extent_mapping - lookup extent_map
315 * @tree: tree to lookup in
316 * @start: byte offset to start the search
317 * @len: length of the lookup range
318 *
319 * Find and return the first extent_map struct in @tree that intersects the
320 * [start, len] range. There may be additional objects in the tree that
321 * intersect, so check the object returned carefully to make sure that no
322 * additional lookups are needed.
323 */
324struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
325 u64 start, u64 len)
326{
327 return __lookup_extent_mapping(tree, start, len, 1);
328}
329
330/**
355 * search_extent_mapping - find a nearby extent map 331 * search_extent_mapping - find a nearby extent map
356 * @tree: tree to lookup in 332 * @tree: tree to lookup in
357 * @start: byte offset to start the search 333 * @start: byte offset to start the search
@@ -365,38 +341,7 @@ out:
365struct extent_map *search_extent_mapping(struct extent_map_tree *tree, 341struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
366 u64 start, u64 len) 342 u64 start, u64 len)
367{ 343{
368 struct extent_map *em; 344 return __lookup_extent_mapping(tree, start, len, 0);
369 struct rb_node *rb_node;
370 struct rb_node *prev = NULL;
371 struct rb_node *next = NULL;
372
373 rb_node = __tree_search(&tree->map, start, &prev, &next);
374 if (!rb_node && prev) {
375 em = rb_entry(prev, struct extent_map, rb_node);
376 goto found;
377 }
378 if (!rb_node && next) {
379 em = rb_entry(next, struct extent_map, rb_node);
380 goto found;
381 }
382 if (!rb_node) {
383 em = NULL;
384 goto out;
385 }
386 if (IS_ERR(rb_node)) {
387 em = ERR_CAST(rb_node);
388 goto out;
389 }
390 em = rb_entry(rb_node, struct extent_map, rb_node);
391 goto found;
392
393 em = NULL;
394 goto out;
395
396found:
397 atomic_inc(&em->refs);
398out:
399 return em;
400} 345}
401 346
402/** 347/**
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 08bcfa92a222..b910694f61ed 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -291,7 +291,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
     u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
 
     path = btrfs_alloc_path();
-    BUG_ON(!path);
+    if (!path)
+        return -ENOMEM;
 
     if (search_commit) {
         path->skip_locking = 1;
@@ -677,7 +678,9 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
         btrfs_super_csum_size(&root->fs_info->super_copy);
 
     path = btrfs_alloc_path();
-    BUG_ON(!path);
+    if (!path)
+        return -ENOMEM;
+
     sector_sum = sums->sums;
 again:
     next_offset = (u64)-1;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a35e51c9f235..658d66959abe 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -74,7 +74,7 @@ struct inode_defrag {
  * If an existing record is found the defrag item you
  * pass in is freed
  */
-static int __btrfs_add_inode_defrag(struct inode *inode,
+static void __btrfs_add_inode_defrag(struct inode *inode,
                     struct inode_defrag *defrag)
 {
     struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -106,11 +106,11 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
     BTRFS_I(inode)->in_defrag = 1;
     rb_link_node(&defrag->rb_node, parent, p);
     rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
-    return 0;
+    return;
 
 exists:
     kfree(defrag);
-    return 0;
+    return;
 
 }
 
@@ -123,7 +123,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 {
     struct btrfs_root *root = BTRFS_I(inode)->root;
     struct inode_defrag *defrag;
-    int ret = 0;
     u64 transid;
 
     if (!btrfs_test_opt(root, AUTO_DEFRAG))
@@ -150,9 +149,9 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 
     spin_lock(&root->fs_info->defrag_inodes_lock);
     if (!BTRFS_I(inode)->in_defrag)
-        ret = __btrfs_add_inode_defrag(inode, defrag);
+        __btrfs_add_inode_defrag(inode, defrag);
     spin_unlock(&root->fs_info->defrag_inodes_lock);
-    return ret;
+    return 0;
 }
 
 /*
@@ -855,7 +854,8 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
     btrfs_drop_extent_cache(inode, start, end - 1, 0);
 
     path = btrfs_alloc_path();
-    BUG_ON(!path);
+    if (!path)
+        return -ENOMEM;
 again:
     recow = 0;
     split = start;
@@ -1059,7 +1059,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos)
 static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
              struct page **pages, size_t num_pages,
              loff_t pos, unsigned long first_index,
-             unsigned long last_index, size_t write_bytes)
+             size_t write_bytes)
 {
     struct extent_state *cached_state = NULL;
     int i;
@@ -1159,7 +1159,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
     struct btrfs_root *root = BTRFS_I(inode)->root;
     struct page **pages = NULL;
     unsigned long first_index;
-    unsigned long last_index;
     size_t num_written = 0;
     int nrptrs;
     int ret = 0;
@@ -1172,7 +1171,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
         return -ENOMEM;
 
     first_index = pos >> PAGE_CACHE_SHIFT;
-    last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;
 
     while (iov_iter_count(i) > 0) {
         size_t offset = pos & (PAGE_CACHE_SIZE - 1);
@@ -1206,8 +1204,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
          * contents of pages from loop to loop
          */
         ret = prepare_pages(root, file, pages, num_pages,
-                    pos, first_index, last_index,
-                    write_bytes);
+                    pos, first_index, write_bytes);
         if (ret) {
             btrfs_delalloc_release_space(inode,
                     num_pages << PAGE_CACHE_SHIFT);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 13e6255182e3..15fceefbca0a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1061,7 +1061,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1061 u64 ino = btrfs_ino(inode); 1061 u64 ino = btrfs_ino(inode);
1062 1062
1063 path = btrfs_alloc_path(); 1063 path = btrfs_alloc_path();
1064 BUG_ON(!path); 1064 if (!path)
1065 return -ENOMEM;
1065 1066
1066 nolock = btrfs_is_free_space_inode(root, inode); 1067 nolock = btrfs_is_free_space_inode(root, inode);
1067 1068
@@ -1282,17 +1283,16 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1282 return ret; 1283 return ret;
1283} 1284}
1284 1285
1285static int btrfs_split_extent_hook(struct inode *inode, 1286static void btrfs_split_extent_hook(struct inode *inode,
1286 struct extent_state *orig, u64 split) 1287 struct extent_state *orig, u64 split)
1287{ 1288{
1288 /* not delalloc, ignore it */ 1289 /* not delalloc, ignore it */
1289 if (!(orig->state & EXTENT_DELALLOC)) 1290 if (!(orig->state & EXTENT_DELALLOC))
1290 return 0; 1291 return;
1291 1292
1292 spin_lock(&BTRFS_I(inode)->lock); 1293 spin_lock(&BTRFS_I(inode)->lock);
1293 BTRFS_I(inode)->outstanding_extents++; 1294 BTRFS_I(inode)->outstanding_extents++;
1294 spin_unlock(&BTRFS_I(inode)->lock); 1295 spin_unlock(&BTRFS_I(inode)->lock);
1295 return 0;
1296} 1296}
1297 1297
1298/* 1298/*
@@ -1301,18 +1301,17 @@ static int btrfs_split_extent_hook(struct inode *inode,
1301 * extents, such as when we are doing sequential writes, so we can properly 1301 * extents, such as when we are doing sequential writes, so we can properly
1302 * account for the metadata space we'll need. 1302 * account for the metadata space we'll need.
1303 */ 1303 */
1304static int btrfs_merge_extent_hook(struct inode *inode, 1304static void btrfs_merge_extent_hook(struct inode *inode,
1305 struct extent_state *new, 1305 struct extent_state *new,
1306 struct extent_state *other) 1306 struct extent_state *other)
1307{ 1307{
1308 /* not delalloc, ignore it */ 1308 /* not delalloc, ignore it */
1309 if (!(other->state & EXTENT_DELALLOC)) 1309 if (!(other->state & EXTENT_DELALLOC))
1310 return 0; 1310 return;
1311 1311
1312 spin_lock(&BTRFS_I(inode)->lock); 1312 spin_lock(&BTRFS_I(inode)->lock);
1313 BTRFS_I(inode)->outstanding_extents--; 1313 BTRFS_I(inode)->outstanding_extents--;
1314 spin_unlock(&BTRFS_I(inode)->lock); 1314 spin_unlock(&BTRFS_I(inode)->lock);
1315 return 0;
1316} 1315}
1317 1316
1318/* 1317/*
@@ -1320,8 +1319,8 @@ static int btrfs_merge_extent_hook(struct inode *inode,
1320 * bytes in this file, and to maintain the list of inodes that 1319 * bytes in this file, and to maintain the list of inodes that
1321 * have pending delalloc work to be done. 1320 * have pending delalloc work to be done.
1322 */ 1321 */
1323static int btrfs_set_bit_hook(struct inode *inode, 1322static void btrfs_set_bit_hook(struct inode *inode,
1324 struct extent_state *state, int *bits) 1323 struct extent_state *state, int *bits)
1325{ 1324{
1326 1325
1327 /* 1326 /*
@@ -1351,14 +1350,13 @@ static int btrfs_set_bit_hook(struct inode *inode,
1351 } 1350 }
1352 spin_unlock(&root->fs_info->delalloc_lock); 1351 spin_unlock(&root->fs_info->delalloc_lock);
1353 } 1352 }
1354 return 0;
1355} 1353}
1356 1354
1357/* 1355/*
1358 * extent_io.c clear_bit_hook, see set_bit_hook for why 1356 * extent_io.c clear_bit_hook, see set_bit_hook for why
1359 */ 1357 */
1360static int btrfs_clear_bit_hook(struct inode *inode, 1358static void btrfs_clear_bit_hook(struct inode *inode,
1361 struct extent_state *state, int *bits) 1359 struct extent_state *state, int *bits)
1362{ 1360{
1363 /* 1361 /*
1364 * set_bit and clear bit hooks normally require _irqsave/restore 1362 * set_bit and clear bit hooks normally require _irqsave/restore
@@ -1395,7 +1393,6 @@ static int btrfs_clear_bit_hook(struct inode *inode,
1395 } 1393 }
1396 spin_unlock(&root->fs_info->delalloc_lock); 1394 spin_unlock(&root->fs_info->delalloc_lock);
1397 } 1395 }
1398 return 0;
1399} 1396}
1400 1397
1401/* 1398/*
@@ -1645,7 +1642,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1645 int ret; 1642 int ret;
1646 1643
1647 path = btrfs_alloc_path(); 1644 path = btrfs_alloc_path();
1648 BUG_ON(!path); 1645 if (!path)
1646 return -ENOMEM;
1649 1647
1650 path->leave_spinning = 1; 1648 path->leave_spinning = 1;
1651 1649
@@ -2215,7 +2213,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2215 2213
2216 if (!root->orphan_block_rsv) { 2214 if (!root->orphan_block_rsv) {
2217 block_rsv = btrfs_alloc_block_rsv(root); 2215 block_rsv = btrfs_alloc_block_rsv(root);
2218 BUG_ON(!block_rsv); 2216 if (!block_rsv)
2217 return -ENOMEM;
2219 } 2218 }
2220 2219
2221 spin_lock(&root->orphan_lock); 2220 spin_lock(&root->orphan_lock);
@@ -2517,7 +2516,9 @@ static void btrfs_read_locked_inode(struct inode *inode)
2517 filled = true; 2516 filled = true;
2518 2517
2519 path = btrfs_alloc_path(); 2518 path = btrfs_alloc_path();
2520 BUG_ON(!path); 2519 if (!path)
2520 goto make_bad;
2521
2521 path->leave_spinning = 1; 2522 path->leave_spinning = 1;
2522 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 2523 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2523 2524
@@ -2998,13 +2999,16 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2998 2999
2999 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3000 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3000 dentry->d_name.name, dentry->d_name.len); 3001 dentry->d_name.name, dentry->d_name.len);
3001 BUG_ON(ret); 3002 if (ret)
3003 goto out;
3002 3004
3003 if (inode->i_nlink == 0) { 3005 if (inode->i_nlink == 0) {
3004 ret = btrfs_orphan_add(trans, inode); 3006 ret = btrfs_orphan_add(trans, inode);
3005 BUG_ON(ret); 3007 if (ret)
3008 goto out;
3006 } 3009 }
3007 3010
3011out:
3008 nr = trans->blocks_used; 3012 nr = trans->blocks_used;
3009 __unlink_end_trans(trans, root); 3013 __unlink_end_trans(trans, root);
3010 btrfs_btree_balance_dirty(root, nr); 3014 btrfs_btree_balance_dirty(root, nr);
@@ -3147,6 +3151,11 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3147 3151
3148 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 3152 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3149 3153
3154 path = btrfs_alloc_path();
3155 if (!path)
3156 return -ENOMEM;
3157 path->reada = -1;
3158
3150 if (root->ref_cows || root == root->fs_info->tree_root) 3159 if (root->ref_cows || root == root->fs_info->tree_root)
3151 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); 3160 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
3152 3161
@@ -3159,10 +3168,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3159 if (min_type == 0 && root == BTRFS_I(inode)->root) 3168 if (min_type == 0 && root == BTRFS_I(inode)->root)
3160 btrfs_kill_delayed_inode_items(inode); 3169 btrfs_kill_delayed_inode_items(inode);
3161 3170
3162 path = btrfs_alloc_path();
3163 BUG_ON(!path);
3164 path->reada = -1;
3165
3166 key.objectid = ino; 3171 key.objectid = ino;
3167 key.offset = (u64)-1; 3172 key.offset = (u64)-1;
3168 key.type = (u8)-1; 3173 key.type = (u8)-1;
@@ -3690,7 +3695,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3690 int ret = 0; 3695 int ret = 0;
3691 3696
3692 path = btrfs_alloc_path(); 3697 path = btrfs_alloc_path();
3693 BUG_ON(!path); 3698 if (!path)
3699 return -ENOMEM;
3694 3700
3695 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, 3701 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
3696 namelen, 0); 3702 namelen, 0);
@@ -3946,6 +3952,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3946 struct btrfs_root *root, int *new) 3952 struct btrfs_root *root, int *new)
3947{ 3953{
3948 struct inode *inode; 3954 struct inode *inode;
3955 int bad_inode = 0;
3949 3956
3950 inode = btrfs_iget_locked(s, location->objectid, root); 3957 inode = btrfs_iget_locked(s, location->objectid, root);
3951 if (!inode) 3958 if (!inode)
@@ -3955,10 +3962,19 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3955 BTRFS_I(inode)->root = root; 3962 BTRFS_I(inode)->root = root;
3956 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); 3963 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3957 btrfs_read_locked_inode(inode); 3964 btrfs_read_locked_inode(inode);
3958 inode_tree_add(inode); 3965 if (!is_bad_inode(inode)) {
3959 unlock_new_inode(inode); 3966 inode_tree_add(inode);
3960 if (new) 3967 unlock_new_inode(inode);
3961 *new = 1; 3968 if (new)
3969 *new = 1;
3970 } else {
3971 bad_inode = 1;
3972 }
3973 }
3974
3975 if (bad_inode) {
3976 iput(inode);
3977 inode = ERR_PTR(-ESTALE);
3962 } 3978 }
3963 3979
3964 return inode; 3980 return inode;
@@ -3993,12 +4009,19 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3993 struct btrfs_root *sub_root = root; 4009 struct btrfs_root *sub_root = root;
3994 struct btrfs_key location; 4010 struct btrfs_key location;
3995 int index; 4011 int index;
3996 int ret; 4012 int ret = 0;
3997 4013
3998 if (dentry->d_name.len > BTRFS_NAME_LEN) 4014 if (dentry->d_name.len > BTRFS_NAME_LEN)
3999 return ERR_PTR(-ENAMETOOLONG); 4015 return ERR_PTR(-ENAMETOOLONG);
4000 4016
4001 ret = btrfs_inode_by_name(dir, dentry, &location); 4017 if (unlikely(d_need_lookup(dentry))) {
4018 memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
4019 kfree(dentry->d_fsdata);
4020 dentry->d_fsdata = NULL;
4021 d_clear_need_lookup(dentry);
4022 } else {
4023 ret = btrfs_inode_by_name(dir, dentry, &location);
4024 }
4002 4025
4003 if (ret < 0) 4026 if (ret < 0)
4004 return ERR_PTR(ret); 4027 return ERR_PTR(ret);
@@ -4053,6 +4076,12 @@ static int btrfs_dentry_delete(const struct dentry *dentry)
4053 return 0; 4076 return 0;
4054} 4077}
4055 4078
4079static void btrfs_dentry_release(struct dentry *dentry)
4080{
4081 if (dentry->d_fsdata)
4082 kfree(dentry->d_fsdata);
4083}
4084
4056static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 4085static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4057 struct nameidata *nd) 4086 struct nameidata *nd)
4058{ 4087{
@@ -4075,6 +4104,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4075 struct btrfs_path *path; 4104 struct btrfs_path *path;
4076 struct list_head ins_list; 4105 struct list_head ins_list;
4077 struct list_head del_list; 4106 struct list_head del_list;
4107 struct qstr q;
4078 int ret; 4108 int ret;
4079 struct extent_buffer *leaf; 4109 struct extent_buffer *leaf;
4080 int slot; 4110 int slot;
@@ -4164,6 +4194,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4164 4194
4165 while (di_cur < di_total) { 4195 while (di_cur < di_total) {
4166 struct btrfs_key location; 4196 struct btrfs_key location;
4197 struct dentry *tmp;
4167 4198
4168 if (verify_dir_item(root, leaf, di)) 4199 if (verify_dir_item(root, leaf, di))
4169 break; 4200 break;
@@ -4184,6 +4215,33 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4184 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 4215 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
4185 btrfs_dir_item_key_to_cpu(leaf, di, &location); 4216 btrfs_dir_item_key_to_cpu(leaf, di, &location);
4186 4217
4218 q.name = name_ptr;
4219 q.len = name_len;
4220 q.hash = full_name_hash(q.name, q.len);
4221 tmp = d_lookup(filp->f_dentry, &q);
4222 if (!tmp) {
4223 struct btrfs_key *newkey;
4224
4225 newkey = kzalloc(sizeof(struct btrfs_key),
4226 GFP_NOFS);
4227 if (!newkey)
4228 goto no_dentry;
4229 tmp = d_alloc(filp->f_dentry, &q);
4230 if (!tmp) {
4231 kfree(newkey);
4232 dput(tmp);
4233 goto no_dentry;
4234 }
4235 memcpy(newkey, &location,
4236 sizeof(struct btrfs_key));
4237 tmp->d_fsdata = newkey;
4238 tmp->d_flags |= DCACHE_NEED_LOOKUP;
4239 d_rehash(tmp);
4240 dput(tmp);
4241 } else {
4242 dput(tmp);
4243 }
4244no_dentry:
4187 /* is this a reference to our own snapshot? If so 4245 /* is this a reference to our own snapshot? If so
4188 * skip it 4246 * skip it
4189 */ 4247 */
@@ -4409,7 +4467,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4409 int owner; 4467 int owner;
4410 4468
4411 path = btrfs_alloc_path(); 4469 path = btrfs_alloc_path();
4412 BUG_ON(!path); 4470 if (!path)
4471 return ERR_PTR(-ENOMEM);
4413 4472
4414 inode = new_inode(root->fs_info->sb); 4473 inode = new_inode(root->fs_info->sb);
4415 if (!inode) { 4474 if (!inode) {
@@ -6669,19 +6728,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6669 return 0; 6728 return 0;
6670} 6729}
6671 6730
6672/* helper function for file defrag and space balancing. This
6673 * forces readahead on a given range of bytes in an inode
6674 */
6675unsigned long btrfs_force_ra(struct address_space *mapping,
6676 struct file_ra_state *ra, struct file *file,
6677 pgoff_t offset, pgoff_t last_index)
6678{
6679 pgoff_t req_size = last_index - offset + 1;
6680
6681 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
6682 return offset + req_size;
6683}
6684
6685struct inode *btrfs_alloc_inode(struct super_block *sb) 6731struct inode *btrfs_alloc_inode(struct super_block *sb)
6686{ 6732{
6687 struct btrfs_inode *ei; 6733 struct btrfs_inode *ei;
@@ -7164,7 +7210,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7164 goto out_unlock; 7210 goto out_unlock;
7165 7211
7166 path = btrfs_alloc_path(); 7212 path = btrfs_alloc_path();
7167 BUG_ON(!path); 7213 if (!path) {
7214 err = -ENOMEM;
7215 drop_inode = 1;
7216 goto out_unlock;
7217 }
7168 key.objectid = btrfs_ino(inode); 7218 key.objectid = btrfs_ino(inode);
7169 key.offset = 0; 7219 key.offset = 0;
7170 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); 7220 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
@@ -7430,4 +7480,5 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
7430 7480
7431const struct dentry_operations btrfs_dentry_operations = { 7481const struct dentry_operations btrfs_dentry_operations = {
7432 .d_delete = btrfs_dentry_delete, 7482 .d_delete = btrfs_dentry_delete,
7483 .d_release = btrfs_dentry_release,
7433}; 7484};
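
The inode.c hunks above prime the dcache from readdir: for each directory entry that has no dentry yet, a disconnected dentry is allocated, the btrfs_key from the dir item is stashed in d_fsdata, and DCACHE_NEED_LOOKUP is set so that a later lookup can skip the btrfs_inode_by_name() search. btrfs_lookup() then consumes the stashed key, and the new btrfs_dentry_release() frees d_fsdata if the dentry is torn down before anyone looks it up. Below is a condensed, annotated paraphrase of the two halves of that handshake (error paths simplified, not the literal patch text):

    /* readdir side: hash the on-disk name and, if no dentry exists yet,
     * create one that only carries the location key. */
    q.name = name_ptr;
    q.len  = name_len;
    q.hash = full_name_hash(q.name, q.len);

    tmp = d_lookup(filp->f_dentry, &q);
    if (!tmp) {
            struct btrfs_key *newkey = kzalloc(sizeof(*newkey), GFP_NOFS);

            if (newkey) {
                    tmp = d_alloc(filp->f_dentry, &q);
                    if (tmp) {
                            memcpy(newkey, &location, sizeof(*newkey));
                            tmp->d_fsdata = newkey;          /* key travels with the dentry */
                            tmp->d_flags |= DCACHE_NEED_LOOKUP;
                            d_rehash(tmp);                   /* visible to later lookups */
                    } else {
                            kfree(newkey);
                    }
            }
    }
    if (tmp)
            dput(tmp);

    /* lookup side: a dentry flagged NEED_LOOKUP already knows its key,
     * so the dir-item search can be skipped. */
    if (unlikely(d_need_lookup(dentry))) {
            memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
            kfree(dentry->d_fsdata);
            dentry->d_fsdata = NULL;
            d_clear_need_lookup(dentry);
    } else {
            ret = btrfs_inode_by_name(dir, dentry, &location);
    }

The .d_release hook added to btrfs_dentry_operations is what keeps the stashed key from leaking when such a prepopulated dentry is evicted without ever being looked up.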
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0b980afc5edd..7cf013349941 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1749,11 +1749,10 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
1749 key.objectid = key.offset; 1749 key.objectid = key.offset;
1750 key.offset = (u64)-1; 1750 key.offset = (u64)-1;
1751 dirid = key.objectid; 1751 dirid = key.objectid;
1752
1753 } 1752 }
1754 if (ptr < name) 1753 if (ptr < name)
1755 goto out; 1754 goto out;
1756 memcpy(name, ptr, total_len); 1755 memmove(name, ptr, total_len);
1757 name[total_len]='\0'; 1756 name[total_len]='\0';
1758 ret = 0; 1757 ret = 0;
1759out: 1758out:
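
In btrfs_search_path_in_tree() the path string is assembled right to left inside the caller-supplied buffer, so when the finished name is shifted to the start of that same buffer the source and destination regions can overlap. memcpy() is undefined for overlapping copies, which is why the hunk switches to memmove(). A minimal userspace illustration of the difference (not btrfs code; the buffer contents are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char name[32] = "xxxxsnap/dir";   /* path built up from the tail of the buffer */
            char *ptr = name + 4;             /* start of the assembled path */
            size_t total_len = strlen(ptr);

            /* dst (name) and src (ptr) overlap inside the same array, so
             * memmove() is required; memcpy() here would be undefined behavior. */
            memmove(name, ptr, total_len);
            name[total_len] = '\0';

            printf("%s\n", name);             /* prints "snap/dir" */
            return 0;
    }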
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
deleted file mode 100644
index 82d569cb6267..000000000000
--- a/fs/btrfs/ref-cache.c
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/sort.h>
22#include "ctree.h"
23#include "ref-cache.h"
24#include "transaction.h"
25
26static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
27 struct rb_node *node)
28{
29 struct rb_node **p = &root->rb_node;
30 struct rb_node *parent = NULL;
31 struct btrfs_leaf_ref *entry;
32
33 while (*p) {
34 parent = *p;
35 entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
36
37 if (bytenr < entry->bytenr)
38 p = &(*p)->rb_left;
39 else if (bytenr > entry->bytenr)
40 p = &(*p)->rb_right;
41 else
42 return parent;
43 }
44
45 entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
46 rb_link_node(node, parent, p);
47 rb_insert_color(node, root);
48 return NULL;
49}
50
51static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
52{
53 struct rb_node *n = root->rb_node;
54 struct btrfs_leaf_ref *entry;
55
56 while (n) {
57 entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
58 WARN_ON(!entry->in_tree);
59
60 if (bytenr < entry->bytenr)
61 n = n->rb_left;
62 else if (bytenr > entry->bytenr)
63 n = n->rb_right;
64 else
65 return n;
66 }
67 return NULL;
68}
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
deleted file mode 100644
index 24f7001f6387..000000000000
--- a/fs/btrfs/ref-cache.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#ifndef __REFCACHE__
19#define __REFCACHE__
20
21struct btrfs_extent_info {
22 /* bytenr and num_bytes find the extent in the extent allocation tree */
23 u64 bytenr;
24 u64 num_bytes;
25
26 /* objectid and offset find the back reference for the file */
27 u64 objectid;
28 u64 offset;
29};
30
31struct btrfs_leaf_ref {
32 struct rb_node rb_node;
33 struct btrfs_leaf_ref_tree *tree;
34 int in_tree;
35 atomic_t usage;
36
37 u64 root_gen;
38 u64 bytenr;
39 u64 owner;
40 u64 generation;
41 int nritems;
42
43 struct list_head list;
44 struct btrfs_extent_info extents[];
45};
46
47static inline size_t btrfs_leaf_ref_size(int nr_extents)
48{
49 return sizeof(struct btrfs_leaf_ref) +
50 sizeof(struct btrfs_extent_info) * nr_extents;
51}
52#endif
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index ebe45443de06..f4099904565a 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -71,13 +71,12 @@ out:
71 return ret; 71 return ret;
72} 72}
73 73
74int btrfs_set_root_node(struct btrfs_root_item *item, 74void btrfs_set_root_node(struct btrfs_root_item *item,
75 struct extent_buffer *node) 75 struct extent_buffer *node)
76{ 76{
77 btrfs_set_root_bytenr(item, node->start); 77 btrfs_set_root_bytenr(item, node->start);
78 btrfs_set_root_level(item, btrfs_header_level(node)); 78 btrfs_set_root_level(item, btrfs_header_level(node));
79 btrfs_set_root_generation(item, btrfs_header_generation(node)); 79 btrfs_set_root_generation(item, btrfs_header_generation(node));
80 return 0;
81} 80}
82 81
83/* 82/*
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index eb55863bb4ae..7dc36fab4afc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -216,17 +216,11 @@ static void wait_current_trans(struct btrfs_root *root)
216 spin_lock(&root->fs_info->trans_lock); 216 spin_lock(&root->fs_info->trans_lock);
217 cur_trans = root->fs_info->running_transaction; 217 cur_trans = root->fs_info->running_transaction;
218 if (cur_trans && cur_trans->blocked) { 218 if (cur_trans && cur_trans->blocked) {
219 DEFINE_WAIT(wait);
220 atomic_inc(&cur_trans->use_count); 219 atomic_inc(&cur_trans->use_count);
221 spin_unlock(&root->fs_info->trans_lock); 220 spin_unlock(&root->fs_info->trans_lock);
222 while (1) { 221
223 prepare_to_wait(&root->fs_info->transaction_wait, &wait, 222 wait_event(root->fs_info->transaction_wait,
224 TASK_UNINTERRUPTIBLE); 223 !cur_trans->blocked);
225 if (!cur_trans->blocked)
226 break;
227 schedule();
228 }
229 finish_wait(&root->fs_info->transaction_wait, &wait);
230 put_transaction(cur_trans); 224 put_transaction(cur_trans);
231 } else { 225 } else {
232 spin_unlock(&root->fs_info->trans_lock); 226 spin_unlock(&root->fs_info->trans_lock);
@@ -357,19 +351,10 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root
357} 351}
358 352
359/* wait for a transaction commit to be fully complete */ 353/* wait for a transaction commit to be fully complete */
360static noinline int wait_for_commit(struct btrfs_root *root, 354static noinline void wait_for_commit(struct btrfs_root *root,
361 struct btrfs_transaction *commit) 355 struct btrfs_transaction *commit)
362{ 356{
363 DEFINE_WAIT(wait); 357 wait_event(commit->commit_wait, commit->commit_done);
364 while (!commit->commit_done) {
365 prepare_to_wait(&commit->commit_wait, &wait,
366 TASK_UNINTERRUPTIBLE);
367 if (commit->commit_done)
368 break;
369 schedule();
370 }
371 finish_wait(&commit->commit_wait, &wait);
372 return 0;
373} 358}
374 359
375int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) 360int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
@@ -1085,22 +1070,7 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1085static void wait_current_trans_commit_start(struct btrfs_root *root, 1070static void wait_current_trans_commit_start(struct btrfs_root *root,
1086 struct btrfs_transaction *trans) 1071 struct btrfs_transaction *trans)
1087{ 1072{
1088 DEFINE_WAIT(wait); 1073 wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
1089
1090 if (trans->in_commit)
1091 return;
1092
1093 while (1) {
1094 prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait,
1095 TASK_UNINTERRUPTIBLE);
1096 if (trans->in_commit) {
1097 finish_wait(&root->fs_info->transaction_blocked_wait,
1098 &wait);
1099 break;
1100 }
1101 schedule();
1102 finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
1103 }
1104} 1074}
1105 1075
1106/* 1076/*
@@ -1110,24 +1080,8 @@ static void wait_current_trans_commit_start(struct btrfs_root *root,
1110static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root, 1080static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1111 struct btrfs_transaction *trans) 1081 struct btrfs_transaction *trans)
1112{ 1082{
1113 DEFINE_WAIT(wait); 1083 wait_event(root->fs_info->transaction_wait,
1114 1084 trans->commit_done || (trans->in_commit && !trans->blocked));
1115 if (trans->commit_done || (trans->in_commit && !trans->blocked))
1116 return;
1117
1118 while (1) {
1119 prepare_to_wait(&root->fs_info->transaction_wait, &wait,
1120 TASK_UNINTERRUPTIBLE);
1121 if (trans->commit_done ||
1122 (trans->in_commit && !trans->blocked)) {
1123 finish_wait(&root->fs_info->transaction_wait,
1124 &wait);
1125 break;
1126 }
1127 schedule();
1128 finish_wait(&root->fs_info->transaction_wait,
1129 &wait);
1130 }
1131} 1085}
1132 1086
1133/* 1087/*
@@ -1234,8 +1188,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1234 atomic_inc(&cur_trans->use_count); 1188 atomic_inc(&cur_trans->use_count);
1235 btrfs_end_transaction(trans, root); 1189 btrfs_end_transaction(trans, root);
1236 1190
1237 ret = wait_for_commit(root, cur_trans); 1191 wait_for_commit(root, cur_trans);
1238 BUG_ON(ret);
1239 1192
1240 put_transaction(cur_trans); 1193 put_transaction(cur_trans);
1241 1194
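
All of the transaction.c hunks are the same transformation: open-coded sleep loops built from DEFINE_WAIT()/prepare_to_wait()/schedule()/finish_wait() are replaced by wait_event(), which implements exactly that loop, including the "condition already true, don't sleep" fast path that the removed early returns handled by hand. A side-by-side sketch, using a placeholder waitqueue wq and condition cond (hypothetical names, not btrfs symbols):

    /* open-coded form being removed (condensed): */
    DEFINE_WAIT(wait);
    while (1) {
            prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
            if (cond)
                    break;
            schedule();
    }
    finish_wait(&wq, &wait);

    /* equivalent helper used by the new code: */
    wait_event(wq, cond);

Because wait_event() has nothing useful to return, wait_for_commit() loses its int return type, and the now-pointless BUG_ON(ret) at its call site in btrfs_commit_transaction() goes away with it.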
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ac278dd83175..babee65f8eda 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1617,7 +1617,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
1617 return 0; 1617 return 0;
1618 1618
1619 path = btrfs_alloc_path(); 1619 path = btrfs_alloc_path();
1620 BUG_ON(!path); 1620 if (!path)
1621 return -ENOMEM;
1621 1622
1622 nritems = btrfs_header_nritems(eb); 1623 nritems = btrfs_header_nritems(eb);
1623 for (i = 0; i < nritems; i++) { 1624 for (i = 0; i < nritems; i++) {
@@ -1723,7 +1724,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1723 return -ENOMEM; 1724 return -ENOMEM;
1724 1725
1725 if (*level == 1) { 1726 if (*level == 1) {
1726 wc->process_func(root, next, wc, ptr_gen); 1727 ret = wc->process_func(root, next, wc, ptr_gen);
1728 if (ret)
1729 return ret;
1727 1730
1728 path->slots[*level]++; 1731 path->slots[*level]++;
1729 if (wc->free) { 1732 if (wc->free) {
@@ -1788,8 +1791,11 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
1788 parent = path->nodes[*level + 1]; 1791 parent = path->nodes[*level + 1];
1789 1792
1790 root_owner = btrfs_header_owner(parent); 1793 root_owner = btrfs_header_owner(parent);
1791 wc->process_func(root, path->nodes[*level], wc, 1794 ret = wc->process_func(root, path->nodes[*level], wc,
1792 btrfs_header_generation(path->nodes[*level])); 1795 btrfs_header_generation(path->nodes[*level]));
1796 if (ret)
1797 return ret;
1798
1793 if (wc->free) { 1799 if (wc->free) {
1794 struct extent_buffer *next; 1800 struct extent_buffer *next;
1795 1801
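
The tree-log.c changes follow the broader BUG_ON removal in this series: a btrfs_alloc_path() failure now returns -ENOMEM instead of panicking, and the return value of wc->process_func() is propagated out of walk_down_log_tree()/walk_up_log_tree() instead of being silently dropped. The same btrfs_alloc_path() conversion repeats in volumes.c below. The shape of the change, condensed from the hunks above:

    /* before: any failure takes the whole machine down */
    path = btrfs_alloc_path();
    BUG_ON(!path);
    wc->process_func(root, next, wc, ptr_gen);      /* return value ignored */

    /* after: failures unwind to the caller */
    path = btrfs_alloc_path();
    if (!path)
            return -ENOMEM;
    ret = wc->process_func(root, next, wc, ptr_gen);
    if (ret)
            return ret;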
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b89e372c7544..53875ae73ad4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1037,7 +1037,8 @@ static noinline int find_next_chunk(struct btrfs_root *root,
1037 struct btrfs_key found_key; 1037 struct btrfs_key found_key;
1038 1038
1039 path = btrfs_alloc_path(); 1039 path = btrfs_alloc_path();
1040 BUG_ON(!path); 1040 if (!path)
1041 return -ENOMEM;
1041 1042
1042 key.objectid = objectid; 1043 key.objectid = objectid;
1043 key.offset = (u64)-1; 1044 key.offset = (u64)-1;
@@ -2061,8 +2062,10 @@ int btrfs_balance(struct btrfs_root *dev_root)
2061 2062
2062 /* step two, relocate all the chunks */ 2063 /* step two, relocate all the chunks */
2063 path = btrfs_alloc_path(); 2064 path = btrfs_alloc_path();
2064 BUG_ON(!path); 2065 if (!path) {
2065 2066 ret = -ENOMEM;
2067 goto error;
2068 }
2066 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2069 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2067 key.offset = (u64)-1; 2070 key.offset = (u64)-1;
2068 key.type = BTRFS_CHUNK_ITEM_KEY; 2071 key.type = BTRFS_CHUNK_ITEM_KEY;
@@ -2661,7 +2664,8 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2661 2664
2662 ret = find_next_chunk(fs_info->chunk_root, 2665 ret = find_next_chunk(fs_info->chunk_root,
2663 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset); 2666 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2664 BUG_ON(ret); 2667 if (ret)
2668 return ret;
2665 2669
2666 alloc_profile = BTRFS_BLOCK_GROUP_METADATA | 2670 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2667 (fs_info->metadata_alloc_profile & 2671 (fs_info->metadata_alloc_profile &