author		Chris Mason <chris.mason@oracle.com>	2012-03-28 20:31:37 -0400
committer	Chris Mason <chris.mason@oracle.com>	2012-03-28 20:31:37 -0400
commit		1d4284bd6e8d7dd1d5521a6747bdb6dc1caf0225 (patch)
tree		a7dde6312ec24eb6368cad7a3efedbf368a5a70c /fs/btrfs
parent		b5d67f64f9bc656970dacba245410f0faedad18e (diff)
parent		65139ed99234d8505948cdb7a835452eb5c191f9 (diff)
Merge branch 'error-handling' into for-linus
Conflicts:
	fs/btrfs/ctree.c
	fs/btrfs/disk-io.c
	fs/btrfs/extent-tree.c
	fs/btrfs/extent_io.c
	fs/btrfs/extent_io.h
	fs/btrfs/inode.c
	fs/btrfs/scrub.c

Signed-off-by: Chris Mason <chris.mason@oracle.com>
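The error-handling branch merged here applies two mechanical conversions across fs/btrfs, visible throughout the diff below: helpers that can only ever return 0 become void, and BUG_ON() calls on recoverable failures are either annotated with the only error they can see (e.g. /* -ENOMEM */) or replaced by propagating the error so the caller can abort the transaction. The following sketch is illustrative only and is not part of the commit; its types and helpers are stand-ins, not real btrfs symbols.

/*
 * Illustrative sketch only -- not part of the commit. The names below
 * are stand-ins that mimic the two conversions the branch applies:
 * int-returning helpers that can never fail become void, and
 * recoverable failures are returned to the caller (which marks the
 * transaction aborted) instead of hitting BUG_ON().
 */
#include <stdio.h>

struct fake_root {
	int aborted;
};

/* before the series: "static int fixup(...) { ...; return 0; }" */
static void fixup(struct fake_root *root)
{
	/* purely in-memory bookkeeping, nothing here can fail */
	(void)root;
}

/* before the series: a failed lookup here would have hit BUG_ON(ret) */
static int lookup(struct fake_root *root, int simulate_error)
{
	fixup(root);
	if (simulate_error)
		return -5;	/* pretend the lower layer returned -EIO */
	return 0;
}

static int cow_block(struct fake_root *root, int simulate_error)
{
	int ret = lookup(root, simulate_error);

	if (ret) {
		/* stand-in for btrfs_abort_transaction(trans, root, ret) */
		root->aborted = 1;
		return ret;
	}
	return 0;
}

int main(void)
{
	struct fake_root root = { 0 };

	printf("ok path: %d\n", cow_block(&root, 0));
	printf("error path: %d (aborted=%d)\n",
	       cow_block(&root, 1), root.aborted);
	return 0;
}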
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/async-thread.c		15
-rw-r--r--	fs/btrfs/async-thread.h		4
-rw-r--r--	fs/btrfs/compression.c		38
-rw-r--r--	fs/btrfs/compression.h		2
-rw-r--r--	fs/btrfs/ctree.c		250
-rw-r--r--	fs/btrfs/ctree.h		94
-rw-r--r--	fs/btrfs/delayed-inode.c	33
-rw-r--r--	fs/btrfs/delayed-ref.c		33
-rw-r--r--	fs/btrfs/dir-item.c		10
-rw-r--r--	fs/btrfs/disk-io.c		261
-rw-r--r--	fs/btrfs/disk-io.h		10
-rw-r--r--	fs/btrfs/export.c		2
-rw-r--r--	fs/btrfs/extent-tree.c		383
-rw-r--r--	fs/btrfs/extent_io.c		236
-rw-r--r--	fs/btrfs/extent_io.h		15
-rw-r--r--	fs/btrfs/file-item.c		53
-rw-r--r--	fs/btrfs/file.c			52
-rw-r--r--	fs/btrfs/free-space-cache.c	15
-rw-r--r--	fs/btrfs/inode-item.c		5
-rw-r--r--	fs/btrfs/inode-map.c		19
-rw-r--r--	fs/btrfs/inode.c		438
-rw-r--r--	fs/btrfs/ioctl.c		101
-rw-r--r--	fs/btrfs/locking.c		6
-rw-r--r--	fs/btrfs/locking.h		4
-rw-r--r--	fs/btrfs/ordered-data.c		60
-rw-r--r--	fs/btrfs/ordered-data.h		24
-rw-r--r--	fs/btrfs/orphan.c		2
-rw-r--r--	fs/btrfs/relocation.c		130
-rw-r--r--	fs/btrfs/root-tree.c		25
-rw-r--r--	fs/btrfs/scrub.c		24
-rw-r--r--	fs/btrfs/super.c		190
-rw-r--r--	fs/btrfs/transaction.c		210
-rw-r--r--	fs/btrfs/transaction.h		3
-rw-r--r--	fs/btrfs/tree-log.c		96
-rw-r--r--	fs/btrfs/tree-log.h		2
-rw-r--r--	fs/btrfs/volumes.c		142
-rw-r--r--	fs/btrfs/volumes.h		4
37 files changed, 1973 insertions, 1018 deletions
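For orientation before the diff itself: the ctree.h hunk below extends the btrfs_std_error() macro and adds btrfs_abort_transaction(), btrfs_error() and btrfs_panic(), all built around __func__ and __LINE__. The following sketch uses stand-in names, not the btrfs macros themselves; it only shows the do { } while (0) shape those macros share, so that a call site reads as a single statement and the report records where the error was raised.

#include <stdio.h>

static void report_error(const char *function, unsigned int line, int errnum)
{
	fprintf(stderr, "error %d at %s:%u\n", errnum, function, line);
}

/* same shape as the btrfs_std_error() macro in the ctree.h hunk below */
#define std_error(errnum)					\
do {								\
	if ((errnum))						\
		report_error(__func__, __LINE__, (errnum));	\
} while (0)

int main(void)
{
	std_error(0);	/* an errno of 0 is ignored, nothing is reported */
	std_error(-30);	/* e.g. -EROFS: logged with the call-site info */
	return 0;
}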
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 0cc20b35c1c4..42704149b723 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -171,11 +171,11 @@ out:
 	spin_unlock_irqrestore(&workers->lock, flags);
 }

-static noinline int run_ordered_completions(struct btrfs_workers *workers,
+static noinline void run_ordered_completions(struct btrfs_workers *workers,
 					    struct btrfs_work *work)
 {
 	if (!workers->ordered)
-		return 0;
+		return;

 	set_bit(WORK_DONE_BIT, &work->flags);

@@ -213,7 +213,6 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 	}

 	spin_unlock(&workers->order_lock);
-	return 0;
 }

 static void put_worker(struct btrfs_worker_thread *worker)
@@ -399,7 +398,7 @@ again:
 /*
  * this will wait for all the worker threads to shutdown
  */
-int btrfs_stop_workers(struct btrfs_workers *workers)
+void btrfs_stop_workers(struct btrfs_workers *workers)
 {
 	struct list_head *cur;
 	struct btrfs_worker_thread *worker;
@@ -427,7 +426,6 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 		put_worker(worker);
 	}
 	spin_unlock_irq(&workers->lock);
-	return 0;
 }

 /*
@@ -615,14 +613,14 @@ found:
  * it was taken from. It is intended for use with long running work functions
  * that make some progress and want to give the cpu up for others.
  */
-int btrfs_requeue_work(struct btrfs_work *work)
+void btrfs_requeue_work(struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker = work->worker;
 	unsigned long flags;
 	int wake = 0;

 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-		goto out;
+		return;

 	spin_lock_irqsave(&worker->lock, flags);
 	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
@@ -649,9 +647,6 @@ int btrfs_requeue_work(struct btrfs_work *work)
 	if (wake)
 		wake_up_process(worker->task);
 	spin_unlock_irqrestore(&worker->lock, flags);
-out:
-
-	return 0;
 }

 void btrfs_set_work_high_prio(struct btrfs_work *work)
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index f34cc31fa3c9..063698b90ce2 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -111,9 +111,9 @@ struct btrfs_workers {

 void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
 int btrfs_start_workers(struct btrfs_workers *workers);
-int btrfs_stop_workers(struct btrfs_workers *workers);
+void btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
 			struct btrfs_workers *async_starter);
-int btrfs_requeue_work(struct btrfs_work *work);
+void btrfs_requeue_work(struct btrfs_work *work);
 void btrfs_set_work_high_prio(struct btrfs_work *work);
 #endif
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d02c27cd14c7..d11afa67c7d8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -226,8 +226,8 @@ out:
  * Clear the writeback bits on all of the file
  * pages for a compressed write
  */
-static noinline int end_compressed_writeback(struct inode *inode, u64 start,
+static noinline void end_compressed_writeback(struct inode *inode, u64 start,
 					      unsigned long ram_size)
 {
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
@@ -253,7 +253,6 @@ static noinline int end_compressed_writeback(struct inode *inode, u64 start,
 		index += ret;
 	}
 	/* the inode may be gone now */
-	return 0;
 }

 /*
@@ -392,16 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			 */
 			atomic_inc(&cb->pending_bios);
 			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */

 			if (!skip_sum) {
 				ret = btrfs_csum_one_bio(root, inode, bio,
 							 start, 1);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 			}

 			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */

 			bio_put(bio);

@@ -421,15 +420,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	bio_get(bio);

 	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */

 	if (!skip_sum) {
 		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}

 	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */

 	bio_put(bio);
 	return 0;
@@ -497,7 +496,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 * sure they map to this compressed extent on disk.
 		 */
 		set_page_extent_mapped(page);
-		lock_extent(tree, last_offset, end, GFP_NOFS);
+		lock_extent(tree, last_offset, end);
 		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, last_offset,
 					   PAGE_CACHE_SIZE);
@@ -507,7 +506,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
 		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
 			free_extent_map(em);
-			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
 			page_cache_release(page);
 			break;
@@ -535,7 +534,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			nr_pages++;
 			page_cache_release(page);
 		} else {
-			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
 			page_cache_release(page);
 			break;
@@ -662,7 +661,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 			bio_get(comp_bio);

 			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */

 			/*
 			 * inc the count before we submit the bio so
@@ -675,14 +674,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 				ret = btrfs_lookup_bio_sums(root, inode,
 							comp_bio, sums);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 			}
 			sums += (comp_bio->bi_size + root->sectorsize - 1) /
 				root->sectorsize;

 			ret = btrfs_map_bio(root, READ, comp_bio,
 					    mirror_num, 0);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */

 			bio_put(comp_bio);

@@ -698,15 +697,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	bio_get(comp_bio);

 	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */

 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
-		BUG_ON(ret);
+		BUG_ON(ret); /* -ENOMEM */
 	}

 	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
-	BUG_ON(ret);
+	BUG_ON(ret); /* -ENOMEM */

 	bio_put(comp_bio);
 	return 0;
@@ -734,7 +733,7 @@ struct btrfs_compress_op *btrfs_compress_op[] = {
 	&btrfs_lzo_compress,
 };

-int __init btrfs_init_compress(void)
+void __init btrfs_init_compress(void)
 {
 	int i;

@@ -744,7 +743,6 @@ int __init btrfs_init_compress(void)
 		atomic_set(&comp_alloc_workspace[i], 0);
 		init_waitqueue_head(&comp_workspace_wait[i]);
 	}
-	return 0;
 }

 /*
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index a12059f4f0fd..9afb0a62ae82 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -19,7 +19,7 @@
 #ifndef __BTRFS_COMPRESSION_
 #define __BTRFS_COMPRESSION_

-int btrfs_init_compress(void);
+void btrfs_init_compress(void);
 void btrfs_exit_compress(void);

 int btrfs_compress_pages(int type, struct address_space *mapping,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 270655da11d1..e801f226d7e0 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -36,7 +36,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root,
 			      struct extent_buffer *dst_buf,
 			      struct extent_buffer *src_buf);
-static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		   struct btrfs_path *path, int level, int slot);

 struct btrfs_path *btrfs_alloc_path(void)
@@ -344,8 +344,13 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 	if (btrfs_block_can_be_shared(root, buf)) {
 		ret = btrfs_lookup_extent_info(trans, root, buf->start,
 					       buf->len, &refs, &flags);
-		BUG_ON(ret);
-		BUG_ON(refs == 0);
+		if (ret)
+			return ret;
+		if (refs == 0) {
+			ret = -EROFS;
+			btrfs_std_error(root->fs_info, ret);
+			return ret;
+		}
 	} else {
 		refs = 1;
 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
@@ -364,14 +369,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
 			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */

 			if (root->root_key.objectid ==
 			    BTRFS_TREE_RELOC_OBJECTID) {
 				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
-				BUG_ON(ret);
+				BUG_ON(ret); /* -ENOMEM */
 			}
 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
 		} else {
@@ -381,14 +386,15 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 			else
 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 		}
 		if (new_flags != 0) {
 			ret = btrfs_set_disk_extent_flags(trans, root,
 							  buf->start,
 							  buf->len,
 							  new_flags, 0);
-			BUG_ON(ret);
+			if (ret)
+				return ret;
 		}
 	} else {
 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
@@ -397,9 +403,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 			else
 				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
-			BUG_ON(ret);
+			BUG_ON(ret); /* -ENOMEM */
 		}
 		clean_tree_block(trans, root, buf);
 		*last_ref = 1;
@@ -428,7 +434,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_disk_key disk_key;
 	struct extent_buffer *cow;
-	int level;
+	int level, ret;
 	int last_ref = 0;
 	int unlock_orig = 0;
 	u64 parent_start;
@@ -480,7 +486,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 			    (unsigned long)btrfs_header_fsid(cow),
 			    BTRFS_FSID_SIZE);

-	update_ref_for_cow(trans, root, buf, cow, &last_ref);
+	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
+	}

 	if (root->ref_cows)
 		btrfs_reloc_cow_block(trans, root, buf, cow);
@@ -947,7 +957,12 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,

 		/* promote the child to a root */
 		child = read_node_slot(root, mid, 0);
-		BUG_ON(!child);
+		if (!child) {
+			ret = -EROFS;
+			btrfs_std_error(root->fs_info, ret);
+			goto enospc;
+		}
+
 		btrfs_tree_lock(child);
 		btrfs_set_lock_blocking(child);
 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
@@ -1023,10 +1038,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		if (btrfs_header_nritems(right) == 0) {
 			clean_tree_block(trans, root, right);
 			btrfs_tree_unlock(right);
-			wret = del_ptr(trans, root, path, level + 1, pslot +
-				       1);
-			if (wret)
-				ret = wret;
+			del_ptr(trans, root, path, level + 1, pslot + 1);
 			root_sub_used(root, right->len);
 			btrfs_free_tree_block(trans, root, right, 0, 1, 0);
 			free_extent_buffer_stale(right);
@@ -1048,7 +1060,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		 * otherwise we would have pulled some pointers from the
 		 * right
 		 */
-		BUG_ON(!left);
+		if (!left) {
+			ret = -EROFS;
+			btrfs_std_error(root->fs_info, ret);
+			goto enospc;
+		}
 		wret = balance_node_right(trans, root, mid, left);
 		if (wret < 0) {
 			ret = wret;
@@ -1064,9 +1080,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 	if (btrfs_header_nritems(mid) == 0) {
 		clean_tree_block(trans, root, mid);
 		btrfs_tree_unlock(mid);
-		wret = del_ptr(trans, root, path, level + 1, pslot);
-		if (wret)
-			ret = wret;
+		del_ptr(trans, root, path, level + 1, pslot);
 		root_sub_used(root, mid->len);
 		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
 		free_extent_buffer_stale(mid);
@@ -1905,15 +1919,12 @@ done:
  * fixing up pointers when a given leaf/node is not in slot 0 of the
  * higher levels
  *
- * If this fails to write a tree block, it returns -1, but continues
- * fixing up the blocks in ram so the tree is consistent.
  */
-static int fixup_low_keys(struct btrfs_trans_handle *trans,
+static void fixup_low_keys(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, struct btrfs_path *path,
 			   struct btrfs_disk_key *key, int level)
 {
 	int i;
-	int ret = 0;
 	struct extent_buffer *t;

 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
@@ -1926,7 +1937,6 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,
 		if (tslot != 0)
 			break;
 	}
-	return ret;
 }

 /*
@@ -1935,9 +1945,9 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,
  * This function isn't completely safe. It's the caller's responsibility
  * that the new key won't break the order
  */
-int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root, struct btrfs_path *path,
 			     struct btrfs_key *new_key)
 {
 	struct btrfs_disk_key disk_key;
 	struct extent_buffer *eb;
@@ -1947,13 +1957,11 @@ int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
 	slot = path->slots[0];
 	if (slot > 0) {
 		btrfs_item_key(eb, &disk_key, slot - 1);
-		if (comp_keys(&disk_key, new_key) >= 0)
-			return -1;
+		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
 	}
 	if (slot < btrfs_header_nritems(eb) - 1) {
 		btrfs_item_key(eb, &disk_key, slot + 1);
-		if (comp_keys(&disk_key, new_key) <= 0)
-			return -1;
+		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
 	}

 	btrfs_cpu_key_to_disk(&disk_key, new_key);
@@ -1961,7 +1969,6 @@ int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(eb);
 	if (slot == 0)
 		fixup_low_keys(trans, root, path, &disk_key, 1);
-	return 0;
 }

 /*
@@ -2164,12 +2171,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
  *
  * slot and level indicate where you want the key to go, and
  * blocknr is the block the key points to.
- *
- * returns zero on success and < 0 on any error
  */
-static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
-		      *root, struct btrfs_path *path, struct btrfs_disk_key
-		      *key, u64 bytenr, int slot, int level)
+static void insert_ptr(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct btrfs_path *path,
+		       struct btrfs_disk_key *key, u64 bytenr,
+		       int slot, int level)
 {
 	struct extent_buffer *lower;
 	int nritems;
@@ -2179,8 +2185,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
 	lower = path->nodes[level];
 	nritems = btrfs_header_nritems(lower);
 	BUG_ON(slot > nritems);
-	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
-		BUG();
+	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
 	if (slot != nritems) {
 		memmove_extent_buffer(lower,
 			      btrfs_node_key_ptr_offset(slot + 1),
@@ -2193,7 +2198,6 @@ static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
 	btrfs_set_header_nritems(lower, nritems + 1);
 	btrfs_mark_buffer_dirty(lower);
-	return 0;
 }

 /*
@@ -2214,7 +2218,6 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	struct btrfs_disk_key disk_key;
 	int mid;
 	int ret;
-	int wret;
 	u32 c_nritems;

 	c = path->nodes[level];
@@ -2271,11 +2274,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(c);
 	btrfs_mark_buffer_dirty(split);

-	wret = insert_ptr(trans, root, path, &disk_key, split->start,
-			  path->slots[level + 1] + 1,
-			  level + 1);
-	if (wret)
-		ret = wret;
+	insert_ptr(trans, root, path, &disk_key, split->start,
+		   path->slots[level + 1] + 1, level + 1);

 	if (path->slots[level] >= mid) {
 		path->slots[level] -= mid;
@@ -2564,7 +2564,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 	u32 old_left_nritems;
 	u32 nr;
 	int ret = 0;
-	int wret;
 	u32 this_item_size;
 	u32 old_left_item_size;
 	struct btrfs_map_token token;
@@ -2675,9 +2674,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 		clean_tree_block(trans, root, right);

 	btrfs_item_key(right, &disk_key, 0);
-	wret = fixup_low_keys(trans, root, path, &disk_key, 1);
-	if (wret)
-		ret = wret;
+	fixup_low_keys(trans, root, path, &disk_key, 1);

 	/* then fixup the leaf pointer in the path */
 	if (path->slots[0] < push_items) {
@@ -2748,7 +2745,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 				    path->nodes[1], slot - 1, &left);
 	if (ret) {
 		/* we hit -ENOSPC, but it isn't fatal here */
-		ret = 1;
+		if (ret == -ENOSPC)
+			ret = 1;
 		goto out;
 	}

@@ -2770,21 +2768,17 @@ out:
 /*
  * split the path's leaf in two, making sure there is at least data_size
  * available for the resulting leaf level of the path.
- *
- * returns 0 if all went well and < 0 on failure.
  */
-static noinline int copy_for_split(struct btrfs_trans_handle *trans,
+static noinline void copy_for_split(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
 			       struct btrfs_path *path,
 			       struct extent_buffer *l,
 			       struct extent_buffer *right,
 			       int slot, int mid, int nritems)
 {
 	int data_copy_size;
 	int rt_data_off;
 	int i;
-	int ret = 0;
-	int wret;
 	struct btrfs_disk_key disk_key;
 	struct btrfs_map_token token;

@@ -2816,12 +2810,9 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
 	}

 	btrfs_set_header_nritems(l, mid);
-	ret = 0;
 	btrfs_item_key(right, &disk_key, 0);
-	wret = insert_ptr(trans, root, path, &disk_key, right->start,
-			  path->slots[1] + 1, 1);
-	if (wret)
-		ret = wret;
+	insert_ptr(trans, root, path, &disk_key, right->start,
+		   path->slots[1] + 1, 1);

 	btrfs_mark_buffer_dirty(right);
 	btrfs_mark_buffer_dirty(l);
@@ -2839,8 +2830,6 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
 	}

 	BUG_ON(path->slots[0] < 0);
-
-	return ret;
 }

 /*
@@ -3029,12 +3018,8 @@ again:
 	if (split == 0) {
 		if (mid <= slot) {
 			btrfs_set_header_nritems(right, 0);
-			wret = insert_ptr(trans, root, path,
-					  &disk_key, right->start,
-					  path->slots[1] + 1, 1);
-			if (wret)
-				ret = wret;
-
+			insert_ptr(trans, root, path, &disk_key, right->start,
+				   path->slots[1] + 1, 1);
 			btrfs_tree_unlock(path->nodes[0]);
 			free_extent_buffer(path->nodes[0]);
 			path->nodes[0] = right;
@@ -3042,29 +3027,21 @@ again:
 			path->slots[1] += 1;
 		} else {
 			btrfs_set_header_nritems(right, 0);
-			wret = insert_ptr(trans, root, path,
-					  &disk_key,
-					  right->start,
-					  path->slots[1], 1);
-			if (wret)
-				ret = wret;
+			insert_ptr(trans, root, path, &disk_key, right->start,
+				   path->slots[1], 1);
 			btrfs_tree_unlock(path->nodes[0]);
 			free_extent_buffer(path->nodes[0]);
 			path->nodes[0] = right;
 			path->slots[0] = 0;
-			if (path->slots[1] == 0) {
-				wret = fixup_low_keys(trans, root,
-						path, &disk_key, 1);
-				if (wret)
-					ret = wret;
-			}
+			if (path->slots[1] == 0)
+				fixup_low_keys(trans, root, path,
+					       &disk_key, 1);
 		}
 		btrfs_mark_buffer_dirty(right);
 		return ret;
 	}

-	ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
-	BUG_ON(ret);
+	copy_for_split(trans, root, path, l, right, slot, mid, nritems);

 	if (split == 2) {
 		BUG_ON(num_doubles != 0);
@@ -3072,7 +3049,7 @@ again:
 		goto again;
 	}

-	return ret;
+	return 0;

 push_for_double:
 	push_for_double_split(trans, root, path, data_size);
@@ -3274,11 +3251,9 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
 		return ret;

 	path->slots[0]++;
-	ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
-				     item_size, item_size +
-				     sizeof(struct btrfs_item), 1);
-	BUG_ON(ret);
-
+	setup_items_for_insert(trans, root, path, new_key, &item_size,
+			       item_size, item_size +
+			       sizeof(struct btrfs_item), 1);
 	leaf = path->nodes[0];
 	memcpy_extent_buffer(leaf,
 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
@@ -3293,10 +3268,10 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
  * off the end of the item or if we shift the item to chop bytes off
  * the front.
  */
-int btrfs_truncate_item(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root,
-			struct btrfs_path *path,
-			u32 new_size, int from_end)
+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root,
+			 struct btrfs_path *path,
+			 u32 new_size, int from_end)
 {
 	int slot;
 	struct extent_buffer *leaf;
@@ -3316,7 +3291,7 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,

 	old_size = btrfs_item_size_nr(leaf, slot);
 	if (old_size == new_size)
-		return 0;
+		return;

 	nritems = btrfs_header_nritems(leaf);
 	data_end = leaf_data_end(root, leaf);
@@ -3390,15 +3365,14 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
 		btrfs_print_leaf(root, leaf);
 		BUG();
 	}
-	return 0;
 }

 /*
  * make the item pointed to by the path bigger, data_size is the new size.
  */
-int btrfs_extend_item(struct btrfs_trans_handle *trans,
-		      struct btrfs_root *root, struct btrfs_path *path,
-		      u32 data_size)
+void btrfs_extend_item(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct btrfs_path *path,
+		       u32 data_size)
 {
 	int slot;
 	struct extent_buffer *leaf;
@@ -3460,7 +3434,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
 		btrfs_print_leaf(root, leaf);
 		BUG();
 	}
-	return 0;
 }

 /*
@@ -3593,7 +3566,7 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
 	ret = 0;
 	if (slot == 0) {
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
-		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+		fixup_low_keys(trans, root, path, &disk_key, 1);
 	}

 	if (btrfs_leaf_free_space(root, leaf) < 0) {
@@ -3611,17 +3584,16 @@ out:
  * to save stack depth by doing the bulk of the work in a function
  * that doesn't call btrfs_search_slot
  */
-int setup_items_for_insert(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root, struct btrfs_path *path,
-			   struct btrfs_key *cpu_key, u32 *data_size,
-			   u32 total_data, u32 total_size, int nr)
+void setup_items_for_insert(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, struct btrfs_path *path,
+			    struct btrfs_key *cpu_key, u32 *data_size,
+			    u32 total_data, u32 total_size, int nr)
 {
 	struct btrfs_item *item;
 	int i;
 	u32 nritems;
 	unsigned int data_end;
 	struct btrfs_disk_key disk_key;
-	int ret;
 	struct extent_buffer *leaf;
 	int slot;
 	struct btrfs_map_token token;
@@ -3687,10 +3659,9 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,

 	btrfs_set_header_nritems(leaf, nritems + nr);

-	ret = 0;
 	if (slot == 0) {
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
-		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+		fixup_low_keys(trans, root, path, &disk_key, 1);
 	}
 	btrfs_unlock_up_safe(path, 1);
 	btrfs_mark_buffer_dirty(leaf);
@@ -3699,7 +3670,6 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
 		btrfs_print_leaf(root, leaf);
 		BUG();
 	}
-	return ret;
 }

 /*
@@ -3726,16 +3696,14 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
 	if (ret == 0)
 		return -EEXIST;
 	if (ret < 0)
-		goto out;
+		return ret;

 	slot = path->slots[0];
 	BUG_ON(slot < 0);

-	ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
-			       total_data, total_size, nr);
-
-out:
-	return ret;
+	setup_items_for_insert(trans, root, path, cpu_key, data_size,
+			       total_data, total_size, nr);
+	return 0;
 }

 /*
@@ -3771,13 +3739,11 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
  * the tree should have been previously balanced so the deletion does not
  * empty a node.
  */
-static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		   struct btrfs_path *path, int level, int slot)
+static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		    struct btrfs_path *path, int level, int slot)
 {
 	struct extent_buffer *parent = path->nodes[level];
 	u32 nritems;
-	int ret = 0;
-	int wret;

 	nritems = btrfs_header_nritems(parent);
 	if (slot != nritems - 1) {
@@ -3797,12 +3763,9 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		struct btrfs_disk_key disk_key;

 		btrfs_node_key(parent, &disk_key, 0);
-		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
-		if (wret)
-			ret = wret;
+		fixup_low_keys(trans, root, path, &disk_key, level + 1);
 	}
 	btrfs_mark_buffer_dirty(parent);
-	return ret;
 }

 /*
@@ -3815,17 +3778,13 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  * The path must have already been setup for deleting the leaf, including
  * all the proper balancing. path->nodes[1] must be locked.
  */
-static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root,
-				   struct btrfs_path *path,
-				   struct extent_buffer *leaf)
+static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root,
+				    struct btrfs_path *path,
+				    struct extent_buffer *leaf)
 {
-	int ret;
-
 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
-	ret = del_ptr(trans, root, path, 1, path->slots[1]);
-	if (ret)
-		return ret;
+	del_ptr(trans, root, path, 1, path->slots[1]);

 	/*
 	 * btrfs_free_extent is expensive, we want to make sure we
@@ -3838,7 +3797,6 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
 	extent_buffer_get(leaf);
 	btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
 	free_extent_buffer_stale(leaf);
-	return 0;
 }
 /*
  * delete the item at the leaf level in path. If that empties
@@ -3899,8 +3857,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		} else {
 			btrfs_set_path_blocking(path);
 			clean_tree_block(trans, root, leaf);
-			ret = btrfs_del_leaf(trans, root, path, leaf);
-			BUG_ON(ret);
+			btrfs_del_leaf(trans, root, path, leaf);
 		}
 	} else {
 		int used = leaf_space_used(leaf, 0, nritems);
@@ -3908,10 +3865,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			struct btrfs_disk_key disk_key;

 			btrfs_item_key(leaf, &disk_key, 0);
-			wret = fixup_low_keys(trans, root, path,
-					      &disk_key, 1);
-			if (wret)
-				ret = wret;
+			fixup_low_keys(trans, root, path, &disk_key, 1);
 		}

 		/* delete the leaf if it is mostly empty */
@@ -3939,9 +3893,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,

 		if (btrfs_header_nritems(leaf) == 0) {
 			path->slots[1] = slot;
-			ret = btrfs_del_leaf(trans, root, path, leaf);
-			BUG_ON(ret);
+			btrfs_del_leaf(trans, root, path, leaf);
 			free_extent_buffer(leaf);
+			ret = 0;
 		} else {
 			/* if we're still in the path, make sure
 			 * we're dirty. Otherwise, one of the
@@ -4124,7 +4078,7 @@ find_next_key:
 		}
 		btrfs_set_path_blocking(path);
 		cur = read_node_slot(root, cur, slot);
-		BUG_ON(!cur);
+		BUG_ON(!cur); /* -ENOMEM */

 		btrfs_tree_read_lock(cur);

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index f7da8a8d13c1..ed2d196f7a84 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1525,6 +1525,7 @@ struct btrfs_ioctl_defrag_range_args {
1525#define BTRFS_MOUNT_SKIP_BALANCE (1 << 19) 1525#define BTRFS_MOUNT_SKIP_BALANCE (1 << 19)
1526#define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20) 1526#define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20)
1527#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21) 1527#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
1528#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22)
1528 1529
1529#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 1530#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
1530#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 1531#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2518,8 +2519,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
2518int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); 2519int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
2519int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 2520int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
2520 u64 start, u64 len); 2521 u64 start, u64 len);
2521int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 2522void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
2522 struct btrfs_root *root); 2523 struct btrfs_root *root);
2523int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, 2524int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2524 struct btrfs_root *root); 2525 struct btrfs_root *root);
2525int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 2526int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
@@ -2582,8 +2583,8 @@ void btrfs_block_rsv_release(struct btrfs_root *root,
2582 u64 num_bytes); 2583 u64 num_bytes);
2583int btrfs_set_block_group_ro(struct btrfs_root *root, 2584int btrfs_set_block_group_ro(struct btrfs_root *root,
2584 struct btrfs_block_group_cache *cache); 2585 struct btrfs_block_group_cache *cache);
2585int btrfs_set_block_group_rw(struct btrfs_root *root, 2586void btrfs_set_block_group_rw(struct btrfs_root *root,
2586 struct btrfs_block_group_cache *cache); 2587 struct btrfs_block_group_cache *cache);
2587void btrfs_put_block_group_cache(struct btrfs_fs_info *info); 2588void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
2588u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); 2589u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
2589int btrfs_error_unpin_extent_range(struct btrfs_root *root, 2590int btrfs_error_unpin_extent_range(struct btrfs_root *root,
@@ -2602,9 +2603,9 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2);
2602int btrfs_previous_item(struct btrfs_root *root, 2603int btrfs_previous_item(struct btrfs_root *root,
2603 struct btrfs_path *path, u64 min_objectid, 2604 struct btrfs_path *path, u64 min_objectid,
2604 int type); 2605 int type);
2605int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 2606void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2606 struct btrfs_root *root, struct btrfs_path *path, 2607 struct btrfs_root *root, struct btrfs_path *path,
2607 struct btrfs_key *new_key); 2608 struct btrfs_key *new_key);
2608struct extent_buffer *btrfs_root_node(struct btrfs_root *root); 2609struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
2609struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); 2610struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
2610int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 2611int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
@@ -2624,12 +2625,13 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
2624 struct extent_buffer **cow_ret, u64 new_root_objectid); 2625 struct extent_buffer **cow_ret, u64 new_root_objectid);
2625int btrfs_block_can_be_shared(struct btrfs_root *root, 2626int btrfs_block_can_be_shared(struct btrfs_root *root,
2626 struct extent_buffer *buf); 2627 struct extent_buffer *buf);
2627int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root 2628void btrfs_extend_item(struct btrfs_trans_handle *trans,
2628 *root, struct btrfs_path *path, u32 data_size); 2629 struct btrfs_root *root, struct btrfs_path *path,
2629int btrfs_truncate_item(struct btrfs_trans_handle *trans, 2630 u32 data_size);
2630 struct btrfs_root *root, 2631void btrfs_truncate_item(struct btrfs_trans_handle *trans,
2631 struct btrfs_path *path, 2632 struct btrfs_root *root,
2632 u32 new_size, int from_end); 2633 struct btrfs_path *path,
2634 u32 new_size, int from_end);
2633int btrfs_split_item(struct btrfs_trans_handle *trans, 2635int btrfs_split_item(struct btrfs_trans_handle *trans,
2634 struct btrfs_root *root, 2636 struct btrfs_root *root,
2635 struct btrfs_path *path, 2637 struct btrfs_path *path,
@@ -2663,10 +2665,10 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
2663 return btrfs_del_items(trans, root, path, path->slots[0], 1); 2665 return btrfs_del_items(trans, root, path, path->slots[0], 1);
2664} 2666}
2665 2667
2666int setup_items_for_insert(struct btrfs_trans_handle *trans, 2668void setup_items_for_insert(struct btrfs_trans_handle *trans,
2667 struct btrfs_root *root, struct btrfs_path *path, 2669 struct btrfs_root *root, struct btrfs_path *path,
2668 struct btrfs_key *cpu_key, u32 *data_size, 2670 struct btrfs_key *cpu_key, u32 *data_size,
2669 u32 total_data, u32 total_size, int nr); 2671 u32 total_data, u32 total_size, int nr);
2670int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root 2672int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
2671 *root, struct btrfs_key *key, void *data, u32 data_size); 2673 *root, struct btrfs_key *key, void *data, u32 data_size);
2672int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 2674int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
@@ -2693,9 +2695,9 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
2693} 2695}
2694int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 2696int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
2695int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); 2697int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
2696void btrfs_drop_snapshot(struct btrfs_root *root, 2698int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
2697 struct btrfs_block_rsv *block_rsv, int update_ref, 2699 struct btrfs_block_rsv *block_rsv,
2698 int for_reloc); 2700 int update_ref, int for_reloc);
2699int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 2701int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
2700 struct btrfs_root *root, 2702 struct btrfs_root *root,
2701 struct extent_buffer *node, 2703 struct extent_buffer *node,
@@ -2757,9 +2759,10 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2757int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root 2759int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
2758 *root, struct btrfs_key *key, struct btrfs_root_item 2760 *root, struct btrfs_key *key, struct btrfs_root_item
2759 *item); 2761 *item);
2760int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root 2762int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
2761 *root, struct btrfs_key *key, struct btrfs_root_item 2763 struct btrfs_root *root,
2762 *item); 2764 struct btrfs_key *key,
2765 struct btrfs_root_item *item);
2763int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct 2766int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
2764 btrfs_root_item *item, struct btrfs_key *key); 2767 btrfs_root_item *item, struct btrfs_key *key);
2765int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); 2768int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
@@ -2943,7 +2946,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root);
2943void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 2946void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2944 struct btrfs_root *root); 2947 struct btrfs_root *root);
2945int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); 2948int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
2946int btrfs_invalidate_inodes(struct btrfs_root *root); 2949void btrfs_invalidate_inodes(struct btrfs_root *root);
2947void btrfs_add_delayed_iput(struct inode *inode); 2950void btrfs_add_delayed_iput(struct inode *inode);
2948void btrfs_run_delayed_iputs(struct btrfs_root *root); 2951void btrfs_run_delayed_iputs(struct btrfs_root *root);
2949int btrfs_prealloc_file_range(struct inode *inode, int mode, 2952int btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -2995,13 +2998,41 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
2995/* super.c */ 2998/* super.c */
2996int btrfs_parse_options(struct btrfs_root *root, char *options); 2999int btrfs_parse_options(struct btrfs_root *root, char *options);
2997int btrfs_sync_fs(struct super_block *sb, int wait); 3000int btrfs_sync_fs(struct super_block *sb, int wait);
3001void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...);
2998void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, 3002void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
2999 unsigned int line, int errno); 3003 unsigned int line, int errno, const char *fmt, ...);
3004
3005void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
3006 struct btrfs_root *root, const char *function,
3007 unsigned int line, int errno);
3008
3009#define btrfs_abort_transaction(trans, root, errno) \
3010do { \
3011 __btrfs_abort_transaction(trans, root, __func__, \
3012 __LINE__, errno); \
3013} while (0)
3000 3014
3001#define btrfs_std_error(fs_info, errno) \ 3015#define btrfs_std_error(fs_info, errno) \
3002do { \ 3016do { \
3003 if ((errno)) \ 3017 if ((errno)) \
3004 __btrfs_std_error((fs_info), __func__, __LINE__, (errno));\ 3018 __btrfs_std_error((fs_info), __func__, \
3019 __LINE__, (errno), NULL); \
3020} while (0)
3021
3022#define btrfs_error(fs_info, errno, fmt, args...) \
3023do { \
3024 __btrfs_std_error((fs_info), __func__, __LINE__, \
3025 (errno), fmt, ##args); \
3026} while (0)
3027
3028void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
3029 unsigned int line, int errno, const char *fmt, ...);
3030
3031#define btrfs_panic(fs_info, errno, fmt, args...) \
3032do { \
3033 struct btrfs_fs_info *_i = (fs_info); \
3034 __btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args); \
3035 BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)); \
3005} while (0) 3036} while (0)
3006 3037
3007/* acl.c */ 3038/* acl.c */
@@ -3037,16 +3068,17 @@ void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
3037void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, 3068void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
3038 struct btrfs_pending_snapshot *pending, 3069 struct btrfs_pending_snapshot *pending,
3039 u64 *bytes_to_reserve); 3070 u64 *bytes_to_reserve);
3040void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, 3071int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
3041 struct btrfs_pending_snapshot *pending); 3072 struct btrfs_pending_snapshot *pending);
3042 3073
3043/* scrub.c */ 3074/* scrub.c */
3044int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, 3075int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
3045 struct btrfs_scrub_progress *progress, int readonly); 3076 struct btrfs_scrub_progress *progress, int readonly);
3046int btrfs_scrub_pause(struct btrfs_root *root); 3077void btrfs_scrub_pause(struct btrfs_root *root);
3047int btrfs_scrub_pause_super(struct btrfs_root *root); 3078void btrfs_scrub_pause_super(struct btrfs_root *root);
3048int btrfs_scrub_continue(struct btrfs_root *root); 3079void btrfs_scrub_continue(struct btrfs_root *root);
3049int btrfs_scrub_continue_super(struct btrfs_root *root); 3080void btrfs_scrub_continue_super(struct btrfs_root *root);
3081int __btrfs_scrub_cancel(struct btrfs_fs_info *info);
3050int btrfs_scrub_cancel(struct btrfs_root *root); 3082int btrfs_scrub_cancel(struct btrfs_root *root);
3051int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev); 3083int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev);
3052int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid); 3084int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
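The ctree.h hunk above adds printf-style error reporting (__btrfs_std_error with a format string, btrfs_error, btrfs_panic) plus the btrfs_abort_transaction wrapper; all of them capture the call site through __func__ and __LINE__ and are wrapped in do { } while (0) so the macro expands to a single statement. The following stand-alone user-space C sketch illustrates only that macro shape: struct fs_state, __report_error() and report_error() are hypothetical stand-ins, not btrfs interfaces.

#include <stdarg.h>
#include <stdio.h>

struct fs_state { int errors; };	/* hypothetical, stands in for fs_info */

static void __report_error(struct fs_state *fs, const char *function,
			   unsigned int line, int errno_val,
			   const char *fmt, ...)
{
	va_list args;

	fs->errors++;
	fprintf(stderr, "error %d in %s:%u: ", errno_val, function, line);
	if (fmt) {
		va_start(args, fmt);
		vfprintf(stderr, fmt, args);
		va_end(args);
	}
	fputc('\n', stderr);
}

/* do { } while (0) lets the macro sit safely as the body of an if. */
#define report_error(fs, errno_val, fmt, args...)			\
do {									\
	__report_error((fs), __func__, __LINE__, (errno_val),		\
		       fmt, ##args);					\
} while (0)

int main(void)
{
	struct fs_state fs = { 0 };
	int ret = -5;

	if (ret)
		report_error(&fs, ret, "%d errors while writing supers", 3);
	return 0;
}

The single-statement expansion is what allows calls like btrfs_std_error(fs_info, errno) to appear as the unbraced body of an if, as in the macro definition above.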
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index fe4cd0f1cef1..03e3748d84d0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -115,6 +115,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
115 return NULL; 115 return NULL;
116} 116}
117 117
118/* Will return either the node or PTR_ERR(-ENOMEM) */
118static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( 119static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
119 struct inode *inode) 120 struct inode *inode)
120{ 121{
@@ -836,10 +837,8 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
836 btrfs_clear_path_blocking(path, NULL, 0); 837 btrfs_clear_path_blocking(path, NULL, 0);
837 838
838 /* insert the keys of the items */ 839 /* insert the keys of the items */
839 ret = setup_items_for_insert(trans, root, path, keys, data_size, 840 setup_items_for_insert(trans, root, path, keys, data_size,
840 total_data_size, total_size, nitems); 841 total_data_size, total_size, nitems);
841 if (ret)
842 goto error;
843 842
844 /* insert the dir index items */ 843 /* insert the dir index items */
845 slot = path->slots[0]; 844 slot = path->slots[0];
@@ -1108,16 +1107,25 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1108 return 0; 1107 return 0;
1109} 1108}
1110 1109
1111/* Called when committing the transaction. */ 1110/*
1111 * Called when committing the transaction.
1112 * Returns 0 on success.
1113 * Returns < 0 on error; the transaction is aborted and any outstanding
1114 * delayed items are cleaned up.
1115 */
1112int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, 1116int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1113 struct btrfs_root *root) 1117 struct btrfs_root *root)
1114{ 1118{
1119 struct btrfs_root *curr_root = root;
1115 struct btrfs_delayed_root *delayed_root; 1120 struct btrfs_delayed_root *delayed_root;
1116 struct btrfs_delayed_node *curr_node, *prev_node; 1121 struct btrfs_delayed_node *curr_node, *prev_node;
1117 struct btrfs_path *path; 1122 struct btrfs_path *path;
1118 struct btrfs_block_rsv *block_rsv; 1123 struct btrfs_block_rsv *block_rsv;
1119 int ret = 0; 1124 int ret = 0;
1120 1125
1126 if (trans->aborted)
1127 return -EIO;
1128
1121 path = btrfs_alloc_path(); 1129 path = btrfs_alloc_path();
1122 if (!path) 1130 if (!path)
1123 return -ENOMEM; 1131 return -ENOMEM;
@@ -1130,17 +1138,18 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1130 1138
1131 curr_node = btrfs_first_delayed_node(delayed_root); 1139 curr_node = btrfs_first_delayed_node(delayed_root);
1132 while (curr_node) { 1140 while (curr_node) {
1133 root = curr_node->root; 1141 curr_root = curr_node->root;
1134 ret = btrfs_insert_delayed_items(trans, path, root, 1142 ret = btrfs_insert_delayed_items(trans, path, curr_root,
1135 curr_node); 1143 curr_node);
1136 if (!ret) 1144 if (!ret)
1137 ret = btrfs_delete_delayed_items(trans, path, root, 1145 ret = btrfs_delete_delayed_items(trans, path,
1138 curr_node); 1146 curr_root, curr_node);
1139 if (!ret) 1147 if (!ret)
1140 ret = btrfs_update_delayed_inode(trans, root, path, 1148 ret = btrfs_update_delayed_inode(trans, curr_root,
1141 curr_node); 1149 path, curr_node);
1142 if (ret) { 1150 if (ret) {
1143 btrfs_release_delayed_node(curr_node); 1151 btrfs_release_delayed_node(curr_node);
1152 btrfs_abort_transaction(trans, root, ret);
1144 break; 1153 break;
1145 } 1154 }
1146 1155
@@ -1151,6 +1160,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1151 1160
1152 btrfs_free_path(path); 1161 btrfs_free_path(path);
1153 trans->block_rsv = block_rsv; 1162 trans->block_rsv = block_rsv;
1163
1154 return ret; 1164 return ret;
1155} 1165}
1156 1166
@@ -1371,6 +1381,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
1371 btrfs_wq_run_delayed_node(delayed_root, root, 0); 1381 btrfs_wq_run_delayed_node(delayed_root, root, 0);
1372} 1382}
1373 1383
1384/* Will return 0 or -ENOMEM */
1374int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, 1385int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1375 struct btrfs_root *root, const char *name, 1386 struct btrfs_root *root, const char *name,
1376 int name_len, struct inode *dir, 1387 int name_len, struct inode *dir,
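The delayed-inode.c change above makes btrfs_run_delayed_items refuse to run against an already-aborted transaction and, on the first failing step for a node, abort the transaction and stop walking instead of pressing on. A minimal sketch of that "first error wins, then abort" loop shape follows; the node/transaction types and step helpers are hypothetical, not the btrfs ones.

#include <stdio.h>
#include <stddef.h>

struct node { int id; struct node *next; };
struct transaction { int aborted; };

static int insert_items(struct transaction *t, struct node *n) { return 0; }
static int delete_items(struct transaction *t, struct node *n) { return 0; }
static int update_inode(struct transaction *t, struct node *n)
{
	return n->id == 2 ? -5 : 0;	/* pretend the third node hits -EIO */
}

static void abort_transaction(struct transaction *t, int err)
{
	t->aborted = 1;
	fprintf(stderr, "transaction aborted: %d\n", err);
}

static int run_delayed_nodes(struct transaction *t, struct node *head)
{
	int ret = 0;

	if (t->aborted)			/* refuse to run on an aborted transaction */
		return -5;

	for (struct node *cur = head; cur; cur = cur->next) {
		ret = insert_items(t, cur);
		if (!ret)
			ret = delete_items(t, cur);
		if (!ret)
			ret = update_inode(t, cur);
		if (ret) {		/* first failure aborts and ends the walk */
			abort_transaction(t, ret);
			break;
		}
	}
	return ret;
}

int main(void)
{
	struct node c = { 2, NULL }, b = { 1, &c }, a = { 0, &b };
	struct transaction t = { 0 };

	return run_delayed_nodes(&t, &a) ? 1 : 0;
}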
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 66e4f29505a3..69f22e3ab3bc 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -420,7 +420,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
420 * this does all the dirty work in terms of maintaining the correct 420 * this does all the dirty work in terms of maintaining the correct
421 * overall modification count. 421 * overall modification count.
422 */ 422 */
423static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info, 423static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
424 struct btrfs_trans_handle *trans, 424 struct btrfs_trans_handle *trans,
425 struct btrfs_delayed_ref_node *ref, 425 struct btrfs_delayed_ref_node *ref,
426 u64 bytenr, u64 num_bytes, 426 u64 bytenr, u64 num_bytes,
@@ -487,20 +487,19 @@ static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
487 * we've updated the existing ref, free the newly 487 * we've updated the existing ref, free the newly
488 * allocated ref 488 * allocated ref
489 */ 489 */
490 kfree(ref); 490 kfree(head_ref);
491 } else { 491 } else {
492 delayed_refs->num_heads++; 492 delayed_refs->num_heads++;
493 delayed_refs->num_heads_ready++; 493 delayed_refs->num_heads_ready++;
494 delayed_refs->num_entries++; 494 delayed_refs->num_entries++;
495 trans->delayed_ref_updates++; 495 trans->delayed_ref_updates++;
496 } 496 }
497 return 0;
498} 497}
499 498
500/* 499/*
501 * helper to insert a delayed tree ref into the rbtree. 500 * helper to insert a delayed tree ref into the rbtree.
502 */ 501 */
503static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info, 502static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
504 struct btrfs_trans_handle *trans, 503 struct btrfs_trans_handle *trans,
505 struct btrfs_delayed_ref_node *ref, 504 struct btrfs_delayed_ref_node *ref,
506 u64 bytenr, u64 num_bytes, u64 parent, 505 u64 bytenr, u64 num_bytes, u64 parent,
@@ -549,18 +548,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
549 * we've updated the existing ref, free the newly 548 * we've updated the existing ref, free the newly
550 * allocated ref 549 * allocated ref
551 */ 550 */
552 kfree(ref); 551 kfree(full_ref);
553 } else { 552 } else {
554 delayed_refs->num_entries++; 553 delayed_refs->num_entries++;
555 trans->delayed_ref_updates++; 554 trans->delayed_ref_updates++;
556 } 555 }
557 return 0;
558} 556}
559 557
560/* 558/*
561 * helper to insert a delayed data ref into the rbtree. 559 * helper to insert a delayed data ref into the rbtree.
562 */ 560 */
563static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info, 561static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
564 struct btrfs_trans_handle *trans, 562 struct btrfs_trans_handle *trans,
565 struct btrfs_delayed_ref_node *ref, 563 struct btrfs_delayed_ref_node *ref,
566 u64 bytenr, u64 num_bytes, u64 parent, 564 u64 bytenr, u64 num_bytes, u64 parent,
@@ -611,12 +609,11 @@ static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
611 * we've updated the existing ref, free the newly 609 * we've updated the existing ref, free the newly
612 * allocated ref 610 * allocated ref
613 */ 611 */
614 kfree(ref); 612 kfree(full_ref);
615 } else { 613 } else {
616 delayed_refs->num_entries++; 614 delayed_refs->num_entries++;
617 trans->delayed_ref_updates++; 615 trans->delayed_ref_updates++;
618 } 616 }
619 return 0;
620} 617}
621 618
622/* 619/*
@@ -634,7 +631,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
634 struct btrfs_delayed_tree_ref *ref; 631 struct btrfs_delayed_tree_ref *ref;
635 struct btrfs_delayed_ref_head *head_ref; 632 struct btrfs_delayed_ref_head *head_ref;
636 struct btrfs_delayed_ref_root *delayed_refs; 633 struct btrfs_delayed_ref_root *delayed_refs;
637 int ret;
638 634
639 BUG_ON(extent_op && extent_op->is_data); 635 BUG_ON(extent_op && extent_op->is_data);
640 ref = kmalloc(sizeof(*ref), GFP_NOFS); 636 ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -656,14 +652,12 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
656 * insert both the head node and the new ref without dropping 652 * insert both the head node and the new ref without dropping
657 * the spin lock 653 * the spin lock
658 */ 654 */
659 ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr, 655 add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
660 num_bytes, action, 0); 656 num_bytes, action, 0);
661 BUG_ON(ret);
662 657
663 ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, 658 add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
664 num_bytes, parent, ref_root, level, action, 659 num_bytes, parent, ref_root, level, action,
665 for_cow); 660 for_cow);
666 BUG_ON(ret);
667 if (!need_ref_seq(for_cow, ref_root) && 661 if (!need_ref_seq(for_cow, ref_root) &&
668 waitqueue_active(&delayed_refs->seq_wait)) 662 waitqueue_active(&delayed_refs->seq_wait))
669 wake_up(&delayed_refs->seq_wait); 663 wake_up(&delayed_refs->seq_wait);
@@ -685,7 +679,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
685 struct btrfs_delayed_data_ref *ref; 679 struct btrfs_delayed_data_ref *ref;
686 struct btrfs_delayed_ref_head *head_ref; 680 struct btrfs_delayed_ref_head *head_ref;
687 struct btrfs_delayed_ref_root *delayed_refs; 681 struct btrfs_delayed_ref_root *delayed_refs;
688 int ret;
689 682
690 BUG_ON(extent_op && !extent_op->is_data); 683 BUG_ON(extent_op && !extent_op->is_data);
691 ref = kmalloc(sizeof(*ref), GFP_NOFS); 684 ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -707,14 +700,12 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
707 * insert both the head node and the new ref without dropping 700 * insert both the head node and the new ref without dropping
708 * the spin lock 701 * the spin lock
709 */ 702 */
710 ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr, 703 add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
711 num_bytes, action, 1); 704 num_bytes, action, 1);
712 BUG_ON(ret);
713 705
714 ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, 706 add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
715 num_bytes, parent, ref_root, owner, offset, 707 num_bytes, parent, ref_root, owner, offset,
716 action, for_cow); 708 action, for_cow);
717 BUG_ON(ret);
718 if (!need_ref_seq(for_cow, ref_root) && 709 if (!need_ref_seq(for_cow, ref_root) &&
719 waitqueue_active(&delayed_refs->seq_wait)) 710 waitqueue_active(&delayed_refs->seq_wait))
720 wake_up(&delayed_refs->seq_wait); 711 wake_up(&delayed_refs->seq_wait);
@@ -729,7 +720,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
729{ 720{
730 struct btrfs_delayed_ref_head *head_ref; 721 struct btrfs_delayed_ref_head *head_ref;
731 struct btrfs_delayed_ref_root *delayed_refs; 722 struct btrfs_delayed_ref_root *delayed_refs;
732 int ret;
733 723
734 head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS); 724 head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
735 if (!head_ref) 725 if (!head_ref)
@@ -740,10 +730,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
740 delayed_refs = &trans->transaction->delayed_refs; 730 delayed_refs = &trans->transaction->delayed_refs;
741 spin_lock(&delayed_refs->lock); 731 spin_lock(&delayed_refs->lock);
742 732
743 ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr, 733 add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
744 num_bytes, BTRFS_UPDATE_DELAYED_HEAD, 734 num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
745 extent_op->is_data); 735 extent_op->is_data);
746 BUG_ON(ret);
747 736
748 if (waitqueue_active(&delayed_refs->seq_wait)) 737 if (waitqueue_active(&delayed_refs->seq_wait))
749 wake_up(&delayed_refs->seq_wait); 738 wake_up(&delayed_refs->seq_wait);
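Besides dropping return codes that could never be non-zero, the delayed-ref.c hunk above frees the object that was actually allocated (head_ref or full_ref) instead of the generic node pointer handed in; passing the embedded node's address to kfree() is only coincidentally equivalent when that node happens to sit at offset zero of its container. A small sketch of why the distinction matters, using hypothetical types in which the node is deliberately not the first member:

#include <stdlib.h>
#include <stdio.h>

struct ref_node { unsigned long long bytenr; };

struct head_ref {
	int must_insert;	/* extra state placed before the embedded node */
	struct ref_node node;	/* note: not at offset 0 of the allocation */
};

static void add_ref_head(struct ref_node *ref, struct head_ref *head_ref,
			 int already_present)
{
	if (already_present) {
		/* 'ref' is &head_ref->node: freeing it would hand a pointer
		 * into the middle of the block back to the allocator.
		 * Free what was actually allocated instead. */
		free(head_ref);
	} else {
		printf("inserted head for %llu\n", ref->bytenr);
	}
}

int main(void)
{
	struct head_ref *h = malloc(sizeof(*h));

	if (!h)
		return 1;
	h->must_insert = 0;
	h->node.bytenr = 4096;
	add_ref_head(&h->node, h, 1);	/* existing head updated: drop the new one */
	return 0;
}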
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 31d84e78129b..c1a074d0696f 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -49,9 +49,8 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
49 di = btrfs_match_dir_item_name(root, path, name, name_len); 49 di = btrfs_match_dir_item_name(root, path, name, name_len);
50 if (di) 50 if (di)
51 return ERR_PTR(-EEXIST); 51 return ERR_PTR(-EEXIST);
52 ret = btrfs_extend_item(trans, root, path, data_size); 52 btrfs_extend_item(trans, root, path, data_size);
53 } 53 } else if (ret < 0)
54 if (ret < 0)
55 return ERR_PTR(ret); 54 return ERR_PTR(ret);
56 WARN_ON(ret > 0); 55 WARN_ON(ret > 0);
57 leaf = path->nodes[0]; 56 leaf = path->nodes[0];
@@ -116,6 +115,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
116 * 'location' is the key to stuff into the directory item, 'type' is the 115 * 'location' is the key to stuff into the directory item, 'type' is the
117 * type of the inode we're pointing to, and 'index' is the sequence number 116 * type of the inode we're pointing to, and 'index' is the sequence number
118 * to use for the second index (if one is created). 117 * to use for the second index (if one is created).
118 * Will return 0 or -ENOMEM
119 */ 119 */
120int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root 120int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
121 *root, const char *name, int name_len, 121 *root, const char *name, int name_len,
@@ -383,8 +383,8 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
383 start = btrfs_item_ptr_offset(leaf, path->slots[0]); 383 start = btrfs_item_ptr_offset(leaf, path->slots[0]);
384 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, 384 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
385 item_len - (ptr + sub_item_len - start)); 385 item_len - (ptr + sub_item_len - start));
386 ret = btrfs_truncate_item(trans, root, path, 386 btrfs_truncate_item(trans, root, path,
387 item_len - sub_item_len, 1); 387 item_len - sub_item_len, 1);
388 } 388 }
389 return ret; 389 return ret;
390} 390}
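The dir-item helpers above keep signalling failure through a single pointer return: insert_with_overflow hands back either a valid item pointer or ERR_PTR(-EEXIST)/ERR_PTR(ret), and the caller tells the two apart with the kernel's IS_ERR() test. The sketch below shows that encode-an-errno-in-a-pointer convention in simplified user-space form; err_ptr(), is_err() and ptr_err() are stand-ins, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)    { return (void *)error; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)
{
	/* the top MAX_ERRNO addresses are reserved for encoded errors */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct dir_item { char name[32]; };

static struct dir_item *insert_name(const char *name, int already_exists)
{
	struct dir_item *di;

	if (already_exists)
		return err_ptr(-17);	/* -EEXIST */
	di = calloc(1, sizeof(*di));
	if (!di)
		return err_ptr(-12);	/* -ENOMEM */
	snprintf(di->name, sizeof(di->name), "%s", name);
	return di;
}

int main(void)
{
	struct dir_item *di = insert_name("foo", 1);

	if (is_err(di)) {
		fprintf(stderr, "insert failed: %ld\n", ptr_err(di));
		return 1;
	}
	free(di);
	return 0;
}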
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 6107b6958413..7b55eee15a51 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -48,20 +48,19 @@
48static struct extent_io_ops btree_extent_io_ops; 48static struct extent_io_ops btree_extent_io_ops;
49static void end_workqueue_fn(struct btrfs_work *work); 49static void end_workqueue_fn(struct btrfs_work *work);
50static void free_fs_root(struct btrfs_root *root); 50static void free_fs_root(struct btrfs_root *root);
51static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, 51static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
52 int read_only); 52 int read_only);
53static int btrfs_destroy_ordered_operations(struct btrfs_root *root); 53static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
54static int btrfs_destroy_ordered_extents(struct btrfs_root *root); 54static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
55static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 55static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
56 struct btrfs_root *root); 56 struct btrfs_root *root);
57static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t); 57static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
58static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root); 58static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
59static int btrfs_destroy_marked_extents(struct btrfs_root *root, 59static int btrfs_destroy_marked_extents(struct btrfs_root *root,
60 struct extent_io_tree *dirty_pages, 60 struct extent_io_tree *dirty_pages,
61 int mark); 61 int mark);
62static int btrfs_destroy_pinned_extent(struct btrfs_root *root, 62static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
63 struct extent_io_tree *pinned_extents); 63 struct extent_io_tree *pinned_extents);
64static int btrfs_cleanup_transaction(struct btrfs_root *root);
65 64
66/* 65/*
67 * end_io_wq structs are used to do processing in task context when an IO is 66 * end_io_wq structs are used to do processing in task context when an IO is
@@ -99,6 +98,7 @@ struct async_submit_bio {
99 */ 98 */
100 u64 bio_offset; 99 u64 bio_offset;
101 struct btrfs_work work; 100 struct btrfs_work work;
101 int error;
102}; 102};
103 103
104/* 104/*
@@ -332,7 +332,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
332 return 0; 332 return 0;
333 333
334 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, 334 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
335 0, &cached_state, GFP_NOFS); 335 0, &cached_state);
336 if (extent_buffer_uptodate(eb) && 336 if (extent_buffer_uptodate(eb) &&
337 btrfs_header_generation(eb) == parent_transid) { 337 btrfs_header_generation(eb) == parent_transid) {
338 ret = 0; 338 ret = 0;
@@ -425,7 +425,6 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
425 eb = (struct extent_buffer *)page->private; 425 eb = (struct extent_buffer *)page->private;
426 if (page != eb->pages[0]) 426 if (page != eb->pages[0])
427 return 0; 427 return 0;
428
429 found_start = btrfs_header_bytenr(eb); 428 found_start = btrfs_header_bytenr(eb);
430 if (found_start != start) { 429 if (found_start != start) {
431 WARN_ON(1); 430 WARN_ON(1);
@@ -727,11 +726,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
727static void run_one_async_start(struct btrfs_work *work) 726static void run_one_async_start(struct btrfs_work *work)
728{ 727{
729 struct async_submit_bio *async; 728 struct async_submit_bio *async;
729 int ret;
730 730
731 async = container_of(work, struct async_submit_bio, work); 731 async = container_of(work, struct async_submit_bio, work);
732 async->submit_bio_start(async->inode, async->rw, async->bio, 732 ret = async->submit_bio_start(async->inode, async->rw, async->bio,
733 async->mirror_num, async->bio_flags, 733 async->mirror_num, async->bio_flags,
734 async->bio_offset); 734 async->bio_offset);
735 if (ret)
736 async->error = ret;
735} 737}
736 738
737static void run_one_async_done(struct btrfs_work *work) 739static void run_one_async_done(struct btrfs_work *work)
@@ -752,6 +754,12 @@ static void run_one_async_done(struct btrfs_work *work)
752 waitqueue_active(&fs_info->async_submit_wait)) 754 waitqueue_active(&fs_info->async_submit_wait))
753 wake_up(&fs_info->async_submit_wait); 755 wake_up(&fs_info->async_submit_wait);
754 756
757 /* If an error occurred, we just want to clean up the bio and move on */
758 if (async->error) {
759 bio_endio(async->bio, async->error);
760 return;
761 }
762
755 async->submit_bio_done(async->inode, async->rw, async->bio, 763 async->submit_bio_done(async->inode, async->rw, async->bio,
756 async->mirror_num, async->bio_flags, 764 async->mirror_num, async->bio_flags,
757 async->bio_offset); 765 async->bio_offset);
@@ -793,6 +801,8 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
793 async->bio_flags = bio_flags; 801 async->bio_flags = bio_flags;
794 async->bio_offset = bio_offset; 802 async->bio_offset = bio_offset;
795 803
804 async->error = 0;
805
796 atomic_inc(&fs_info->nr_async_submits); 806 atomic_inc(&fs_info->nr_async_submits);
797 807
798 if (rw & REQ_SYNC) 808 if (rw & REQ_SYNC)
@@ -814,15 +824,18 @@ static int btree_csum_one_bio(struct bio *bio)
814 struct bio_vec *bvec = bio->bi_io_vec; 824 struct bio_vec *bvec = bio->bi_io_vec;
815 int bio_index = 0; 825 int bio_index = 0;
816 struct btrfs_root *root; 826 struct btrfs_root *root;
827 int ret = 0;
817 828
818 WARN_ON(bio->bi_vcnt <= 0); 829 WARN_ON(bio->bi_vcnt <= 0);
819 while (bio_index < bio->bi_vcnt) { 830 while (bio_index < bio->bi_vcnt) {
820 root = BTRFS_I(bvec->bv_page->mapping->host)->root; 831 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
821 csum_dirty_buffer(root, bvec->bv_page); 832 ret = csum_dirty_buffer(root, bvec->bv_page);
833 if (ret)
834 break;
822 bio_index++; 835 bio_index++;
823 bvec++; 836 bvec++;
824 } 837 }
825 return 0; 838 return ret;
826} 839}
827 840
828static int __btree_submit_bio_start(struct inode *inode, int rw, 841static int __btree_submit_bio_start(struct inode *inode, int rw,
@@ -834,8 +847,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,
834 * when we're called for a write, we're already in the async 847 * when we're called for a write, we're already in the async
835 * submission context. Just jump into btrfs_map_bio 848 * submission context. Just jump into btrfs_map_bio
836 */ 849 */
837 btree_csum_one_bio(bio); 850 return btree_csum_one_bio(bio);
838 return 0;
839} 851}
840 852
841static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, 853static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
@@ -863,7 +875,8 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
863 */ 875 */
864 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, 876 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
865 bio, 1); 877 bio, 1);
866 BUG_ON(ret); 878 if (ret)
879 return ret;
867 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, 880 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
868 mirror_num, 0); 881 mirror_num, 0);
869 } 882 }
@@ -1080,8 +1093,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1080 1093
1081} 1094}
1082 1095
1083int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1096void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1084 struct extent_buffer *buf) 1097 struct extent_buffer *buf)
1085{ 1098{
1086 if (btrfs_header_generation(buf) == 1099 if (btrfs_header_generation(buf) ==
1087 root->fs_info->running_transaction->transid) { 1100 root->fs_info->running_transaction->transid) {
@@ -1091,8 +1104,14 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1091 spin_lock(&root->fs_info->delalloc_lock); 1104 spin_lock(&root->fs_info->delalloc_lock);
1092 if (root->fs_info->dirty_metadata_bytes >= buf->len) 1105 if (root->fs_info->dirty_metadata_bytes >= buf->len)
1093 root->fs_info->dirty_metadata_bytes -= buf->len; 1106 root->fs_info->dirty_metadata_bytes -= buf->len;
1094 else 1107 else {
1095 WARN_ON(1); 1108 spin_unlock(&root->fs_info->delalloc_lock);
1109 btrfs_panic(root->fs_info, -EOVERFLOW,
1110 "Can't clear %lu bytes from "
1111 " dirty_mdatadata_bytes (%lu)",
1112 buf->len,
1113 root->fs_info->dirty_metadata_bytes);
1114 }
1096 spin_unlock(&root->fs_info->delalloc_lock); 1115 spin_unlock(&root->fs_info->delalloc_lock);
1097 } 1116 }
1098 1117
@@ -1100,13 +1119,12 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1100 btrfs_set_lock_blocking(buf); 1119 btrfs_set_lock_blocking(buf);
1101 clear_extent_buffer_dirty(buf); 1120 clear_extent_buffer_dirty(buf);
1102 } 1121 }
1103 return 0;
1104} 1122}
1105 1123
1106static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, 1124static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1107 u32 stripesize, struct btrfs_root *root, 1125 u32 stripesize, struct btrfs_root *root,
1108 struct btrfs_fs_info *fs_info, 1126 struct btrfs_fs_info *fs_info,
1109 u64 objectid) 1127 u64 objectid)
1110{ 1128{
1111 root->node = NULL; 1129 root->node = NULL;
1112 root->commit_root = NULL; 1130 root->commit_root = NULL;
@@ -1158,13 +1176,12 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1158 root->defrag_running = 0; 1176 root->defrag_running = 0;
1159 root->root_key.objectid = objectid; 1177 root->root_key.objectid = objectid;
1160 root->anon_dev = 0; 1178 root->anon_dev = 0;
1161 return 0;
1162} 1179}
1163 1180
1164static int find_and_setup_root(struct btrfs_root *tree_root, 1181static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
1165 struct btrfs_fs_info *fs_info, 1182 struct btrfs_fs_info *fs_info,
1166 u64 objectid, 1183 u64 objectid,
1167 struct btrfs_root *root) 1184 struct btrfs_root *root)
1168{ 1185{
1169 int ret; 1186 int ret;
1170 u32 blocksize; 1187 u32 blocksize;
@@ -1177,7 +1194,8 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
1177 &root->root_item, &root->root_key); 1194 &root->root_item, &root->root_key);
1178 if (ret > 0) 1195 if (ret > 0)
1179 return -ENOENT; 1196 return -ENOENT;
1180 BUG_ON(ret); 1197 else if (ret < 0)
1198 return ret;
1181 1199
1182 generation = btrfs_root_generation(&root->root_item); 1200 generation = btrfs_root_generation(&root->root_item);
1183 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); 1201 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
@@ -1346,7 +1364,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1346 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), 1364 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1347 blocksize, generation); 1365 blocksize, generation);
1348 root->commit_root = btrfs_root_node(root); 1366 root->commit_root = btrfs_root_node(root);
1349 BUG_ON(!root->node); 1367 BUG_ON(!root->node); /* -ENOMEM */
1350out: 1368out:
1351 if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { 1369 if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1352 root->ref_cows = 1; 1370 root->ref_cows = 1;
@@ -1537,9 +1555,10 @@ static int transaction_kthread(void *arg)
1537 u64 transid; 1555 u64 transid;
1538 unsigned long now; 1556 unsigned long now;
1539 unsigned long delay; 1557 unsigned long delay;
1540 int ret; 1558 bool cannot_commit;
1541 1559
1542 do { 1560 do {
1561 cannot_commit = false;
1543 delay = HZ * 30; 1562 delay = HZ * 30;
1544 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); 1563 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1545 mutex_lock(&root->fs_info->transaction_kthread_mutex); 1564 mutex_lock(&root->fs_info->transaction_kthread_mutex);
@@ -1561,11 +1580,14 @@ static int transaction_kthread(void *arg)
1561 transid = cur->transid; 1580 transid = cur->transid;
1562 spin_unlock(&root->fs_info->trans_lock); 1581 spin_unlock(&root->fs_info->trans_lock);
1563 1582
1583 /* If the file system is aborted, this will always fail. */
1564 trans = btrfs_join_transaction(root); 1584 trans = btrfs_join_transaction(root);
1565 BUG_ON(IS_ERR(trans)); 1585 if (IS_ERR(trans)) {
1586 cannot_commit = true;
1587 goto sleep;
1588 }
1566 if (transid == trans->transid) { 1589 if (transid == trans->transid) {
1567 ret = btrfs_commit_transaction(trans, root); 1590 btrfs_commit_transaction(trans, root);
1568 BUG_ON(ret);
1569 } else { 1591 } else {
1570 btrfs_end_transaction(trans, root); 1592 btrfs_end_transaction(trans, root);
1571 } 1593 }
@@ -1576,7 +1598,8 @@ sleep:
1576 if (!try_to_freeze()) { 1598 if (!try_to_freeze()) {
1577 set_current_state(TASK_INTERRUPTIBLE); 1599 set_current_state(TASK_INTERRUPTIBLE);
1578 if (!kthread_should_stop() && 1600 if (!kthread_should_stop() &&
1579 !btrfs_transaction_blocked(root->fs_info)) 1601 (!btrfs_transaction_blocked(root->fs_info) ||
1602 cannot_commit))
1580 schedule_timeout(delay); 1603 schedule_timeout(delay);
1581 __set_current_state(TASK_RUNNING); 1604 __set_current_state(TASK_RUNNING);
1582 } 1605 }
@@ -2028,7 +2051,12 @@ int open_ctree(struct super_block *sb,
2028 /* check FS state, whether FS is broken. */ 2051 /* check FS state, whether FS is broken. */
2029 fs_info->fs_state |= btrfs_super_flags(disk_super); 2052 fs_info->fs_state |= btrfs_super_flags(disk_super);
2030 2053
2031 btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); 2054 ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2055 if (ret) {
2056 printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2057 err = ret;
2058 goto fail_alloc;
2059 }
2032 2060
2033 /* 2061 /*
2034 * run through our array of backup supers and setup 2062 * run through our array of backup supers and setup
@@ -2218,6 +2246,14 @@ int open_ctree(struct super_block *sb,
2218 goto fail_sb_buffer; 2246 goto fail_sb_buffer;
2219 } 2247 }
2220 2248
2249 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2250 (leafsize != nodesize || sectorsize != nodesize)) {
2251 printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2252 "are not allowed for mixed block groups on %s\n",
2253 sb->s_id);
2254 goto fail_sb_buffer;
2255 }
2256
2221 mutex_lock(&fs_info->chunk_mutex); 2257 mutex_lock(&fs_info->chunk_mutex);
2222 ret = btrfs_read_sys_array(tree_root); 2258 ret = btrfs_read_sys_array(tree_root);
2223 mutex_unlock(&fs_info->chunk_mutex); 2259 mutex_unlock(&fs_info->chunk_mutex);
@@ -2237,7 +2273,7 @@ int open_ctree(struct super_block *sb,
2237 chunk_root->node = read_tree_block(chunk_root, 2273 chunk_root->node = read_tree_block(chunk_root,
2238 btrfs_super_chunk_root(disk_super), 2274 btrfs_super_chunk_root(disk_super),
2239 blocksize, generation); 2275 blocksize, generation);
2240 BUG_ON(!chunk_root->node); 2276 BUG_ON(!chunk_root->node); /* -ENOMEM */
2241 if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { 2277 if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2242 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n", 2278 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2243 sb->s_id); 2279 sb->s_id);
@@ -2377,21 +2413,31 @@ retry_root_backup:
2377 log_tree_root->node = read_tree_block(tree_root, bytenr, 2413 log_tree_root->node = read_tree_block(tree_root, bytenr,
2378 blocksize, 2414 blocksize,
2379 generation + 1); 2415 generation + 1);
2416 /* returns with log_tree_root freed on success */
2380 ret = btrfs_recover_log_trees(log_tree_root); 2417 ret = btrfs_recover_log_trees(log_tree_root);
2381 BUG_ON(ret); 2418 if (ret) {
2419 btrfs_error(tree_root->fs_info, ret,
2420 "Failed to recover log tree");
2421 free_extent_buffer(log_tree_root->node);
2422 kfree(log_tree_root);
2423 goto fail_trans_kthread;
2424 }
2382 2425
2383 if (sb->s_flags & MS_RDONLY) { 2426 if (sb->s_flags & MS_RDONLY) {
2384 ret = btrfs_commit_super(tree_root); 2427 ret = btrfs_commit_super(tree_root);
2385 BUG_ON(ret); 2428 if (ret)
2429 goto fail_trans_kthread;
2386 } 2430 }
2387 } 2431 }
2388 2432
2389 ret = btrfs_find_orphan_roots(tree_root); 2433 ret = btrfs_find_orphan_roots(tree_root);
2390 BUG_ON(ret); 2434 if (ret)
2435 goto fail_trans_kthread;
2391 2436
2392 if (!(sb->s_flags & MS_RDONLY)) { 2437 if (!(sb->s_flags & MS_RDONLY)) {
2393 ret = btrfs_cleanup_fs_roots(fs_info); 2438 ret = btrfs_cleanup_fs_roots(fs_info);
2394 BUG_ON(ret); 2439 if (ret) {
2440 }
2395 2441
2396 ret = btrfs_recover_relocation(tree_root); 2442 ret = btrfs_recover_relocation(tree_root);
2397 if (ret < 0) { 2443 if (ret < 0) {
@@ -2811,6 +2857,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2811 if (total_errors > max_errors) { 2857 if (total_errors > max_errors) {
2812 printk(KERN_ERR "btrfs: %d errors while writing supers\n", 2858 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2813 total_errors); 2859 total_errors);
2860
2861 /* This shouldn't happen. FUA is masked off if unsupported */
2814 BUG(); 2862 BUG();
2815 } 2863 }
2816 2864
@@ -2827,9 +2875,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2827 } 2875 }
2828 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2876 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2829 if (total_errors > max_errors) { 2877 if (total_errors > max_errors) {
2830 printk(KERN_ERR "btrfs: %d errors while writing supers\n", 2878 btrfs_error(root->fs_info, -EIO,
2831 total_errors); 2879 "%d errors while writing supers", total_errors);
2832 BUG(); 2880 return -EIO;
2833 } 2881 }
2834 return 0; 2882 return 0;
2835} 2883}
@@ -2843,7 +2891,20 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
2843 return ret; 2891 return ret;
2844} 2892}
2845 2893
2846int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) 2894/* Kill all outstanding I/O */
2895void btrfs_abort_devices(struct btrfs_root *root)
2896{
2897 struct list_head *head;
2898 struct btrfs_device *dev;
2899 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2900 head = &root->fs_info->fs_devices->devices;
2901 list_for_each_entry_rcu(dev, head, dev_list) {
2902 blk_abort_queue(dev->bdev->bd_disk->queue);
2903 }
2904 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2905}
2906
2907void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2847{ 2908{
2848 spin_lock(&fs_info->fs_roots_radix_lock); 2909 spin_lock(&fs_info->fs_roots_radix_lock);
2849 radix_tree_delete(&fs_info->fs_roots_radix, 2910 radix_tree_delete(&fs_info->fs_roots_radix,
@@ -2856,7 +2917,6 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2856 __btrfs_remove_free_space_cache(root->free_ino_pinned); 2917 __btrfs_remove_free_space_cache(root->free_ino_pinned);
2857 __btrfs_remove_free_space_cache(root->free_ino_ctl); 2918 __btrfs_remove_free_space_cache(root->free_ino_ctl);
2858 free_fs_root(root); 2919 free_fs_root(root);
2859 return 0;
2860} 2920}
2861 2921
2862static void free_fs_root(struct btrfs_root *root) 2922static void free_fs_root(struct btrfs_root *root)
@@ -2873,7 +2933,7 @@ static void free_fs_root(struct btrfs_root *root)
2873 kfree(root); 2933 kfree(root);
2874} 2934}
2875 2935
2876static int del_fs_roots(struct btrfs_fs_info *fs_info) 2936static void del_fs_roots(struct btrfs_fs_info *fs_info)
2877{ 2937{
2878 int ret; 2938 int ret;
2879 struct btrfs_root *gang[8]; 2939 struct btrfs_root *gang[8];
@@ -2902,7 +2962,6 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info)
2902 for (i = 0; i < ret; i++) 2962 for (i = 0; i < ret; i++)
2903 btrfs_free_fs_root(fs_info, gang[i]); 2963 btrfs_free_fs_root(fs_info, gang[i]);
2904 } 2964 }
2905 return 0;
2906} 2965}
2907 2966
2908int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) 2967int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
@@ -2951,14 +3010,21 @@ int btrfs_commit_super(struct btrfs_root *root)
2951 if (IS_ERR(trans)) 3010 if (IS_ERR(trans))
2952 return PTR_ERR(trans); 3011 return PTR_ERR(trans);
2953 ret = btrfs_commit_transaction(trans, root); 3012 ret = btrfs_commit_transaction(trans, root);
2954 BUG_ON(ret); 3013 if (ret)
3014 return ret;
2955 /* run commit again to drop the original snapshot */ 3015 /* run commit again to drop the original snapshot */
2956 trans = btrfs_join_transaction(root); 3016 trans = btrfs_join_transaction(root);
2957 if (IS_ERR(trans)) 3017 if (IS_ERR(trans))
2958 return PTR_ERR(trans); 3018 return PTR_ERR(trans);
2959 btrfs_commit_transaction(trans, root); 3019 ret = btrfs_commit_transaction(trans, root);
3020 if (ret)
3021 return ret;
2960 ret = btrfs_write_and_wait_transaction(NULL, root); 3022 ret = btrfs_write_and_wait_transaction(NULL, root);
2961 BUG_ON(ret); 3023 if (ret) {
3024 btrfs_error(root->fs_info, ret,
3025 "Failed to sync btree inode to disk.");
3026 return ret;
3027 }
2962 3028
2963 ret = write_ctree_super(NULL, root, 0); 3029 ret = write_ctree_super(NULL, root, 0);
2964 return ret; 3030 return ret;
@@ -3209,15 +3275,23 @@ out:
3209 return 0; 3275 return 0;
3210} 3276}
3211 3277
3212static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, 3278static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3213 int read_only) 3279 int read_only)
3214{ 3280{
3281 if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
3282 printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
3283 return -EINVAL;
3284 }
3285
3215 if (read_only) 3286 if (read_only)
3216 return; 3287 return 0;
3217 3288
3218 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 3289 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
3219 printk(KERN_WARNING "warning: mount fs with errors, " 3290 printk(KERN_WARNING "warning: mount fs with errors, "
3220 "running btrfsck is recommended\n"); 3291 "running btrfsck is recommended\n");
3292 }
3293
3294 return 0;
3221} 3295}
3222 3296
3223int btrfs_error_commit_super(struct btrfs_root *root) 3297int btrfs_error_commit_super(struct btrfs_root *root)
@@ -3239,7 +3313,7 @@ int btrfs_error_commit_super(struct btrfs_root *root)
3239 return ret; 3313 return ret;
3240} 3314}
3241 3315
3242static int btrfs_destroy_ordered_operations(struct btrfs_root *root) 3316static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
3243{ 3317{
3244 struct btrfs_inode *btrfs_inode; 3318 struct btrfs_inode *btrfs_inode;
3245 struct list_head splice; 3319 struct list_head splice;
@@ -3261,11 +3335,9 @@ static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
3261 3335
3262 spin_unlock(&root->fs_info->ordered_extent_lock); 3336 spin_unlock(&root->fs_info->ordered_extent_lock);
3263 mutex_unlock(&root->fs_info->ordered_operations_mutex); 3337 mutex_unlock(&root->fs_info->ordered_operations_mutex);
3264
3265 return 0;
3266} 3338}
3267 3339
3268static int btrfs_destroy_ordered_extents(struct btrfs_root *root) 3340static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3269{ 3341{
3270 struct list_head splice; 3342 struct list_head splice;
3271 struct btrfs_ordered_extent *ordered; 3343 struct btrfs_ordered_extent *ordered;
@@ -3297,12 +3369,10 @@ static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
3297 } 3369 }
3298 3370
3299 spin_unlock(&root->fs_info->ordered_extent_lock); 3371 spin_unlock(&root->fs_info->ordered_extent_lock);
3300
3301 return 0;
3302} 3372}
3303 3373
3304static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 3374int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3305 struct btrfs_root *root) 3375 struct btrfs_root *root)
3306{ 3376{
3307 struct rb_node *node; 3377 struct rb_node *node;
3308 struct btrfs_delayed_ref_root *delayed_refs; 3378 struct btrfs_delayed_ref_root *delayed_refs;
@@ -3311,6 +3381,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3311 3381
3312 delayed_refs = &trans->delayed_refs; 3382 delayed_refs = &trans->delayed_refs;
3313 3383
3384again:
3314 spin_lock(&delayed_refs->lock); 3385 spin_lock(&delayed_refs->lock);
3315 if (delayed_refs->num_entries == 0) { 3386 if (delayed_refs->num_entries == 0) {
3316 spin_unlock(&delayed_refs->lock); 3387 spin_unlock(&delayed_refs->lock);
@@ -3332,6 +3403,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3332 struct btrfs_delayed_ref_head *head; 3403 struct btrfs_delayed_ref_head *head;
3333 3404
3334 head = btrfs_delayed_node_to_head(ref); 3405 head = btrfs_delayed_node_to_head(ref);
3406 spin_unlock(&delayed_refs->lock);
3335 mutex_lock(&head->mutex); 3407 mutex_lock(&head->mutex);
3336 kfree(head->extent_op); 3408 kfree(head->extent_op);
3337 delayed_refs->num_heads--; 3409 delayed_refs->num_heads--;
@@ -3339,8 +3411,9 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3339 delayed_refs->num_heads_ready--; 3411 delayed_refs->num_heads_ready--;
3340 list_del_init(&head->cluster); 3412 list_del_init(&head->cluster);
3341 mutex_unlock(&head->mutex); 3413 mutex_unlock(&head->mutex);
3414 btrfs_put_delayed_ref(ref);
3415 goto again;
3342 } 3416 }
3343
3344 spin_unlock(&delayed_refs->lock); 3417 spin_unlock(&delayed_refs->lock);
3345 btrfs_put_delayed_ref(ref); 3418 btrfs_put_delayed_ref(ref);
3346 3419
@@ -3353,7 +3426,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3353 return ret; 3426 return ret;
3354} 3427}
3355 3428
3356static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t) 3429static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
3357{ 3430{
3358 struct btrfs_pending_snapshot *snapshot; 3431 struct btrfs_pending_snapshot *snapshot;
3359 struct list_head splice; 3432 struct list_head splice;
@@ -3371,11 +3444,9 @@ static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
3371 3444
3372 kfree(snapshot); 3445 kfree(snapshot);
3373 } 3446 }
3374
3375 return 0;
3376} 3447}
3377 3448
3378static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) 3449static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3379{ 3450{
3380 struct btrfs_inode *btrfs_inode; 3451 struct btrfs_inode *btrfs_inode;
3381 struct list_head splice; 3452 struct list_head splice;
@@ -3395,8 +3466,6 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3395 } 3466 }
3396 3467
3397 spin_unlock(&root->fs_info->delalloc_lock); 3468 spin_unlock(&root->fs_info->delalloc_lock);
3398
3399 return 0;
3400} 3469}
3401 3470
3402static int btrfs_destroy_marked_extents(struct btrfs_root *root, 3471static int btrfs_destroy_marked_extents(struct btrfs_root *root,
@@ -3487,13 +3556,43 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3487 return 0; 3556 return 0;
3488} 3557}
3489 3558
3490static int btrfs_cleanup_transaction(struct btrfs_root *root) 3559void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3560 struct btrfs_root *root)
3561{
3562 btrfs_destroy_delayed_refs(cur_trans, root);
3563 btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3564 cur_trans->dirty_pages.dirty_bytes);
3565
3566 /* FIXME: cleanup wait for commit */
3567 cur_trans->in_commit = 1;
3568 cur_trans->blocked = 1;
3569 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3570 wake_up(&root->fs_info->transaction_blocked_wait);
3571
3572 cur_trans->blocked = 0;
3573 if (waitqueue_active(&root->fs_info->transaction_wait))
3574 wake_up(&root->fs_info->transaction_wait);
3575
3576 cur_trans->commit_done = 1;
3577 if (waitqueue_active(&cur_trans->commit_wait))
3578 wake_up(&cur_trans->commit_wait);
3579
3580 btrfs_destroy_pending_snapshots(cur_trans);
3581
3582 btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3583 EXTENT_DIRTY);
3584
3585 /*
3586 memset(cur_trans, 0, sizeof(*cur_trans));
3587 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3588 */
3589}
3590
3591int btrfs_cleanup_transaction(struct btrfs_root *root)
3491{ 3592{
3492 struct btrfs_transaction *t; 3593 struct btrfs_transaction *t;
3493 LIST_HEAD(list); 3594 LIST_HEAD(list);
3494 3595
3495 WARN_ON(1);
3496
3497 mutex_lock(&root->fs_info->transaction_kthread_mutex); 3596 mutex_lock(&root->fs_info->transaction_kthread_mutex);
3498 3597
3499 spin_lock(&root->fs_info->trans_lock); 3598 spin_lock(&root->fs_info->trans_lock);
@@ -3558,6 +3657,17 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3558 return 0; 3657 return 0;
3559} 3658}
3560 3659
3660static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
3661 u64 start, u64 end,
3662 struct extent_state *state)
3663{
3664 struct super_block *sb = page->mapping->host->i_sb;
3665 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3666 btrfs_error(fs_info, -EIO,
3667 "Error occured while writing out btree at %llu", start);
3668 return -EIO;
3669}
3670
3561static struct extent_io_ops btree_extent_io_ops = { 3671static struct extent_io_ops btree_extent_io_ops = {
3562 .write_cache_pages_lock_hook = btree_lock_page_hook, 3672 .write_cache_pages_lock_hook = btree_lock_page_hook,
3563 .readpage_end_io_hook = btree_readpage_end_io_hook, 3673 .readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3565,4 +3675,5 @@ static struct extent_io_ops btree_extent_io_ops = {
3565 .submit_bio_hook = btree_submit_bio_hook, 3675 .submit_bio_hook = btree_submit_bio_hook,
3566 /* note we're sharing with inode.c for the merge bio hook */ 3676 /* note we're sharing with inode.c for the merge bio hook */
3567 .merge_bio_hook = btrfs_merge_bio_hook, 3677 .merge_bio_hook = btrfs_merge_bio_hook,
3678 .writepage_io_failed_hook = btree_writepage_io_failed_hook,
3568}; 3679};
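A recurring pattern in the disk-io.c changes above is the async submission path: run_one_async_start now records a failure from the checksum phase in the per-request struct, and run_one_async_done checks that field and completes the bio with the error instead of submitting it. Below is a compact stand-alone sketch of that two-phase "record the error, short-circuit the completion" shape; the request struct and phase helpers are hypothetical names, not the btrfs code.

#include <stdio.h>

struct async_req {
	int payload;
	int error;		/* set by the start phase, read by the done phase */
};

static int checksum_phase(struct async_req *req)
{
	return req->payload < 0 ? -5 : 0;	/* pretend -EIO on bad input */
}

static void complete_with_error(struct async_req *req, int err)
{
	fprintf(stderr, "request %d failed early: %d\n", req->payload, err);
}

static void submit_phase(struct async_req *req)
{
	printf("submitting payload %d\n", req->payload);
}

static void run_start(struct async_req *req)
{
	int ret = checksum_phase(req);

	if (ret)
		req->error = ret;	/* remember it; never submit later */
}

static void run_done(struct async_req *req)
{
	if (req->error) {		/* clean up and skip the real submit */
		complete_with_error(req, req->error);
		return;
	}
	submit_phase(req);
}

int main(void)
{
	struct async_req good = { .payload = 1,  .error = 0 };
	struct async_req bad  = { .payload = -1, .error = 0 };

	run_start(&good); run_done(&good);
	run_start(&bad);  run_done(&bad);
	return 0;
}

Keeping the error in the request itself is what lets the two phases run on different workers without any extra return channel between them.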
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e4bc4741319b..a7ace1a2dd12 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -44,8 +44,8 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
44 int mirror_num, struct extent_buffer **eb); 44 int mirror_num, struct extent_buffer **eb);
45struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, 45struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
46 u64 bytenr, u32 blocksize); 46 u64 bytenr, u32 blocksize);
47int clean_tree_block(struct btrfs_trans_handle *trans, 47void clean_tree_block(struct btrfs_trans_handle *trans,
48 struct btrfs_root *root, struct extent_buffer *buf); 48 struct btrfs_root *root, struct extent_buffer *buf);
49int open_ctree(struct super_block *sb, 49int open_ctree(struct super_block *sb,
50 struct btrfs_fs_devices *fs_devices, 50 struct btrfs_fs_devices *fs_devices,
51 char *options); 51 char *options);
@@ -64,7 +64,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
64int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); 64int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
65void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); 65void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
66void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); 66void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
67int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); 67void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
68void btrfs_mark_buffer_dirty(struct extent_buffer *buf); 68void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
69int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); 69int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
70int btrfs_set_buffer_uptodate(struct extent_buffer *buf); 70int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
@@ -85,6 +85,10 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
85 struct btrfs_fs_info *fs_info); 85 struct btrfs_fs_info *fs_info);
86int btrfs_add_log_tree(struct btrfs_trans_handle *trans, 86int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
87 struct btrfs_root *root); 87 struct btrfs_root *root);
88int btrfs_cleanup_transaction(struct btrfs_root *root);
89void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
90 struct btrfs_root *root);
91void btrfs_abort_devices(struct btrfs_root *root);
88 92
89#ifdef CONFIG_DEBUG_LOCK_ALLOC 93#ifdef CONFIG_DEBUG_LOCK_ALLOC
90void btrfs_init_lockdep(void); 94void btrfs_init_lockdep(void);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 5f77166fd01c..e887ee62b6d4 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -193,7 +193,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
193 if (ret < 0) 193 if (ret < 0)
194 goto fail; 194 goto fail;
195 195
196 BUG_ON(ret == 0); 196 BUG_ON(ret == 0); /* Key with offset of -1 found */
197 if (path->slots[0] == 0) { 197 if (path->slots[0] == 0) {
198 ret = -ENOENT; 198 ret = -ENOENT;
199 goto fail; 199 goto fail;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1b831ac4c079..8b304e3537c4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -245,7 +245,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
245 cache->bytes_super += stripe_len; 245 cache->bytes_super += stripe_len;
246 ret = add_excluded_extent(root, cache->key.objectid, 246 ret = add_excluded_extent(root, cache->key.objectid,
247 stripe_len); 247 stripe_len);
248 BUG_ON(ret); 248 BUG_ON(ret); /* -ENOMEM */
249 } 249 }
250 250
251 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 251 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -253,13 +253,13 @@ static int exclude_super_stripes(struct btrfs_root *root,
253 ret = btrfs_rmap_block(&root->fs_info->mapping_tree, 253 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
254 cache->key.objectid, bytenr, 254 cache->key.objectid, bytenr,
255 0, &logical, &nr, &stripe_len); 255 0, &logical, &nr, &stripe_len);
256 BUG_ON(ret); 256 BUG_ON(ret); /* -ENOMEM */
257 257
258 while (nr--) { 258 while (nr--) {
259 cache->bytes_super += stripe_len; 259 cache->bytes_super += stripe_len;
260 ret = add_excluded_extent(root, logical[nr], 260 ret = add_excluded_extent(root, logical[nr],
261 stripe_len); 261 stripe_len);
262 BUG_ON(ret); 262 BUG_ON(ret); /* -ENOMEM */
263 } 263 }
264 264
265 kfree(logical); 265 kfree(logical);
@@ -321,7 +321,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
321 total_added += size; 321 total_added += size;
322 ret = btrfs_add_free_space(block_group, start, 322 ret = btrfs_add_free_space(block_group, start,
323 size); 323 size);
324 BUG_ON(ret); 324 BUG_ON(ret); /* -ENOMEM or logic error */
325 start = extent_end + 1; 325 start = extent_end + 1;
326 } else { 326 } else {
327 break; 327 break;
@@ -332,7 +332,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
332 size = end - start; 332 size = end - start;
333 total_added += size; 333 total_added += size;
334 ret = btrfs_add_free_space(block_group, start, size); 334 ret = btrfs_add_free_space(block_group, start, size);
335 BUG_ON(ret); 335 BUG_ON(ret); /* -ENOMEM or logic error */
336 } 336 }
337 337
338 return total_added; 338 return total_added;
@@ -474,7 +474,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
474 int ret = 0; 474 int ret = 0;
475 475
476 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); 476 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
477 BUG_ON(!caching_ctl); 477 if (!caching_ctl)
478 return -ENOMEM;
478 479
479 INIT_LIST_HEAD(&caching_ctl->list); 480 INIT_LIST_HEAD(&caching_ctl->list);
480 mutex_init(&caching_ctl->mutex); 481 mutex_init(&caching_ctl->mutex);
@@ -982,7 +983,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
982 ret = btrfs_next_leaf(root, path); 983 ret = btrfs_next_leaf(root, path);
983 if (ret < 0) 984 if (ret < 0)
984 return ret; 985 return ret;
985 BUG_ON(ret > 0); 986 BUG_ON(ret > 0); /* Corruption */
986 leaf = path->nodes[0]; 987 leaf = path->nodes[0];
987 } 988 }
988 btrfs_item_key_to_cpu(leaf, &found_key, 989 btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1008,9 +1009,9 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
1008 new_size + extra_size, 1); 1009 new_size + extra_size, 1);
1009 if (ret < 0) 1010 if (ret < 0)
1010 return ret; 1011 return ret;
1011 BUG_ON(ret); 1012 BUG_ON(ret); /* Corruption */
1012 1013
1013 ret = btrfs_extend_item(trans, root, path, new_size); 1014 btrfs_extend_item(trans, root, path, new_size);
1014 1015
1015 leaf = path->nodes[0]; 1016 leaf = path->nodes[0];
1016 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1017 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1478,7 +1479,11 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1478 err = ret; 1479 err = ret;
1479 goto out; 1480 goto out;
1480 } 1481 }
1481 BUG_ON(ret); 1482 if (ret && !insert) {
1483 err = -ENOENT;
1484 goto out;
1485 }
1486 BUG_ON(ret); /* Corruption */
1482 1487
1483 leaf = path->nodes[0]; 1488 leaf = path->nodes[0];
1484 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1489 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
@@ -1592,13 +1597,13 @@ out:
1592 * helper to add new inline back ref 1597 * helper to add new inline back ref
1593 */ 1598 */
1594static noinline_for_stack 1599static noinline_for_stack
1595int setup_inline_extent_backref(struct btrfs_trans_handle *trans, 1600void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1596 struct btrfs_root *root, 1601 struct btrfs_root *root,
1597 struct btrfs_path *path, 1602 struct btrfs_path *path,
1598 struct btrfs_extent_inline_ref *iref, 1603 struct btrfs_extent_inline_ref *iref,
1599 u64 parent, u64 root_objectid, 1604 u64 parent, u64 root_objectid,
1600 u64 owner, u64 offset, int refs_to_add, 1605 u64 owner, u64 offset, int refs_to_add,
1601 struct btrfs_delayed_extent_op *extent_op) 1606 struct btrfs_delayed_extent_op *extent_op)
1602{ 1607{
1603 struct extent_buffer *leaf; 1608 struct extent_buffer *leaf;
1604 struct btrfs_extent_item *ei; 1609 struct btrfs_extent_item *ei;
@@ -1608,7 +1613,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1608 u64 refs; 1613 u64 refs;
1609 int size; 1614 int size;
1610 int type; 1615 int type;
1611 int ret;
1612 1616
1613 leaf = path->nodes[0]; 1617 leaf = path->nodes[0];
1614 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1618 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1617,7 +1621,7 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1617 type = extent_ref_type(parent, owner); 1621 type = extent_ref_type(parent, owner);
1618 size = btrfs_extent_inline_ref_size(type); 1622 size = btrfs_extent_inline_ref_size(type);
1619 1623
1620 ret = btrfs_extend_item(trans, root, path, size); 1624 btrfs_extend_item(trans, root, path, size);
1621 1625
1622 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1626 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1623 refs = btrfs_extent_refs(leaf, ei); 1627 refs = btrfs_extent_refs(leaf, ei);
@@ -1652,7 +1656,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1652 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); 1656 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1653 } 1657 }
1654 btrfs_mark_buffer_dirty(leaf); 1658 btrfs_mark_buffer_dirty(leaf);
1655 return 0;
1656} 1659}
1657 1660
1658static int lookup_extent_backref(struct btrfs_trans_handle *trans, 1661static int lookup_extent_backref(struct btrfs_trans_handle *trans,
@@ -1687,12 +1690,12 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1687 * helper to update/remove inline back ref 1690 * helper to update/remove inline back ref
1688 */ 1691 */
1689static noinline_for_stack 1692static noinline_for_stack
1690int update_inline_extent_backref(struct btrfs_trans_handle *trans, 1693void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1691 struct btrfs_root *root, 1694 struct btrfs_root *root,
1692 struct btrfs_path *path, 1695 struct btrfs_path *path,
1693 struct btrfs_extent_inline_ref *iref, 1696 struct btrfs_extent_inline_ref *iref,
1694 int refs_to_mod, 1697 int refs_to_mod,
1695 struct btrfs_delayed_extent_op *extent_op) 1698 struct btrfs_delayed_extent_op *extent_op)
1696{ 1699{
1697 struct extent_buffer *leaf; 1700 struct extent_buffer *leaf;
1698 struct btrfs_extent_item *ei; 1701 struct btrfs_extent_item *ei;
@@ -1703,7 +1706,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1703 u32 item_size; 1706 u32 item_size;
1704 int size; 1707 int size;
1705 int type; 1708 int type;
1706 int ret;
1707 u64 refs; 1709 u64 refs;
1708 1710
1709 leaf = path->nodes[0]; 1711 leaf = path->nodes[0];
@@ -1745,10 +1747,9 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1745 memmove_extent_buffer(leaf, ptr, ptr + size, 1747 memmove_extent_buffer(leaf, ptr, ptr + size,
1746 end - ptr - size); 1748 end - ptr - size);
1747 item_size -= size; 1749 item_size -= size;
1748 ret = btrfs_truncate_item(trans, root, path, item_size, 1); 1750 btrfs_truncate_item(trans, root, path, item_size, 1);
1749 } 1751 }
1750 btrfs_mark_buffer_dirty(leaf); 1752 btrfs_mark_buffer_dirty(leaf);
1751 return 0;
1752} 1753}
1753 1754
1754static noinline_for_stack 1755static noinline_for_stack
@@ -1768,13 +1769,13 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1768 root_objectid, owner, offset, 1); 1769 root_objectid, owner, offset, 1);
1769 if (ret == 0) { 1770 if (ret == 0) {
1770 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID); 1771 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1771 ret = update_inline_extent_backref(trans, root, path, iref, 1772 update_inline_extent_backref(trans, root, path, iref,
1772 refs_to_add, extent_op); 1773 refs_to_add, extent_op);
1773 } else if (ret == -ENOENT) { 1774 } else if (ret == -ENOENT) {
1774 ret = setup_inline_extent_backref(trans, root, path, iref, 1775 setup_inline_extent_backref(trans, root, path, iref, parent,
1775 parent, root_objectid, 1776 root_objectid, owner, offset,
1776 owner, offset, refs_to_add, 1777 refs_to_add, extent_op);
1777 extent_op); 1778 ret = 0;
1778 } 1779 }
1779 return ret; 1780 return ret;
1780} 1781}
@@ -1804,12 +1805,12 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
1804 struct btrfs_extent_inline_ref *iref, 1805 struct btrfs_extent_inline_ref *iref,
1805 int refs_to_drop, int is_data) 1806 int refs_to_drop, int is_data)
1806{ 1807{
1807 int ret; 1808 int ret = 0;
1808 1809
1809 BUG_ON(!is_data && refs_to_drop != 1); 1810 BUG_ON(!is_data && refs_to_drop != 1);
1810 if (iref) { 1811 if (iref) {
1811 ret = update_inline_extent_backref(trans, root, path, iref, 1812 update_inline_extent_backref(trans, root, path, iref,
1812 -refs_to_drop, NULL); 1813 -refs_to_drop, NULL);
1813 } else if (is_data) { 1814 } else if (is_data) {
1814 ret = remove_extent_data_ref(trans, root, path, refs_to_drop); 1815 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1815 } else { 1816 } else {
@@ -1835,6 +1836,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1835 /* Tell the block device(s) that the sectors can be discarded */ 1836 /* Tell the block device(s) that the sectors can be discarded */
1836 ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD, 1837 ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1837 bytenr, &num_bytes, &bbio, 0); 1838 bytenr, &num_bytes, &bbio, 0);
1839 /* Error condition is -ENOMEM */
1838 if (!ret) { 1840 if (!ret) {
1839 struct btrfs_bio_stripe *stripe = bbio->stripes; 1841 struct btrfs_bio_stripe *stripe = bbio->stripes;
1840 int i; 1842 int i;
@@ -1850,7 +1852,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1850 if (!ret) 1852 if (!ret)
1851 discarded_bytes += stripe->length; 1853 discarded_bytes += stripe->length;
1852 else if (ret != -EOPNOTSUPP) 1854 else if (ret != -EOPNOTSUPP)
1853 break; 1855 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1854 1856
1855 /* 1857 /*
1856 * Just in case we get back EOPNOTSUPP for some reason, 1858 * Just in case we get back EOPNOTSUPP for some reason,
@@ -1869,6 +1871,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1869 return ret; 1871 return ret;
1870} 1872}
1871 1873
1874/* Can return -ENOMEM */
1872int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 1875int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1873 struct btrfs_root *root, 1876 struct btrfs_root *root,
1874 u64 bytenr, u64 num_bytes, u64 parent, 1877 u64 bytenr, u64 num_bytes, u64 parent,
@@ -1944,7 +1947,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1944 ret = insert_extent_backref(trans, root->fs_info->extent_root, 1947 ret = insert_extent_backref(trans, root->fs_info->extent_root,
1945 path, bytenr, parent, root_objectid, 1948 path, bytenr, parent, root_objectid,
1946 owner, offset, refs_to_add); 1949 owner, offset, refs_to_add);
1947 BUG_ON(ret); 1950 if (ret)
1951 btrfs_abort_transaction(trans, root, ret);
1948out: 1952out:
1949 btrfs_free_path(path); 1953 btrfs_free_path(path);
1950 return err; 1954 return err;
@@ -2031,6 +2035,9 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2031 int ret; 2035 int ret;
2032 int err = 0; 2036 int err = 0;
2033 2037
2038 if (trans->aborted)
2039 return 0;
2040
2034 path = btrfs_alloc_path(); 2041 path = btrfs_alloc_path();
2035 if (!path) 2042 if (!path)
2036 return -ENOMEM; 2043 return -ENOMEM;
@@ -2128,7 +2135,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2128 struct btrfs_delayed_extent_op *extent_op, 2135 struct btrfs_delayed_extent_op *extent_op,
2129 int insert_reserved) 2136 int insert_reserved)
2130{ 2137{
2131 int ret; 2138 int ret = 0;
2139
2140 if (trans->aborted)
2141 return 0;
2142
2132 if (btrfs_delayed_ref_is_head(node)) { 2143 if (btrfs_delayed_ref_is_head(node)) {
2133 struct btrfs_delayed_ref_head *head; 2144 struct btrfs_delayed_ref_head *head;
2134 /* 2145 /*
@@ -2146,11 +2157,10 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2146 ret = btrfs_del_csums(trans, root, 2157 ret = btrfs_del_csums(trans, root,
2147 node->bytenr, 2158 node->bytenr,
2148 node->num_bytes); 2159 node->num_bytes);
2149 BUG_ON(ret);
2150 } 2160 }
2151 } 2161 }
2152 mutex_unlock(&head->mutex); 2162 mutex_unlock(&head->mutex);
2153 return 0; 2163 return ret;
2154 } 2164 }
2155 2165
2156 if (node->type == BTRFS_TREE_BLOCK_REF_KEY || 2166 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
@@ -2197,6 +2207,10 @@ again:
2197 return NULL; 2207 return NULL;
2198} 2208}
2199 2209
2210/*
2211 * Returns 0 on success or if called with an already aborted transaction.
2212 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2213 */
2200static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, 2214static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2201 struct btrfs_root *root, 2215 struct btrfs_root *root,
2202 struct list_head *cluster) 2216 struct list_head *cluster)
@@ -2285,9 +2299,13 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2285 2299
2286 ret = run_delayed_extent_op(trans, root, 2300 ret = run_delayed_extent_op(trans, root,
2287 ref, extent_op); 2301 ref, extent_op);
2288 BUG_ON(ret);
2289 kfree(extent_op); 2302 kfree(extent_op);
2290 2303
2304 if (ret) {
2305 printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
2306 return ret;
2307 }
2308
2291 goto next; 2309 goto next;
2292 } 2310 }
2293 2311
@@ -2308,11 +2326,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2308 2326
2309 ret = run_one_delayed_ref(trans, root, ref, extent_op, 2327 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2310 must_insert_reserved); 2328 must_insert_reserved);
2311 BUG_ON(ret);
2312 2329
2313 btrfs_put_delayed_ref(ref); 2330 btrfs_put_delayed_ref(ref);
2314 kfree(extent_op); 2331 kfree(extent_op);
2315 count++; 2332 count++;
2333
2334 if (ret) {
2335 printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
2336 return ret;
2337 }
2338
2316next: 2339next:
2317 do_chunk_alloc(trans, root->fs_info->extent_root, 2340 do_chunk_alloc(trans, root->fs_info->extent_root,
2318 2 * 1024 * 1024, 2341 2 * 1024 * 1024,
@@ -2347,6 +2370,9 @@ static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
2347 * 0, which means to process everything in the tree at the start 2370 * 0, which means to process everything in the tree at the start
2348 * of the run (but not newly added entries), or it can be some target 2371 * of the run (but not newly added entries), or it can be some target
2349 * number you'd like to process. 2372 * number you'd like to process.
2373 *
2374 * Returns 0 on success or if called with an aborted transaction
2375 * Returns <0 on error and aborts the transaction
2350 */ 2376 */
2351int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2377int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2352 struct btrfs_root *root, unsigned long count) 2378 struct btrfs_root *root, unsigned long count)
@@ -2362,6 +2388,10 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2362 unsigned long num_refs = 0; 2388 unsigned long num_refs = 0;
2363 int consider_waiting; 2389 int consider_waiting;
2364 2390
2391 /* We'll clean this up in btrfs_cleanup_transaction */
2392 if (trans->aborted)
2393 return 0;
2394
2365 if (root == root->fs_info->extent_root) 2395 if (root == root->fs_info->extent_root)
2366 root = root->fs_info->tree_root; 2396 root = root->fs_info->tree_root;
2367 2397
@@ -2419,7 +2449,11 @@ again:
2419 } 2449 }
2420 2450
2421 ret = run_clustered_refs(trans, root, &cluster); 2451 ret = run_clustered_refs(trans, root, &cluster);
2422 BUG_ON(ret < 0); 2452 if (ret < 0) {
2453 spin_unlock(&delayed_refs->lock);
2454 btrfs_abort_transaction(trans, root, ret);
2455 return ret;
2456 }
2423 2457
2424 count -= min_t(unsigned long, ret, count); 2458 count -= min_t(unsigned long, ret, count);
2425 2459
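
The hunks above show the pattern this series applies to the delayed-ref code: instead of BUG_ON(ret), entry points bail out early once trans->aborted is set, and an unrecoverable failure flags the transaction with btrfs_abort_transaction() before the error is returned. A minimal sketch of that shape follows; it assumes only the btrfs types used in the surrounding code, and do_tree_mod() is a hypothetical stand-in for whatever modification the real function performs.

/* Hypothetical helper standing in for a tree modification. */
static int do_tree_mod(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root);

static int example_run_step(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	int ret;

	/* An earlier step already failed and flagged the transaction;
	 * as the comment in btrfs_run_delayed_refs notes, leftovers are
	 * dealt with in btrfs_cleanup_transaction. */
	if (trans->aborted)
		return 0;

	ret = do_tree_mod(trans, root);
	if (ret) {
		/* Force the filesystem read-only instead of BUG_ON(ret). */
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	return 0;
}
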
@@ -2584,7 +2618,7 @@ static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2584 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2618 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2585 if (ret < 0) 2619 if (ret < 0)
2586 goto out; 2620 goto out;
2587 BUG_ON(ret == 0); 2621 BUG_ON(ret == 0); /* Corruption */
2588 2622
2589 ret = -ENOENT; 2623 ret = -ENOENT;
2590 if (path->slots[0] == 0) 2624 if (path->slots[0] == 0)
@@ -2738,7 +2772,6 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2738 } 2772 }
2739 return 0; 2773 return 0;
2740fail: 2774fail:
2741 BUG();
2742 return ret; 2775 return ret;
2743} 2776}
2744 2777
@@ -2767,7 +2800,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
2767 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); 2800 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2768 if (ret < 0) 2801 if (ret < 0)
2769 goto fail; 2802 goto fail;
2770 BUG_ON(ret); 2803 BUG_ON(ret); /* Corruption */
2771 2804
2772 leaf = path->nodes[0]; 2805 leaf = path->nodes[0];
2773 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 2806 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -2775,8 +2808,10 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
2775 btrfs_mark_buffer_dirty(leaf); 2808 btrfs_mark_buffer_dirty(leaf);
2776 btrfs_release_path(path); 2809 btrfs_release_path(path);
2777fail: 2810fail:
2778 if (ret) 2811 if (ret) {
2812 btrfs_abort_transaction(trans, root, ret);
2779 return ret; 2813 return ret;
2814 }
2780 return 0; 2815 return 0;
2781 2816
2782} 2817}
@@ -2949,7 +2984,8 @@ again:
2949 if (last == 0) { 2984 if (last == 0) {
2950 err = btrfs_run_delayed_refs(trans, root, 2985 err = btrfs_run_delayed_refs(trans, root,
2951 (unsigned long)-1); 2986 (unsigned long)-1);
2952 BUG_ON(err); 2987 if (err) /* File system offline */
2988 goto out;
2953 } 2989 }
2954 2990
2955 cache = btrfs_lookup_first_block_group(root->fs_info, last); 2991 cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -2976,7 +3012,9 @@ again:
2976 last = cache->key.objectid + cache->key.offset; 3012 last = cache->key.objectid + cache->key.offset;
2977 3013
2978 err = write_one_cache_group(trans, root, path, cache); 3014 err = write_one_cache_group(trans, root, path, cache);
2979 BUG_ON(err); 3015 if (err) /* File system offline */
3016 goto out;
3017
2980 btrfs_put_block_group(cache); 3018 btrfs_put_block_group(cache);
2981 } 3019 }
2982 3020
@@ -2989,7 +3027,8 @@ again:
2989 if (last == 0) { 3027 if (last == 0) {
2990 err = btrfs_run_delayed_refs(trans, root, 3028 err = btrfs_run_delayed_refs(trans, root,
2991 (unsigned long)-1); 3029 (unsigned long)-1);
2992 BUG_ON(err); 3030 if (err) /* File system offline */
3031 goto out;
2993 } 3032 }
2994 3033
2995 cache = btrfs_lookup_first_block_group(root->fs_info, last); 3034 cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -3014,20 +3053,21 @@ again:
3014 continue; 3053 continue;
3015 } 3054 }
3016 3055
3017 btrfs_write_out_cache(root, trans, cache, path); 3056 err = btrfs_write_out_cache(root, trans, cache, path);
3018 3057
3019 /* 3058 /*
3020 * If we didn't have an error then the cache state is still 3059 * If we didn't have an error then the cache state is still
3021 * NEED_WRITE, so we can set it to WRITTEN. 3060 * NEED_WRITE, so we can set it to WRITTEN.
3022 */ 3061 */
3023 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) 3062 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3024 cache->disk_cache_state = BTRFS_DC_WRITTEN; 3063 cache->disk_cache_state = BTRFS_DC_WRITTEN;
3025 last = cache->key.objectid + cache->key.offset; 3064 last = cache->key.objectid + cache->key.offset;
3026 btrfs_put_block_group(cache); 3065 btrfs_put_block_group(cache);
3027 } 3066 }
3067out:
3028 3068
3029 btrfs_free_path(path); 3069 btrfs_free_path(path);
3030 return 0; 3070 return err;
3031} 3071}
3032 3072
3033int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) 3073int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -3411,9 +3451,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3411 if (!space_info) { 3451 if (!space_info) {
3412 ret = update_space_info(extent_root->fs_info, flags, 3452 ret = update_space_info(extent_root->fs_info, flags,
3413 0, 0, &space_info); 3453 0, 0, &space_info);
3414 BUG_ON(ret); 3454 BUG_ON(ret); /* -ENOMEM */
3415 } 3455 }
3416 BUG_ON(!space_info); 3456 BUG_ON(!space_info); /* Logic error */
3417 3457
3418again: 3458again:
3419 spin_lock(&space_info->lock); 3459 spin_lock(&space_info->lock);
@@ -3678,8 +3718,10 @@ again:
3678 ret = wait_event_interruptible(space_info->wait, 3718 ret = wait_event_interruptible(space_info->wait,
3679 !space_info->flush); 3719 !space_info->flush);
3680 /* Must have been interrupted, return */ 3720 /* Must have been interrupted, return */
3681 if (ret) 3721 if (ret) {
3722 printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__);
3682 return -EINTR; 3723 return -EINTR;
3724 }
3683 3725
3684 spin_lock(&space_info->lock); 3726 spin_lock(&space_info->lock);
3685 } 3727 }
@@ -3836,8 +3878,9 @@ out:
3836 return ret; 3878 return ret;
3837} 3879}
3838 3880
3839static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans, 3881static struct btrfs_block_rsv *get_block_rsv(
3840 struct btrfs_root *root) 3882 const struct btrfs_trans_handle *trans,
3883 const struct btrfs_root *root)
3841{ 3884{
3842 struct btrfs_block_rsv *block_rsv = NULL; 3885 struct btrfs_block_rsv *block_rsv = NULL;
3843 3886
@@ -4204,6 +4247,7 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4204 trans->bytes_reserved = 0; 4247 trans->bytes_reserved = 0;
4205} 4248}
4206 4249
4250/* Can only return 0 or -ENOSPC */
4207int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, 4251int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4208 struct inode *inode) 4252 struct inode *inode)
4209{ 4253{
@@ -4540,7 +4584,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
4540 while (total) { 4584 while (total) {
4541 cache = btrfs_lookup_block_group(info, bytenr); 4585 cache = btrfs_lookup_block_group(info, bytenr);
4542 if (!cache) 4586 if (!cache)
4543 return -1; 4587 return -ENOENT;
4544 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP | 4588 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4545 BTRFS_BLOCK_GROUP_RAID1 | 4589 BTRFS_BLOCK_GROUP_RAID1 |
4546 BTRFS_BLOCK_GROUP_RAID10)) 4590 BTRFS_BLOCK_GROUP_RAID10))
@@ -4643,7 +4687,7 @@ int btrfs_pin_extent(struct btrfs_root *root,
4643 struct btrfs_block_group_cache *cache; 4687 struct btrfs_block_group_cache *cache;
4644 4688
4645 cache = btrfs_lookup_block_group(root->fs_info, bytenr); 4689 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4646 BUG_ON(!cache); 4690 BUG_ON(!cache); /* Logic error */
4647 4691
4648 pin_down_extent(root, cache, bytenr, num_bytes, reserved); 4692 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4649 4693
@@ -4661,7 +4705,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4661 struct btrfs_block_group_cache *cache; 4705 struct btrfs_block_group_cache *cache;
4662 4706
4663 cache = btrfs_lookup_block_group(root->fs_info, bytenr); 4707 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4664 BUG_ON(!cache); 4708 BUG_ON(!cache); /* Logic error */
4665 4709
4666 /* 4710 /*
4667 * pull in the free space cache (if any) so that our pin 4711 * pull in the free space cache (if any) so that our pin
@@ -4706,6 +4750,7 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4706{ 4750{
4707 struct btrfs_space_info *space_info = cache->space_info; 4751 struct btrfs_space_info *space_info = cache->space_info;
4708 int ret = 0; 4752 int ret = 0;
4753
4709 spin_lock(&space_info->lock); 4754 spin_lock(&space_info->lock);
4710 spin_lock(&cache->lock); 4755 spin_lock(&cache->lock);
4711 if (reserve != RESERVE_FREE) { 4756 if (reserve != RESERVE_FREE) {
@@ -4734,7 +4779,7 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4734 return ret; 4779 return ret;
4735} 4780}
4736 4781
4737int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 4782void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4738 struct btrfs_root *root) 4783 struct btrfs_root *root)
4739{ 4784{
4740 struct btrfs_fs_info *fs_info = root->fs_info; 4785 struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4764,7 +4809,6 @@ int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4764 up_write(&fs_info->extent_commit_sem); 4809 up_write(&fs_info->extent_commit_sem);
4765 4810
4766 update_global_block_rsv(fs_info); 4811 update_global_block_rsv(fs_info);
4767 return 0;
4768} 4812}
4769 4813
4770static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) 4814static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
@@ -4779,7 +4823,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4779 if (cache) 4823 if (cache)
4780 btrfs_put_block_group(cache); 4824 btrfs_put_block_group(cache);
4781 cache = btrfs_lookup_block_group(fs_info, start); 4825 cache = btrfs_lookup_block_group(fs_info, start);
4782 BUG_ON(!cache); 4826 BUG_ON(!cache); /* Logic error */
4783 } 4827 }
4784 4828
4785 len = cache->key.objectid + cache->key.offset - start; 4829 len = cache->key.objectid + cache->key.offset - start;
@@ -4816,6 +4860,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4816 u64 end; 4860 u64 end;
4817 int ret; 4861 int ret;
4818 4862
4863 if (trans->aborted)
4864 return 0;
4865
4819 if (fs_info->pinned_extents == &fs_info->freed_extents[0]) 4866 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4820 unpin = &fs_info->freed_extents[1]; 4867 unpin = &fs_info->freed_extents[1];
4821 else 4868 else
@@ -4901,7 +4948,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4901 ret = remove_extent_backref(trans, extent_root, path, 4948 ret = remove_extent_backref(trans, extent_root, path,
4902 NULL, refs_to_drop, 4949 NULL, refs_to_drop,
4903 is_data); 4950 is_data);
4904 BUG_ON(ret); 4951 if (ret)
4952 goto abort;
4905 btrfs_release_path(path); 4953 btrfs_release_path(path);
4906 path->leave_spinning = 1; 4954 path->leave_spinning = 1;
4907 4955
@@ -4919,10 +4967,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4919 btrfs_print_leaf(extent_root, 4967 btrfs_print_leaf(extent_root,
4920 path->nodes[0]); 4968 path->nodes[0]);
4921 } 4969 }
4922 BUG_ON(ret); 4970 if (ret < 0)
4971 goto abort;
4923 extent_slot = path->slots[0]; 4972 extent_slot = path->slots[0];
4924 } 4973 }
4925 } else { 4974 } else if (ret == -ENOENT) {
4926 btrfs_print_leaf(extent_root, path->nodes[0]); 4975 btrfs_print_leaf(extent_root, path->nodes[0]);
4927 WARN_ON(1); 4976 WARN_ON(1);
4928 printk(KERN_ERR "btrfs unable to find ref byte nr %llu " 4977 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
@@ -4932,6 +4981,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4932 (unsigned long long)root_objectid, 4981 (unsigned long long)root_objectid,
4933 (unsigned long long)owner_objectid, 4982 (unsigned long long)owner_objectid,
4934 (unsigned long long)owner_offset); 4983 (unsigned long long)owner_offset);
4984 } else {
4985 goto abort;
4935 } 4986 }
4936 4987
4937 leaf = path->nodes[0]; 4988 leaf = path->nodes[0];
@@ -4941,7 +4992,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4941 BUG_ON(found_extent || extent_slot != path->slots[0]); 4992 BUG_ON(found_extent || extent_slot != path->slots[0]);
4942 ret = convert_extent_item_v0(trans, extent_root, path, 4993 ret = convert_extent_item_v0(trans, extent_root, path,
4943 owner_objectid, 0); 4994 owner_objectid, 0);
4944 BUG_ON(ret < 0); 4995 if (ret < 0)
4996 goto abort;
4945 4997
4946 btrfs_release_path(path); 4998 btrfs_release_path(path);
4947 path->leave_spinning = 1; 4999 path->leave_spinning = 1;
@@ -4958,7 +5010,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4958 (unsigned long long)bytenr); 5010 (unsigned long long)bytenr);
4959 btrfs_print_leaf(extent_root, path->nodes[0]); 5011 btrfs_print_leaf(extent_root, path->nodes[0]);
4960 } 5012 }
4961 BUG_ON(ret); 5013 if (ret < 0)
5014 goto abort;
4962 extent_slot = path->slots[0]; 5015 extent_slot = path->slots[0];
4963 leaf = path->nodes[0]; 5016 leaf = path->nodes[0];
4964 item_size = btrfs_item_size_nr(leaf, extent_slot); 5017 item_size = btrfs_item_size_nr(leaf, extent_slot);
@@ -4995,7 +5048,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4995 ret = remove_extent_backref(trans, extent_root, path, 5048 ret = remove_extent_backref(trans, extent_root, path,
4996 iref, refs_to_drop, 5049 iref, refs_to_drop,
4997 is_data); 5050 is_data);
4998 BUG_ON(ret); 5051 if (ret)
5052 goto abort;
4999 } 5053 }
5000 } else { 5054 } else {
5001 if (found_extent) { 5055 if (found_extent) {
@@ -5012,19 +5066,27 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5012 5066
5013 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], 5067 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5014 num_to_del); 5068 num_to_del);
5015 BUG_ON(ret); 5069 if (ret)
5070 goto abort;
5016 btrfs_release_path(path); 5071 btrfs_release_path(path);
5017 5072
5018 if (is_data) { 5073 if (is_data) {
5019 ret = btrfs_del_csums(trans, root, bytenr, num_bytes); 5074 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5020 BUG_ON(ret); 5075 if (ret)
5076 goto abort;
5021 } 5077 }
5022 5078
5023 ret = update_block_group(trans, root, bytenr, num_bytes, 0); 5079 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5024 BUG_ON(ret); 5080 if (ret)
5081 goto abort;
5025 } 5082 }
5083out:
5026 btrfs_free_path(path); 5084 btrfs_free_path(path);
5027 return ret; 5085 return ret;
5086
5087abort:
5088 btrfs_abort_transaction(trans, extent_root, ret);
5089 goto out;
5028} 5090}
5029 5091
5030/* 5092/*
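
__btrfs_free_extent() above keeps a single abort: label so that every failing step marks the transaction exactly once and then reuses the normal exit path. A stripped-down sketch of that funnel, with step_a()/step_b() as hypothetical placeholders for the backref and block-group updates the real function performs:

/* Hypothetical placeholders for the individual tree operations. */
static int step_a(struct btrfs_trans_handle *trans, struct btrfs_root *root);
static int step_b(struct btrfs_trans_handle *trans, struct btrfs_root *root);

static int example_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root)
{
	int ret;

	ret = step_a(trans, extent_root);
	if (ret)
		goto abort;
	ret = step_b(trans, extent_root);
	if (ret)
		goto abort;
out:
	return ret;
abort:
	/* Mark the transaction aborted in one place, then share the exit. */
	btrfs_abort_transaction(trans, extent_root, ret);
	goto out;
}
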
@@ -5120,7 +5182,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5120 parent, root->root_key.objectid, 5182 parent, root->root_key.objectid,
5121 btrfs_header_level(buf), 5183 btrfs_header_level(buf),
5122 BTRFS_DROP_DELAYED_REF, NULL, for_cow); 5184 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5123 BUG_ON(ret); 5185 BUG_ON(ret); /* -ENOMEM */
5124 } 5186 }
5125 5187
5126 if (!last_ref) 5188 if (!last_ref)
@@ -5154,6 +5216,7 @@ out:
5154 btrfs_put_block_group(cache); 5216 btrfs_put_block_group(cache);
5155} 5217}
5156 5218
5219/* Can return -ENOMEM */
5157int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, 5220int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5158 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 5221 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5159 u64 owner, u64 offset, int for_cow) 5222 u64 owner, u64 offset, int for_cow)
@@ -5175,14 +5238,12 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5175 num_bytes, 5238 num_bytes,
5176 parent, root_objectid, (int)owner, 5239 parent, root_objectid, (int)owner,
5177 BTRFS_DROP_DELAYED_REF, NULL, for_cow); 5240 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5178 BUG_ON(ret);
5179 } else { 5241 } else {
5180 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr, 5242 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5181 num_bytes, 5243 num_bytes,
5182 parent, root_objectid, owner, 5244 parent, root_objectid, owner,
5183 offset, BTRFS_DROP_DELAYED_REF, 5245 offset, BTRFS_DROP_DELAYED_REF,
5184 NULL, for_cow); 5246 NULL, for_cow);
5185 BUG_ON(ret);
5186 } 5247 }
5187 return ret; 5248 return ret;
5188} 5249}
@@ -5412,7 +5473,8 @@ have_block_group:
5412 found_uncached_bg = true; 5473 found_uncached_bg = true;
5413 ret = cache_block_group(block_group, trans, 5474 ret = cache_block_group(block_group, trans,
5414 orig_root, 0); 5475 orig_root, 0);
5415 BUG_ON(ret); 5476 BUG_ON(ret < 0);
5477 ret = 0;
5416 } 5478 }
5417 5479
5418 if (unlikely(block_group->ro)) 5480 if (unlikely(block_group->ro))
@@ -5631,6 +5693,11 @@ loop:
5631 ret = do_chunk_alloc(trans, root, num_bytes + 5693 ret = do_chunk_alloc(trans, root, num_bytes +
5632 2 * 1024 * 1024, data, 5694 2 * 1024 * 1024, data,
5633 CHUNK_ALLOC_LIMITED); 5695 CHUNK_ALLOC_LIMITED);
5696 if (ret < 0) {
5697 btrfs_abort_transaction(trans,
5698 root, ret);
5699 goto out;
5700 }
5634 allowed_chunk_alloc = 0; 5701 allowed_chunk_alloc = 0;
5635 if (ret == 1) 5702 if (ret == 1)
5636 done_chunk_alloc = 1; 5703 done_chunk_alloc = 1;
@@ -5659,6 +5726,7 @@ loop:
5659 } else if (ins->objectid) { 5726 } else if (ins->objectid) {
5660 ret = 0; 5727 ret = 0;
5661 } 5728 }
5729out:
5662 5730
5663 return ret; 5731 return ret;
5664} 5732}
@@ -5723,10 +5791,15 @@ again:
5723 * the only place that sets empty_size is btrfs_realloc_node, which 5791 * the only place that sets empty_size is btrfs_realloc_node, which
5724 * is not called recursively on allocations 5792 * is not called recursively on allocations
5725 */ 5793 */
5726 if (empty_size || root->ref_cows) 5794 if (empty_size || root->ref_cows) {
5727 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 5795 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5728 num_bytes + 2 * 1024 * 1024, data, 5796 num_bytes + 2 * 1024 * 1024, data,
5729 CHUNK_ALLOC_NO_FORCE); 5797 CHUNK_ALLOC_NO_FORCE);
5798 if (ret < 0 && ret != -ENOSPC) {
5799 btrfs_abort_transaction(trans, root, ret);
5800 return ret;
5801 }
5802 }
5730 5803
5731 WARN_ON(num_bytes < root->sectorsize); 5804 WARN_ON(num_bytes < root->sectorsize);
5732 ret = find_free_extent(trans, root, num_bytes, empty_size, 5805 ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -5737,8 +5810,12 @@ again:
5737 num_bytes = num_bytes >> 1; 5810 num_bytes = num_bytes >> 1;
5738 num_bytes = num_bytes & ~(root->sectorsize - 1); 5811 num_bytes = num_bytes & ~(root->sectorsize - 1);
5739 num_bytes = max(num_bytes, min_alloc_size); 5812 num_bytes = max(num_bytes, min_alloc_size);
5740 do_chunk_alloc(trans, root->fs_info->extent_root, 5813 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5741 num_bytes, data, CHUNK_ALLOC_FORCE); 5814 num_bytes, data, CHUNK_ALLOC_FORCE);
5815 if (ret < 0 && ret != -ENOSPC) {
5816 btrfs_abort_transaction(trans, root, ret);
5817 return ret;
5818 }
5742 if (num_bytes == min_alloc_size) 5819 if (num_bytes == min_alloc_size)
5743 final_tried = true; 5820 final_tried = true;
5744 goto again; 5821 goto again;
@@ -5749,7 +5826,8 @@ again:
5749 printk(KERN_ERR "btrfs allocation failed flags %llu, " 5826 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5750 "wanted %llu\n", (unsigned long long)data, 5827 "wanted %llu\n", (unsigned long long)data,
5751 (unsigned long long)num_bytes); 5828 (unsigned long long)num_bytes);
5752 dump_space_info(sinfo, num_bytes, 1); 5829 if (sinfo)
5830 dump_space_info(sinfo, num_bytes, 1);
5753 } 5831 }
5754 } 5832 }
5755 5833
@@ -5828,7 +5906,10 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5828 path->leave_spinning = 1; 5906 path->leave_spinning = 1;
5829 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 5907 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5830 ins, size); 5908 ins, size);
5831 BUG_ON(ret); 5909 if (ret) {
5910 btrfs_free_path(path);
5911 return ret;
5912 }
5832 5913
5833 leaf = path->nodes[0]; 5914 leaf = path->nodes[0];
5834 extent_item = btrfs_item_ptr(leaf, path->slots[0], 5915 extent_item = btrfs_item_ptr(leaf, path->slots[0],
@@ -5858,7 +5939,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5858 btrfs_free_path(path); 5939 btrfs_free_path(path);
5859 5940
5860 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); 5941 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5861 if (ret) { 5942 if (ret) { /* -ENOENT, logic error */
5862 printk(KERN_ERR "btrfs update block group failed for %llu " 5943 printk(KERN_ERR "btrfs update block group failed for %llu "
5863 "%llu\n", (unsigned long long)ins->objectid, 5944 "%llu\n", (unsigned long long)ins->objectid,
5864 (unsigned long long)ins->offset); 5945 (unsigned long long)ins->offset);
@@ -5889,7 +5970,10 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5889 path->leave_spinning = 1; 5970 path->leave_spinning = 1;
5890 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 5971 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5891 ins, size); 5972 ins, size);
5892 BUG_ON(ret); 5973 if (ret) {
5974 btrfs_free_path(path);
5975 return ret;
5976 }
5893 5977
5894 leaf = path->nodes[0]; 5978 leaf = path->nodes[0];
5895 extent_item = btrfs_item_ptr(leaf, path->slots[0], 5979 extent_item = btrfs_item_ptr(leaf, path->slots[0],
@@ -5919,7 +6003,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5919 btrfs_free_path(path); 6003 btrfs_free_path(path);
5920 6004
5921 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); 6005 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5922 if (ret) { 6006 if (ret) { /* -ENOENT, logic error */
5923 printk(KERN_ERR "btrfs update block group failed for %llu " 6007 printk(KERN_ERR "btrfs update block group failed for %llu "
5924 "%llu\n", (unsigned long long)ins->objectid, 6008 "%llu\n", (unsigned long long)ins->objectid,
5925 (unsigned long long)ins->offset); 6009 (unsigned long long)ins->offset);
@@ -5967,28 +6051,28 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5967 if (!caching_ctl) { 6051 if (!caching_ctl) {
5968 BUG_ON(!block_group_cache_done(block_group)); 6052 BUG_ON(!block_group_cache_done(block_group));
5969 ret = btrfs_remove_free_space(block_group, start, num_bytes); 6053 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5970 BUG_ON(ret); 6054 BUG_ON(ret); /* -ENOMEM */
5971 } else { 6055 } else {
5972 mutex_lock(&caching_ctl->mutex); 6056 mutex_lock(&caching_ctl->mutex);
5973 6057
5974 if (start >= caching_ctl->progress) { 6058 if (start >= caching_ctl->progress) {
5975 ret = add_excluded_extent(root, start, num_bytes); 6059 ret = add_excluded_extent(root, start, num_bytes);
5976 BUG_ON(ret); 6060 BUG_ON(ret); /* -ENOMEM */
5977 } else if (start + num_bytes <= caching_ctl->progress) { 6061 } else if (start + num_bytes <= caching_ctl->progress) {
5978 ret = btrfs_remove_free_space(block_group, 6062 ret = btrfs_remove_free_space(block_group,
5979 start, num_bytes); 6063 start, num_bytes);
5980 BUG_ON(ret); 6064 BUG_ON(ret); /* -ENOMEM */
5981 } else { 6065 } else {
5982 num_bytes = caching_ctl->progress - start; 6066 num_bytes = caching_ctl->progress - start;
5983 ret = btrfs_remove_free_space(block_group, 6067 ret = btrfs_remove_free_space(block_group,
5984 start, num_bytes); 6068 start, num_bytes);
5985 BUG_ON(ret); 6069 BUG_ON(ret); /* -ENOMEM */
5986 6070
5987 start = caching_ctl->progress; 6071 start = caching_ctl->progress;
5988 num_bytes = ins->objectid + ins->offset - 6072 num_bytes = ins->objectid + ins->offset -
5989 caching_ctl->progress; 6073 caching_ctl->progress;
5990 ret = add_excluded_extent(root, start, num_bytes); 6074 ret = add_excluded_extent(root, start, num_bytes);
5991 BUG_ON(ret); 6075 BUG_ON(ret); /* -ENOMEM */
5992 } 6076 }
5993 6077
5994 mutex_unlock(&caching_ctl->mutex); 6078 mutex_unlock(&caching_ctl->mutex);
@@ -5997,7 +6081,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5997 6081
5998 ret = btrfs_update_reserved_bytes(block_group, ins->offset, 6082 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
5999 RESERVE_ALLOC_NO_ACCOUNT); 6083 RESERVE_ALLOC_NO_ACCOUNT);
6000 BUG_ON(ret); 6084 BUG_ON(ret); /* logic error */
6001 btrfs_put_block_group(block_group); 6085 btrfs_put_block_group(block_group);
6002 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 6086 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6003 0, owner, offset, ins, 1); 6087 0, owner, offset, ins, 1);
@@ -6134,7 +6218,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6134 6218
6135 buf = btrfs_init_new_buffer(trans, root, ins.objectid, 6219 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6136 blocksize, level); 6220 blocksize, level);
6137 BUG_ON(IS_ERR(buf)); 6221 BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6138 6222
6139 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 6223 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6140 if (parent == 0) 6224 if (parent == 0)
@@ -6146,7 +6230,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6146 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 6230 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6147 struct btrfs_delayed_extent_op *extent_op; 6231 struct btrfs_delayed_extent_op *extent_op;
6148 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); 6232 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6149 BUG_ON(!extent_op); 6233 BUG_ON(!extent_op); /* -ENOMEM */
6150 if (key) 6234 if (key)
6151 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 6235 memcpy(&extent_op->key, key, sizeof(extent_op->key));
6152 else 6236 else
@@ -6161,7 +6245,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6161 ins.offset, parent, root_objectid, 6245 ins.offset, parent, root_objectid,
6162 level, BTRFS_ADD_DELAYED_EXTENT, 6246 level, BTRFS_ADD_DELAYED_EXTENT,
6163 extent_op, for_cow); 6247 extent_op, for_cow);
6164 BUG_ON(ret); 6248 BUG_ON(ret); /* -ENOMEM */
6165 } 6249 }
6166 return buf; 6250 return buf;
6167} 6251}
@@ -6231,7 +6315,9 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6231 /* We don't lock the tree block, it's OK to be racy here */ 6315 /* We don't lock the tree block, it's OK to be racy here */
6232 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, 6316 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6233 &refs, &flags); 6317 &refs, &flags);
6234 BUG_ON(ret); 6318 /* We don't care about errors in readahead. */
6319 if (ret < 0)
6320 continue;
6235 BUG_ON(refs == 0); 6321 BUG_ON(refs == 0);
6236 6322
6237 if (wc->stage == DROP_REFERENCE) { 6323 if (wc->stage == DROP_REFERENCE) {
@@ -6298,7 +6384,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6298 eb->start, eb->len, 6384 eb->start, eb->len,
6299 &wc->refs[level], 6385 &wc->refs[level],
6300 &wc->flags[level]); 6386 &wc->flags[level]);
6301 BUG_ON(ret); 6387 BUG_ON(ret == -ENOMEM);
6388 if (ret)
6389 return ret;
6302 BUG_ON(wc->refs[level] == 0); 6390 BUG_ON(wc->refs[level] == 0);
6303 } 6391 }
6304 6392
@@ -6317,12 +6405,12 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6317 if (!(wc->flags[level] & flag)) { 6405 if (!(wc->flags[level] & flag)) {
6318 BUG_ON(!path->locks[level]); 6406 BUG_ON(!path->locks[level]);
6319 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc); 6407 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6320 BUG_ON(ret); 6408 BUG_ON(ret); /* -ENOMEM */
6321 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc); 6409 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6322 BUG_ON(ret); 6410 BUG_ON(ret); /* -ENOMEM */
6323 ret = btrfs_set_disk_extent_flags(trans, root, eb->start, 6411 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6324 eb->len, flag, 0); 6412 eb->len, flag, 0);
6325 BUG_ON(ret); 6413 BUG_ON(ret); /* -ENOMEM */
6326 wc->flags[level] |= flag; 6414 wc->flags[level] |= flag;
6327 } 6415 }
6328 6416
@@ -6394,7 +6482,11 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6394 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, 6482 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6395 &wc->refs[level - 1], 6483 &wc->refs[level - 1],
6396 &wc->flags[level - 1]); 6484 &wc->flags[level - 1]);
6397 BUG_ON(ret); 6485 if (ret < 0) {
6486 btrfs_tree_unlock(next);
6487 return ret;
6488 }
6489
6398 BUG_ON(wc->refs[level - 1] == 0); 6490 BUG_ON(wc->refs[level - 1] == 0);
6399 *lookup_info = 0; 6491 *lookup_info = 0;
6400 6492
@@ -6463,7 +6555,7 @@ skip:
6463 6555
6464 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent, 6556 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6465 root->root_key.objectid, level - 1, 0, 0); 6557 root->root_key.objectid, level - 1, 0, 0);
6466 BUG_ON(ret); 6558 BUG_ON(ret); /* -ENOMEM */
6467 } 6559 }
6468 btrfs_tree_unlock(next); 6560 btrfs_tree_unlock(next);
6469 free_extent_buffer(next); 6561 free_extent_buffer(next);
@@ -6521,7 +6613,10 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6521 eb->start, eb->len, 6613 eb->start, eb->len,
6522 &wc->refs[level], 6614 &wc->refs[level],
6523 &wc->flags[level]); 6615 &wc->flags[level]);
6524 BUG_ON(ret); 6616 if (ret < 0) {
6617 btrfs_tree_unlock_rw(eb, path->locks[level]);
6618 return ret;
6619 }
6525 BUG_ON(wc->refs[level] == 0); 6620 BUG_ON(wc->refs[level] == 0);
6526 if (wc->refs[level] == 1) { 6621 if (wc->refs[level] == 1) {
6527 btrfs_tree_unlock_rw(eb, path->locks[level]); 6622 btrfs_tree_unlock_rw(eb, path->locks[level]);
@@ -6541,7 +6636,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6541 else 6636 else
6542 ret = btrfs_dec_ref(trans, root, eb, 0, 6637 ret = btrfs_dec_ref(trans, root, eb, 0,
6543 wc->for_reloc); 6638 wc->for_reloc);
6544 BUG_ON(ret); 6639 BUG_ON(ret); /* -ENOMEM */
6545 } 6640 }
6546 /* make block locked assertion in clean_tree_block happy */ 6641 /* make block locked assertion in clean_tree_block happy */
6547 if (!path->locks[level] && 6642 if (!path->locks[level] &&
@@ -6650,7 +6745,7 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6650 * also make sure backrefs for the shared block and all lower level 6745 * also make sure backrefs for the shared block and all lower level
6651 * blocks are properly updated. 6746 * blocks are properly updated.
6652 */ 6747 */
6653void btrfs_drop_snapshot(struct btrfs_root *root, 6748int btrfs_drop_snapshot(struct btrfs_root *root,
6654 struct btrfs_block_rsv *block_rsv, int update_ref, 6749 struct btrfs_block_rsv *block_rsv, int update_ref,
6655 int for_reloc) 6750 int for_reloc)
6656{ 6751{
@@ -6678,7 +6773,10 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6678 } 6773 }
6679 6774
6680 trans = btrfs_start_transaction(tree_root, 0); 6775 trans = btrfs_start_transaction(tree_root, 0);
6681 BUG_ON(IS_ERR(trans)); 6776 if (IS_ERR(trans)) {
6777 err = PTR_ERR(trans);
6778 goto out_free;
6779 }
6682 6780
6683 if (block_rsv) 6781 if (block_rsv)
6684 trans->block_rsv = block_rsv; 6782 trans->block_rsv = block_rsv;
@@ -6703,7 +6801,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6703 path->lowest_level = 0; 6801 path->lowest_level = 0;
6704 if (ret < 0) { 6802 if (ret < 0) {
6705 err = ret; 6803 err = ret;
6706 goto out_free; 6804 goto out_end_trans;
6707 } 6805 }
6708 WARN_ON(ret > 0); 6806 WARN_ON(ret > 0);
6709 6807
@@ -6723,7 +6821,10 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6723 path->nodes[level]->len, 6821 path->nodes[level]->len,
6724 &wc->refs[level], 6822 &wc->refs[level],
6725 &wc->flags[level]); 6823 &wc->flags[level]);
6726 BUG_ON(ret); 6824 if (ret < 0) {
6825 err = ret;
6826 goto out_end_trans;
6827 }
6727 BUG_ON(wc->refs[level] == 0); 6828 BUG_ON(wc->refs[level] == 0);
6728 6829
6729 if (level == root_item->drop_level) 6830 if (level == root_item->drop_level)
@@ -6774,26 +6875,40 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6774 ret = btrfs_update_root(trans, tree_root, 6875 ret = btrfs_update_root(trans, tree_root,
6775 &root->root_key, 6876 &root->root_key,
6776 root_item); 6877 root_item);
6777 BUG_ON(ret); 6878 if (ret) {
6879 btrfs_abort_transaction(trans, tree_root, ret);
6880 err = ret;
6881 goto out_end_trans;
6882 }
6778 6883
6779 btrfs_end_transaction_throttle(trans, tree_root); 6884 btrfs_end_transaction_throttle(trans, tree_root);
6780 trans = btrfs_start_transaction(tree_root, 0); 6885 trans = btrfs_start_transaction(tree_root, 0);
6781 BUG_ON(IS_ERR(trans)); 6886 if (IS_ERR(trans)) {
6887 err = PTR_ERR(trans);
6888 goto out_free;
6889 }
6782 if (block_rsv) 6890 if (block_rsv)
6783 trans->block_rsv = block_rsv; 6891 trans->block_rsv = block_rsv;
6784 } 6892 }
6785 } 6893 }
6786 btrfs_release_path(path); 6894 btrfs_release_path(path);
6787 BUG_ON(err); 6895 if (err)
6896 goto out_end_trans;
6788 6897
6789 ret = btrfs_del_root(trans, tree_root, &root->root_key); 6898 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6790 BUG_ON(ret); 6899 if (ret) {
6900 btrfs_abort_transaction(trans, tree_root, ret);
6901 goto out_end_trans;
6902 }
6791 6903
6792 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 6904 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6793 ret = btrfs_find_last_root(tree_root, root->root_key.objectid, 6905 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6794 NULL, NULL); 6906 NULL, NULL);
6795 BUG_ON(ret < 0); 6907 if (ret < 0) {
6796 if (ret > 0) { 6908 btrfs_abort_transaction(trans, tree_root, ret);
6909 err = ret;
6910 goto out_end_trans;
6911 } else if (ret > 0) {
6797 /* if we fail to delete the orphan item this time 6912 /* if we fail to delete the orphan item this time
6798 * around, it'll get picked up the next time. 6913 * around, it'll get picked up the next time.
6799 * 6914 *
@@ -6811,14 +6926,15 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6811 free_extent_buffer(root->commit_root); 6926 free_extent_buffer(root->commit_root);
6812 kfree(root); 6927 kfree(root);
6813 } 6928 }
6814out_free: 6929out_end_trans:
6815 btrfs_end_transaction_throttle(trans, tree_root); 6930 btrfs_end_transaction_throttle(trans, tree_root);
6931out_free:
6816 kfree(wc); 6932 kfree(wc);
6817 btrfs_free_path(path); 6933 btrfs_free_path(path);
6818out: 6934out:
6819 if (err) 6935 if (err)
6820 btrfs_std_error(root->fs_info, err); 6936 btrfs_std_error(root->fs_info, err);
6821 return; 6937 return err;
6822} 6938}
6823 6939
6824/* 6940/*
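
btrfs_drop_snapshot() now returns an int, so a caller can see a failed drop rather than relying on the BUG_ON()s that used to be inside the function. A hypothetical caller sketch; the wrapper name and the NULL/0 arguments are illustrative and not taken from this patch:

static void example_drop_snapshot(struct btrfs_root *root)
{
	int ret;

	ret = btrfs_drop_snapshot(root, NULL /* block_rsv */,
				  0 /* update_ref */, 0 /* for_reloc */);
	if (ret)
		printk(KERN_ERR "btrfs: dropping snapshot failed: %d\n", ret);
}

The function still reports the failure through btrfs_std_error() internally, so a caller only needs the return value to decide how to unwind.
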
@@ -7015,12 +7131,16 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
7015 BUG_ON(cache->ro); 7131 BUG_ON(cache->ro);
7016 7132
7017 trans = btrfs_join_transaction(root); 7133 trans = btrfs_join_transaction(root);
7018 BUG_ON(IS_ERR(trans)); 7134 if (IS_ERR(trans))
7135 return PTR_ERR(trans);
7019 7136
7020 alloc_flags = update_block_group_flags(root, cache->flags); 7137 alloc_flags = update_block_group_flags(root, cache->flags);
7021 if (alloc_flags != cache->flags) 7138 if (alloc_flags != cache->flags) {
7022 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 7139 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7023 CHUNK_ALLOC_FORCE); 7140 CHUNK_ALLOC_FORCE);
7141 if (ret < 0)
7142 goto out;
7143 }
7024 7144
7025 ret = set_block_group_ro(cache, 0); 7145 ret = set_block_group_ro(cache, 0);
7026 if (!ret) 7146 if (!ret)
@@ -7100,7 +7220,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7100 return free_bytes; 7220 return free_bytes;
7101} 7221}
7102 7222
7103int btrfs_set_block_group_rw(struct btrfs_root *root, 7223void btrfs_set_block_group_rw(struct btrfs_root *root,
7104 struct btrfs_block_group_cache *cache) 7224 struct btrfs_block_group_cache *cache)
7105{ 7225{
7106 struct btrfs_space_info *sinfo = cache->space_info; 7226 struct btrfs_space_info *sinfo = cache->space_info;
@@ -7116,7 +7236,6 @@ int btrfs_set_block_group_rw(struct btrfs_root *root,
7116 cache->ro = 0; 7236 cache->ro = 0;
7117 spin_unlock(&cache->lock); 7237 spin_unlock(&cache->lock);
7118 spin_unlock(&sinfo->lock); 7238 spin_unlock(&sinfo->lock);
7119 return 0;
7120} 7239}
7121 7240
7122/* 7241/*
@@ -7484,7 +7603,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
7484 ret = update_space_info(info, cache->flags, found_key.offset, 7603 ret = update_space_info(info, cache->flags, found_key.offset,
7485 btrfs_block_group_used(&cache->item), 7604 btrfs_block_group_used(&cache->item),
7486 &space_info); 7605 &space_info);
7487 BUG_ON(ret); 7606 BUG_ON(ret); /* -ENOMEM */
7488 cache->space_info = space_info; 7607 cache->space_info = space_info;
7489 spin_lock(&cache->space_info->lock); 7608 spin_lock(&cache->space_info->lock);
7490 cache->space_info->bytes_readonly += cache->bytes_super; 7609 cache->space_info->bytes_readonly += cache->bytes_super;
@@ -7493,7 +7612,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
7493 __link_block_group(space_info, cache); 7612 __link_block_group(space_info, cache);
7494 7613
7495 ret = btrfs_add_block_group_cache(root->fs_info, cache); 7614 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7496 BUG_ON(ret); 7615 BUG_ON(ret); /* Logic error */
7497 7616
7498 set_avail_alloc_bits(root->fs_info, cache->flags); 7617 set_avail_alloc_bits(root->fs_info, cache->flags);
7499 if (btrfs_chunk_readonly(root, cache->key.objectid)) 7618 if (btrfs_chunk_readonly(root, cache->key.objectid))
@@ -7575,7 +7694,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7575 7694
7576 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, 7695 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7577 &cache->space_info); 7696 &cache->space_info);
7578 BUG_ON(ret); 7697 BUG_ON(ret); /* -ENOMEM */
7579 update_global_block_rsv(root->fs_info); 7698 update_global_block_rsv(root->fs_info);
7580 7699
7581 spin_lock(&cache->space_info->lock); 7700 spin_lock(&cache->space_info->lock);
@@ -7585,11 +7704,14 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7585 __link_block_group(cache->space_info, cache); 7704 __link_block_group(cache->space_info, cache);
7586 7705
7587 ret = btrfs_add_block_group_cache(root->fs_info, cache); 7706 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7588 BUG_ON(ret); 7707 BUG_ON(ret); /* Logic error */
7589 7708
7590 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item, 7709 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7591 sizeof(cache->item)); 7710 sizeof(cache->item));
7592 BUG_ON(ret); 7711 if (ret) {
7712 btrfs_abort_transaction(trans, extent_root, ret);
7713 return ret;
7714 }
7593 7715
7594 set_avail_alloc_bits(extent_root->fs_info, type); 7716 set_avail_alloc_bits(extent_root->fs_info, type);
7595 7717
@@ -7670,7 +7792,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7670 inode = lookup_free_space_inode(tree_root, block_group, path); 7792 inode = lookup_free_space_inode(tree_root, block_group, path);
7671 if (!IS_ERR(inode)) { 7793 if (!IS_ERR(inode)) {
7672 ret = btrfs_orphan_add(trans, inode); 7794 ret = btrfs_orphan_add(trans, inode);
7673 BUG_ON(ret); 7795 if (ret) {
7796 btrfs_add_delayed_iput(inode);
7797 goto out;
7798 }
7674 clear_nlink(inode); 7799 clear_nlink(inode);
7675 /* One for the block groups ref */ 7800 /* One for the block groups ref */
7676 spin_lock(&block_group->lock); 7801 spin_lock(&block_group->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 49a368593a16..0c3ec003f273 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -55,6 +55,11 @@ struct extent_page_data {
55}; 55};
56 56
57static noinline void flush_write_bio(void *data); 57static noinline void flush_write_bio(void *data);
58static inline struct btrfs_fs_info *
59tree_fs_info(struct extent_io_tree *tree)
60{
61 return btrfs_sb(tree->mapping->host->i_sb);
62}
58 63
59int __init extent_io_init(void) 64int __init extent_io_init(void)
60{ 65{
@@ -139,6 +144,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
139#endif 144#endif
140 atomic_set(&state->refs, 1); 145 atomic_set(&state->refs, 1);
141 init_waitqueue_head(&state->wq); 146 init_waitqueue_head(&state->wq);
147 trace_alloc_extent_state(state, mask, _RET_IP_);
142 return state; 148 return state;
143} 149}
144 150
@@ -156,6 +162,7 @@ void free_extent_state(struct extent_state *state)
156 list_del(&state->leak_list); 162 list_del(&state->leak_list);
157 spin_unlock_irqrestore(&leak_lock, flags); 163 spin_unlock_irqrestore(&leak_lock, flags);
158#endif 164#endif
165 trace_free_extent_state(state, _RET_IP_);
159 kmem_cache_free(extent_state_cache, state); 166 kmem_cache_free(extent_state_cache, state);
160 } 167 }
161} 168}
@@ -442,6 +449,13 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
442 return prealloc; 449 return prealloc;
443} 450}
444 451
452void extent_io_tree_panic(struct extent_io_tree *tree, int err)
453{
454 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
455 "Extent tree was modified by another "
456 "thread while locked.");
457}
458
445/* 459/*
446 * clear some bits on a range in the tree. This may require splitting 460 * clear some bits on a range in the tree. This may require splitting
447 * or inserting elements in the tree, so the gfp mask is used to 461 * or inserting elements in the tree, so the gfp mask is used to
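
The new extent_io_tree_panic() helper above is used at the call sites in this file where the caller has already preallocated an extent_state, so insert_state()/split_state() can only fail if the locked tree was modified underneath us; the old BUG_ON(err == -EEXIST) checks become a panic with a message. A small sketch of that call-site shape, assuming the static helpers of this file:

static void example_split(struct extent_io_tree *tree,
			  struct extent_state *state,
			  struct extent_state *prealloc, u64 start)
{
	int err;

	err = split_state(tree, state, prealloc, start);
	/* With a preallocated state this is not expected to fail; if it
	 * does, the tree changed while locked and we cannot continue. */
	if (err)
		extent_io_tree_panic(tree, err);
}
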
@@ -452,8 +466,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
452 * 466 *
453 * the range [start, end] is inclusive. 467 * the range [start, end] is inclusive.
454 * 468 *
455 * This takes the tree lock, and returns < 0 on error, > 0 if any of the 469 * This takes the tree lock, and returns 0 on success and < 0 on error.
456 * bits were already set, or zero if none of the bits were already set.
457 */ 470 */
458int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 471int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
459 int bits, int wake, int delete, 472 int bits, int wake, int delete,
@@ -467,7 +480,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
467 struct rb_node *node; 480 struct rb_node *node;
468 u64 last_end; 481 u64 last_end;
469 int err; 482 int err;
470 int set = 0;
471 int clear = 0; 483 int clear = 0;
472 484
473 if (delete) 485 if (delete)
@@ -545,12 +557,14 @@ hit_next:
545 prealloc = alloc_extent_state_atomic(prealloc); 557 prealloc = alloc_extent_state_atomic(prealloc);
546 BUG_ON(!prealloc); 558 BUG_ON(!prealloc);
547 err = split_state(tree, state, prealloc, start); 559 err = split_state(tree, state, prealloc, start);
548 BUG_ON(err == -EEXIST); 560 if (err)
561 extent_io_tree_panic(tree, err);
562
549 prealloc = NULL; 563 prealloc = NULL;
550 if (err) 564 if (err)
551 goto out; 565 goto out;
552 if (state->end <= end) { 566 if (state->end <= end) {
553 set |= clear_state_bit(tree, state, &bits, wake); 567 clear_state_bit(tree, state, &bits, wake);
554 if (last_end == (u64)-1) 568 if (last_end == (u64)-1)
555 goto out; 569 goto out;
556 start = last_end + 1; 570 start = last_end + 1;
@@ -567,17 +581,19 @@ hit_next:
567 prealloc = alloc_extent_state_atomic(prealloc); 581 prealloc = alloc_extent_state_atomic(prealloc);
568 BUG_ON(!prealloc); 582 BUG_ON(!prealloc);
569 err = split_state(tree, state, prealloc, end + 1); 583 err = split_state(tree, state, prealloc, end + 1);
570 BUG_ON(err == -EEXIST); 584 if (err)
585 extent_io_tree_panic(tree, err);
586
571 if (wake) 587 if (wake)
572 wake_up(&state->wq); 588 wake_up(&state->wq);
573 589
574 set |= clear_state_bit(tree, prealloc, &bits, wake); 590 clear_state_bit(tree, prealloc, &bits, wake);
575 591
576 prealloc = NULL; 592 prealloc = NULL;
577 goto out; 593 goto out;
578 } 594 }
579 595
580 set |= clear_state_bit(tree, state, &bits, wake); 596 clear_state_bit(tree, state, &bits, wake);
581next: 597next:
582 if (last_end == (u64)-1) 598 if (last_end == (u64)-1)
583 goto out; 599 goto out;
@@ -594,7 +610,7 @@ out:
594 if (prealloc) 610 if (prealloc)
595 free_extent_state(prealloc); 611 free_extent_state(prealloc);
596 612
597 return set; 613 return 0;
598 614
599search_again: 615search_again:
600 if (start > end) 616 if (start > end)
@@ -605,8 +621,8 @@ search_again:
605 goto again; 621 goto again;
606} 622}
607 623
608static int wait_on_state(struct extent_io_tree *tree, 624static void wait_on_state(struct extent_io_tree *tree,
609 struct extent_state *state) 625 struct extent_state *state)
610 __releases(tree->lock) 626 __releases(tree->lock)
611 __acquires(tree->lock) 627 __acquires(tree->lock)
612{ 628{
@@ -616,7 +632,6 @@ static int wait_on_state(struct extent_io_tree *tree,
616 schedule(); 632 schedule();
617 spin_lock(&tree->lock); 633 spin_lock(&tree->lock);
618 finish_wait(&state->wq, &wait); 634 finish_wait(&state->wq, &wait);
619 return 0;
620} 635}
621 636
622/* 637/*
@@ -624,7 +639,7 @@ static int wait_on_state(struct extent_io_tree *tree,
624 * The range [start, end] is inclusive. 639 * The range [start, end] is inclusive.
625 * The tree lock is taken by this function 640 * The tree lock is taken by this function
626 */ 641 */
627int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) 642void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
628{ 643{
629 struct extent_state *state; 644 struct extent_state *state;
630 struct rb_node *node; 645 struct rb_node *node;
@@ -661,7 +676,6 @@ again:
661 } 676 }
662out: 677out:
663 spin_unlock(&tree->lock); 678 spin_unlock(&tree->lock);
664 return 0;
665} 679}
666 680
667static void set_state_bits(struct extent_io_tree *tree, 681static void set_state_bits(struct extent_io_tree *tree,
@@ -709,9 +723,10 @@ static void uncache_state(struct extent_state **cached_ptr)
709 * [start, end] is inclusive This takes the tree lock. 723 * [start, end] is inclusive This takes the tree lock.
710 */ 724 */
711 725
712int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 726static int __must_check
713 int bits, int exclusive_bits, u64 *failed_start, 727__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
714 struct extent_state **cached_state, gfp_t mask) 728 int bits, int exclusive_bits, u64 *failed_start,
729 struct extent_state **cached_state, gfp_t mask)
715{ 730{
716 struct extent_state *state; 731 struct extent_state *state;
717 struct extent_state *prealloc = NULL; 732 struct extent_state *prealloc = NULL;
@@ -745,8 +760,10 @@ again:
745 prealloc = alloc_extent_state_atomic(prealloc); 760 prealloc = alloc_extent_state_atomic(prealloc);
746 BUG_ON(!prealloc); 761 BUG_ON(!prealloc);
747 err = insert_state(tree, prealloc, start, end, &bits); 762 err = insert_state(tree, prealloc, start, end, &bits);
763 if (err)
764 extent_io_tree_panic(tree, err);
765
748 prealloc = NULL; 766 prealloc = NULL;
749 BUG_ON(err == -EEXIST);
750 goto out; 767 goto out;
751 } 768 }
752 state = rb_entry(node, struct extent_state, rb_node); 769 state = rb_entry(node, struct extent_state, rb_node);
@@ -812,7 +829,9 @@ hit_next:
812 prealloc = alloc_extent_state_atomic(prealloc); 829 prealloc = alloc_extent_state_atomic(prealloc);
813 BUG_ON(!prealloc); 830 BUG_ON(!prealloc);
814 err = split_state(tree, state, prealloc, start); 831 err = split_state(tree, state, prealloc, start);
815 BUG_ON(err == -EEXIST); 832 if (err)
833 extent_io_tree_panic(tree, err);
834
816 prealloc = NULL; 835 prealloc = NULL;
817 if (err) 836 if (err)
818 goto out; 837 goto out;
@@ -849,12 +868,9 @@ hit_next:
849 */ 868 */
850 err = insert_state(tree, prealloc, start, this_end, 869 err = insert_state(tree, prealloc, start, this_end,
851 &bits); 870 &bits);
852 BUG_ON(err == -EEXIST); 871 if (err)
853 if (err) { 872 extent_io_tree_panic(tree, err);
854 free_extent_state(prealloc); 873
855 prealloc = NULL;
856 goto out;
857 }
858 cache_state(prealloc, cached_state); 874 cache_state(prealloc, cached_state);
859 prealloc = NULL; 875 prealloc = NULL;
860 start = this_end + 1; 876 start = this_end + 1;
@@ -876,7 +892,8 @@ hit_next:
876 prealloc = alloc_extent_state_atomic(prealloc); 892 prealloc = alloc_extent_state_atomic(prealloc);
877 BUG_ON(!prealloc); 893 BUG_ON(!prealloc);
878 err = split_state(tree, state, prealloc, end + 1); 894 err = split_state(tree, state, prealloc, end + 1);
879 BUG_ON(err == -EEXIST); 895 if (err)
896 extent_io_tree_panic(tree, err);
880 897
881 set_state_bits(tree, prealloc, &bits); 898 set_state_bits(tree, prealloc, &bits);
882 cache_state(prealloc, cached_state); 899 cache_state(prealloc, cached_state);
@@ -903,6 +920,15 @@ search_again:
903 goto again; 920 goto again;
904} 921}
905 922
923int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
924 u64 *failed_start, struct extent_state **cached_state,
925 gfp_t mask)
926{
927 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
928 cached_state, mask);
929}
930
931
906/** 932/**
907 * convert_extent - convert all bits in a given range from one bit to another 933 * convert_extent - convert all bits in a given range from one bit to another
908 * @tree: the io tree to search 934 * @tree: the io tree to search
@@ -949,7 +975,8 @@ again:
949 } 975 }
950 err = insert_state(tree, prealloc, start, end, &bits); 976 err = insert_state(tree, prealloc, start, end, &bits);
951 prealloc = NULL; 977 prealloc = NULL;
952 BUG_ON(err == -EEXIST); 978 if (err)
979 extent_io_tree_panic(tree, err);
953 goto out; 980 goto out;
954 } 981 }
955 state = rb_entry(node, struct extent_state, rb_node); 982 state = rb_entry(node, struct extent_state, rb_node);
@@ -1005,7 +1032,8 @@ hit_next:
1005 goto out; 1032 goto out;
1006 } 1033 }
1007 err = split_state(tree, state, prealloc, start); 1034 err = split_state(tree, state, prealloc, start);
1008 BUG_ON(err == -EEXIST); 1035 if (err)
1036 extent_io_tree_panic(tree, err);
1009 prealloc = NULL; 1037 prealloc = NULL;
1010 if (err) 1038 if (err)
1011 goto out; 1039 goto out;
@@ -1044,12 +1072,8 @@ hit_next:
1044 */ 1072 */
1045 err = insert_state(tree, prealloc, start, this_end, 1073 err = insert_state(tree, prealloc, start, this_end,
1046 &bits); 1074 &bits);
1047 BUG_ON(err == -EEXIST); 1075 if (err)
1048 if (err) { 1076 extent_io_tree_panic(tree, err);
1049 free_extent_state(prealloc);
1050 prealloc = NULL;
1051 goto out;
1052 }
1053 prealloc = NULL; 1077 prealloc = NULL;
1054 start = this_end + 1; 1078 start = this_end + 1;
1055 goto search_again; 1079 goto search_again;
@@ -1068,7 +1092,8 @@ hit_next:
1068 } 1092 }
1069 1093
1070 err = split_state(tree, state, prealloc, end + 1); 1094 err = split_state(tree, state, prealloc, end + 1);
1071 BUG_ON(err == -EEXIST); 1095 if (err)
1096 extent_io_tree_panic(tree, err);
1072 1097
1073 set_state_bits(tree, prealloc, &bits); 1098 set_state_bits(tree, prealloc, &bits);
1074 clear_state_bit(tree, prealloc, &clear_bits, 0); 1099 clear_state_bit(tree, prealloc, &clear_bits, 0);
@@ -1098,14 +1123,14 @@ search_again:
1098int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 1123int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1099 gfp_t mask) 1124 gfp_t mask)
1100{ 1125{
1101 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, 1126 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1102 NULL, mask); 1127 NULL, mask);
1103} 1128}
1104 1129
1105int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1130int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1106 int bits, gfp_t mask) 1131 int bits, gfp_t mask)
1107{ 1132{
1108 return set_extent_bit(tree, start, end, bits, 0, NULL, 1133 return set_extent_bit(tree, start, end, bits, NULL,
1109 NULL, mask); 1134 NULL, mask);
1110} 1135}
1111 1136
@@ -1120,7 +1145,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1120{ 1145{
1121 return set_extent_bit(tree, start, end, 1146 return set_extent_bit(tree, start, end,
1122 EXTENT_DELALLOC | EXTENT_UPTODATE, 1147 EXTENT_DELALLOC | EXTENT_UPTODATE,
1123 0, NULL, cached_state, mask); 1148 NULL, cached_state, mask);
1124} 1149}
1125 1150
1126int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 1151int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1134,7 +1159,7 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1134int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, 1159int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1135 gfp_t mask) 1160 gfp_t mask)
1136{ 1161{
1137 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, 1162 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1138 NULL, mask); 1163 NULL, mask);
1139} 1164}
1140 1165
@@ -1142,7 +1167,7 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1142 struct extent_state **cached_state, gfp_t mask) 1167 struct extent_state **cached_state, gfp_t mask)
1143{ 1168{
1144 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 1169 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
1145 NULL, cached_state, mask); 1170 cached_state, mask);
1146} 1171}
1147 1172
1148static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, 1173static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1158,42 +1183,40 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
1158 * us if waiting is desired. 1183 * us if waiting is desired.
1159 */ 1184 */
1160int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1185int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1161 int bits, struct extent_state **cached_state, gfp_t mask) 1186 int bits, struct extent_state **cached_state)
1162{ 1187{
1163 int err; 1188 int err;
1164 u64 failed_start; 1189 u64 failed_start;
1165 while (1) { 1190 while (1) {
1166 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, 1191 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1167 EXTENT_LOCKED, &failed_start, 1192 EXTENT_LOCKED, &failed_start,
1168 cached_state, mask); 1193 cached_state, GFP_NOFS);
1169 if (err == -EEXIST && (mask & __GFP_WAIT)) { 1194 if (err == -EEXIST) {
1170 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); 1195 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1171 start = failed_start; 1196 start = failed_start;
1172 } else { 1197 } else
1173 break; 1198 break;
1174 }
1175 WARN_ON(start > end); 1199 WARN_ON(start > end);
1176 } 1200 }
1177 return err; 1201 return err;
1178} 1202}
1179 1203
1180int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) 1204int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1181{ 1205{
1182 return lock_extent_bits(tree, start, end, 0, NULL, mask); 1206 return lock_extent_bits(tree, start, end, 0, NULL);
1183} 1207}
1184 1208
1185int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, 1209int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1186 gfp_t mask)
1187{ 1210{
1188 int err; 1211 int err;
1189 u64 failed_start; 1212 u64 failed_start;
1190 1213
1191 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, 1214 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1192 &failed_start, NULL, mask); 1215 &failed_start, NULL, GFP_NOFS);
1193 if (err == -EEXIST) { 1216 if (err == -EEXIST) {
1194 if (failed_start > start) 1217 if (failed_start > start)
1195 clear_extent_bit(tree, start, failed_start - 1, 1218 clear_extent_bit(tree, start, failed_start - 1,
1196 EXTENT_LOCKED, 1, 0, NULL, mask); 1219 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1197 return 0; 1220 return 0;
1198 } 1221 }
1199 return 1; 1222 return 1;
@@ -1206,10 +1229,10 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1206 mask); 1229 mask);
1207} 1230}
1208 1231
1209int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) 1232int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1210{ 1233{
1211 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, 1234 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1212 mask); 1235 GFP_NOFS);
1213} 1236}
1214 1237
1215/* 1238/*
@@ -1223,7 +1246,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1223 1246
1224 while (index <= end_index) { 1247 while (index <= end_index) {
1225 page = find_get_page(tree->mapping, index); 1248 page = find_get_page(tree->mapping, index);
1226 BUG_ON(!page); 1249 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1227 set_page_writeback(page); 1250 set_page_writeback(page);
1228 page_cache_release(page); 1251 page_cache_release(page);
1229 index++; 1252 index++;
@@ -1346,9 +1369,9 @@ out:
1346 return found; 1369 return found;
1347} 1370}
1348 1371
1349static noinline int __unlock_for_delalloc(struct inode *inode, 1372static noinline void __unlock_for_delalloc(struct inode *inode,
1350 struct page *locked_page, 1373 struct page *locked_page,
1351 u64 start, u64 end) 1374 u64 start, u64 end)
1352{ 1375{
1353 int ret; 1376 int ret;
1354 struct page *pages[16]; 1377 struct page *pages[16];
@@ -1358,7 +1381,7 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
1358 int i; 1381 int i;
1359 1382
1360 if (index == locked_page->index && end_index == index) 1383 if (index == locked_page->index && end_index == index)
1361 return 0; 1384 return;
1362 1385
1363 while (nr_pages > 0) { 1386 while (nr_pages > 0) {
1364 ret = find_get_pages_contig(inode->i_mapping, index, 1387 ret = find_get_pages_contig(inode->i_mapping, index,
@@ -1373,7 +1396,6 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
1373 index += ret; 1396 index += ret;
1374 cond_resched(); 1397 cond_resched();
1375 } 1398 }
1376 return 0;
1377} 1399}
1378 1400
1379static noinline int lock_delalloc_pages(struct inode *inode, 1401static noinline int lock_delalloc_pages(struct inode *inode,
@@ -1503,11 +1525,10 @@ again:
1503 goto out_failed; 1525 goto out_failed;
1504 } 1526 }
1505 } 1527 }
1506 BUG_ON(ret); 1528 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1507 1529
1508 /* step three, lock the state bits for the whole range */ 1530 /* step three, lock the state bits for the whole range */
1509 lock_extent_bits(tree, delalloc_start, delalloc_end, 1531 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1510 0, &cached_state, GFP_NOFS);
1511 1532
1512 /* then test to make sure it is all still delalloc */ 1533 /* then test to make sure it is all still delalloc */
1513 ret = test_range_bit(tree, delalloc_start, delalloc_end, 1534 ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -1764,39 +1785,34 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1764 * helper function to set a given page up to date if all the 1785 * helper function to set a given page up to date if all the
1765 * extents in the tree for that page are up to date 1786 * extents in the tree for that page are up to date
1766 */ 1787 */
1767static int check_page_uptodate(struct extent_io_tree *tree, 1788static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1768 struct page *page)
1769{ 1789{
1770 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 1790 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1771 u64 end = start + PAGE_CACHE_SIZE - 1; 1791 u64 end = start + PAGE_CACHE_SIZE - 1;
1772 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) 1792 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1773 SetPageUptodate(page); 1793 SetPageUptodate(page);
1774 return 0;
1775} 1794}
1776 1795
1777/* 1796/*
1778 * helper function to unlock a page if all the extents in the tree 1797 * helper function to unlock a page if all the extents in the tree
1779 * for that page are unlocked 1798 * for that page are unlocked
1780 */ 1799 */
1781static int check_page_locked(struct extent_io_tree *tree, 1800static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1782 struct page *page)
1783{ 1801{
1784 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 1802 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1785 u64 end = start + PAGE_CACHE_SIZE - 1; 1803 u64 end = start + PAGE_CACHE_SIZE - 1;
1786 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) 1804 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1787 unlock_page(page); 1805 unlock_page(page);
1788 return 0;
1789} 1806}
1790 1807
1791/* 1808/*
1792 * helper function to end page writeback if all the extents 1809 * helper function to end page writeback if all the extents
1793 * in the tree for that page are done with writeback 1810 * in the tree for that page are done with writeback
1794 */ 1811 */
1795static int check_page_writeback(struct extent_io_tree *tree, 1812static void check_page_writeback(struct extent_io_tree *tree,
1796 struct page *page) 1813 struct page *page)
1797{ 1814{
1798 end_page_writeback(page); 1815 end_page_writeback(page);
1799 return 0;
1800} 1816}
1801 1817
1802/* 1818/*
@@ -2409,8 +2425,12 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2409 return bio; 2425 return bio;
2410} 2426}
2411 2427
2412static int submit_one_bio(int rw, struct bio *bio, int mirror_num, 2428/*
2413 unsigned long bio_flags) 2429 * Since writes are async, they will only return -ENOMEM.
2430 * Reads can return the full range of I/O error conditions.
2431 */
2432static int __must_check submit_one_bio(int rw, struct bio *bio,
2433 int mirror_num, unsigned long bio_flags)
2414{ 2434{
2415 int ret = 0; 2435 int ret = 0;
2416 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 2436 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -2436,6 +2456,19 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
2436 return ret; 2456 return ret;
2437} 2457}
2438 2458
2459static int merge_bio(struct extent_io_tree *tree, struct page *page,
2460 unsigned long offset, size_t size, struct bio *bio,
2461 unsigned long bio_flags)
2462{
2463 int ret = 0;
2464 if (tree->ops && tree->ops->merge_bio_hook)
2465 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2466 bio_flags);
2467 BUG_ON(ret < 0);
2468 return ret;
2469
2470}
2471
2439static int submit_extent_page(int rw, struct extent_io_tree *tree, 2472static int submit_extent_page(int rw, struct extent_io_tree *tree,
2440 struct page *page, sector_t sector, 2473 struct page *page, sector_t sector,
2441 size_t size, unsigned long offset, 2474 size_t size, unsigned long offset,
@@ -2464,12 +2497,12 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2464 sector; 2497 sector;
2465 2498
2466 if (prev_bio_flags != bio_flags || !contig || 2499 if (prev_bio_flags != bio_flags || !contig ||
2467 (tree->ops && tree->ops->merge_bio_hook && 2500 merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
2468 tree->ops->merge_bio_hook(page, offset, page_size, bio,
2469 bio_flags)) ||
2470 bio_add_page(bio, page, page_size, offset) < page_size) { 2501 bio_add_page(bio, page, page_size, offset) < page_size) {
2471 ret = submit_one_bio(rw, bio, mirror_num, 2502 ret = submit_one_bio(rw, bio, mirror_num,
2472 prev_bio_flags); 2503 prev_bio_flags);
2504 if (ret < 0)
2505 return ret;
2473 bio = NULL; 2506 bio = NULL;
2474 } else { 2507 } else {
2475 return 0; 2508 return 0;
@@ -2520,6 +2553,7 @@ void set_page_extent_mapped(struct page *page)
2520 * basic readpage implementation. Locked extent state structs are inserted 2553 * basic readpage implementation. Locked extent state structs are inserted
2521 * into the tree that are removed when the IO is done (by the end_io 2554 * into the tree that are removed when the IO is done (by the end_io
2522 * handlers) 2555 * handlers)
2556 * XXX JDM: This needs looking at to ensure proper page locking
2523 */ 2557 */
2524static int __extent_read_full_page(struct extent_io_tree *tree, 2558static int __extent_read_full_page(struct extent_io_tree *tree,
2525 struct page *page, 2559 struct page *page,
@@ -2559,11 +2593,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2559 2593
2560 end = page_end; 2594 end = page_end;
2561 while (1) { 2595 while (1) {
2562 lock_extent(tree, start, end, GFP_NOFS); 2596 lock_extent(tree, start, end);
2563 ordered = btrfs_lookup_ordered_extent(inode, start); 2597 ordered = btrfs_lookup_ordered_extent(inode, start);
2564 if (!ordered) 2598 if (!ordered)
2565 break; 2599 break;
2566 unlock_extent(tree, start, end, GFP_NOFS); 2600 unlock_extent(tree, start, end);
2567 btrfs_start_ordered_extent(inode, ordered, 1); 2601 btrfs_start_ordered_extent(inode, ordered, 1);
2568 btrfs_put_ordered_extent(ordered); 2602 btrfs_put_ordered_extent(ordered);
2569 } 2603 }
@@ -2600,7 +2634,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2600 end - cur + 1, 0); 2634 end - cur + 1, 0);
2601 if (IS_ERR_OR_NULL(em)) { 2635 if (IS_ERR_OR_NULL(em)) {
2602 SetPageError(page); 2636 SetPageError(page);
2603 unlock_extent(tree, cur, end, GFP_NOFS); 2637 unlock_extent(tree, cur, end);
2604 break; 2638 break;
2605 } 2639 }
2606 extent_offset = cur - em->start; 2640 extent_offset = cur - em->start;
@@ -2652,7 +2686,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2652 if (test_range_bit(tree, cur, cur_end, 2686 if (test_range_bit(tree, cur, cur_end,
2653 EXTENT_UPTODATE, 1, NULL)) { 2687 EXTENT_UPTODATE, 1, NULL)) {
2654 check_page_uptodate(tree, page); 2688 check_page_uptodate(tree, page);
2655 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2689 unlock_extent(tree, cur, cur + iosize - 1);
2656 cur = cur + iosize; 2690 cur = cur + iosize;
2657 pg_offset += iosize; 2691 pg_offset += iosize;
2658 continue; 2692 continue;
@@ -2662,7 +2696,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2662 */ 2696 */
2663 if (block_start == EXTENT_MAP_INLINE) { 2697 if (block_start == EXTENT_MAP_INLINE) {
2664 SetPageError(page); 2698 SetPageError(page);
2665 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2699 unlock_extent(tree, cur, cur + iosize - 1);
2666 cur = cur + iosize; 2700 cur = cur + iosize;
2667 pg_offset += iosize; 2701 pg_offset += iosize;
2668 continue; 2702 continue;
@@ -2682,6 +2716,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2682 end_bio_extent_readpage, mirror_num, 2716 end_bio_extent_readpage, mirror_num,
2683 *bio_flags, 2717 *bio_flags,
2684 this_bio_flag); 2718 this_bio_flag);
2719 BUG_ON(ret == -ENOMEM);
2685 nr++; 2720 nr++;
2686 *bio_flags = this_bio_flag; 2721 *bio_flags = this_bio_flag;
2687 } 2722 }
@@ -2823,7 +2858,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2823 delalloc_end, 2858 delalloc_end,
2824 &page_started, 2859 &page_started,
2825 &nr_written); 2860 &nr_written);
2826 BUG_ON(ret); 2861 /* File system has been set read-only */
2862 if (ret) {
2863 SetPageError(page);
2864 goto done;
2865 }
2827 /* 2866 /*
2828 * delalloc_end is already one less than the total 2867 * delalloc_end is already one less than the total
2829 * length, so we don't subtract one from 2868 * length, so we don't subtract one from
@@ -3396,10 +3435,14 @@ retry:
3396static void flush_epd_write_bio(struct extent_page_data *epd) 3435static void flush_epd_write_bio(struct extent_page_data *epd)
3397{ 3436{
3398 if (epd->bio) { 3437 if (epd->bio) {
3438 int rw = WRITE;
3439 int ret;
3440
3399 if (epd->sync_io) 3441 if (epd->sync_io)
3400 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0); 3442 rw = WRITE_SYNC;
3401 else 3443
3402 submit_one_bio(WRITE, epd->bio, 0, 0); 3444 ret = submit_one_bio(rw, epd->bio, 0, 0);
3445 BUG_ON(ret < 0); /* -ENOMEM */
3403 epd->bio = NULL; 3446 epd->bio = NULL;
3404 } 3447 }
3405} 3448}
@@ -3516,7 +3559,7 @@ int extent_readpages(struct extent_io_tree *tree,
3516 } 3559 }
3517 BUG_ON(!list_empty(pages)); 3560 BUG_ON(!list_empty(pages));
3518 if (bio) 3561 if (bio)
3519 submit_one_bio(READ, bio, 0, bio_flags); 3562 return submit_one_bio(READ, bio, 0, bio_flags);
3520 return 0; 3563 return 0;
3521} 3564}
3522 3565
@@ -3537,7 +3580,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
3537 if (start > end) 3580 if (start > end)
3538 return 0; 3581 return 0;
3539 3582
3540 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); 3583 lock_extent_bits(tree, start, end, 0, &cached_state);
3541 wait_on_page_writeback(page); 3584 wait_on_page_writeback(page);
3542 clear_extent_bit(tree, start, end, 3585 clear_extent_bit(tree, start, end,
3543 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 3586 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -3751,7 +3794,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3751 } 3794 }
3752 3795
3753 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 3796 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3754 &cached_state, GFP_NOFS); 3797 &cached_state);
3755 3798
3756 em = get_extent_skip_holes(inode, start, last_for_get_extent, 3799 em = get_extent_skip_holes(inode, start, last_for_get_extent,
3757 get_extent); 3800 get_extent);
@@ -4239,14 +4282,13 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
4239 release_extent_buffer(eb, GFP_NOFS); 4282 release_extent_buffer(eb, GFP_NOFS);
4240} 4283}
4241 4284
4242int clear_extent_buffer_dirty(struct extent_buffer *eb) 4285void clear_extent_buffer_dirty(struct extent_buffer *eb)
4243{ 4286{
4244 unsigned long i; 4287 unsigned long i;
4245 unsigned long num_pages; 4288 unsigned long num_pages;
4246 struct page *page; 4289 struct page *page;
4247 4290
4248 num_pages = num_extent_pages(eb->start, eb->len); 4291 num_pages = num_extent_pages(eb->start, eb->len);
4249 WARN_ON(atomic_read(&eb->refs) == 0);
4250 4292
4251 for (i = 0; i < num_pages; i++) { 4293 for (i = 0; i < num_pages; i++) {
4252 page = extent_buffer_page(eb, i); 4294 page = extent_buffer_page(eb, i);
@@ -4268,7 +4310,6 @@ int clear_extent_buffer_dirty(struct extent_buffer *eb)
4268 unlock_page(page); 4310 unlock_page(page);
4269 } 4311 }
4270 WARN_ON(atomic_read(&eb->refs) == 0); 4312 WARN_ON(atomic_read(&eb->refs) == 0);
4271 return 0;
4272} 4313}
4273 4314
4274int set_extent_buffer_dirty(struct extent_buffer *eb) 4315int set_extent_buffer_dirty(struct extent_buffer *eb)
@@ -4433,8 +4474,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
4433 } 4474 }
4434 } 4475 }
4435 4476
4436 if (bio) 4477 if (bio) {
4437 submit_one_bio(READ, bio, mirror_num, bio_flags); 4478 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4479 if (err)
4480 return err;
4481 }
4438 4482
4439 if (ret || wait != WAIT_COMPLETE) 4483 if (ret || wait != WAIT_COMPLETE)
4440 return ret; 4484 return ret;
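
The extent_io.c hunks above share one shape: failures from insert_state() and split_state() that used to hit ad-hoc BUG_ON(err == -EEXIST) checks now funnel through a single extent_io_tree_panic() call, and the exclusive-bits variant of set_extent_bit() becomes a static __must_check helper behind a thin public wrapper. A minimal userspace sketch of that wrapper-plus-panic-helper shape follows; every name in it (demo_tree, demo_set_bit and so on) is illustrative only, the failure condition is faked, and none of it is the kernel API.

/* Sketch only: models the __set_extent_bit()/set_extent_bit() split and the
 * single panic helper used in the hunks above.  Toy types, fake failure. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_tree { const char *name; };

static void demo_tree_panic(struct demo_tree *tree, int err)
{
	fprintf(stderr, "%s: unrecoverable extent tree error %d\n",
		tree->name, err);
	abort();
}

/* Internal variant with the extra exclusive_bits argument; the result must
 * not be ignored, mirroring the new __must_check annotation. */
static int __attribute__((warn_unused_result))
__demo_set_bit(struct demo_tree *tree, unsigned long start, unsigned long end,
	       int bits, int exclusive_bits, unsigned long *failed_start)
{
	if (exclusive_bits && start <= 42 && 42 <= end) {
		*failed_start = start;	/* pretend the range is busy */
		return -EEXIST;
	}
	return 0;			/* pretend the bits were set */
}

/* Public wrapper: ordinary callers never see exclusive_bits. */
static int demo_set_bit(struct demo_tree *tree, unsigned long start,
			unsigned long end, int bits)
{
	return __demo_set_bit(tree, start, end, bits, 0, NULL);
}

int main(void)
{
	struct demo_tree tree = { "demo" };
	int err = demo_set_bit(&tree, 4096, 8191, 0x1);

	if (err)			/* was: BUG_ON(err == -EEXIST) */
		demo_tree_panic(&tree, err);
	printf("bits set on [4096, 8191]\n");
	return 0;
}
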
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 38c1af7092f3..faf10eb57f75 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -194,14 +194,13 @@ int try_release_extent_buffer(struct page *page, gfp_t mask);
194int try_release_extent_state(struct extent_map_tree *map, 194int try_release_extent_state(struct extent_map_tree *map,
195 struct extent_io_tree *tree, struct page *page, 195 struct extent_io_tree *tree, struct page *page,
196 gfp_t mask); 196 gfp_t mask);
197int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); 197int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
198int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 198int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
199 int bits, struct extent_state **cached, gfp_t mask); 199 int bits, struct extent_state **cached);
200int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); 200int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
201int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, 201int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
202 struct extent_state **cached, gfp_t mask); 202 struct extent_state **cached, gfp_t mask);
203int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, 203int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
204 gfp_t mask);
205int extent_read_full_page(struct extent_io_tree *tree, struct page *page, 204int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
206 get_extent_t *get_extent, int mirror_num); 205 get_extent_t *get_extent, int mirror_num);
207int __init extent_io_init(void); 206int __init extent_io_init(void);
@@ -222,7 +221,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
222int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 221int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
223 int bits, gfp_t mask); 222 int bits, gfp_t mask);
224int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 223int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
225 int bits, int exclusive_bits, u64 *failed_start, 224 int bits, u64 *failed_start,
226 struct extent_state **cached_state, gfp_t mask); 225 struct extent_state **cached_state, gfp_t mask);
227int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 226int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
228 struct extent_state **cached_state, gfp_t mask); 227 struct extent_state **cached_state, gfp_t mask);
@@ -301,8 +300,8 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
301 unsigned long src_offset, unsigned long len); 300 unsigned long src_offset, unsigned long len);
302void memset_extent_buffer(struct extent_buffer *eb, char c, 301void memset_extent_buffer(struct extent_buffer *eb, char c,
303 unsigned long start, unsigned long len); 302 unsigned long start, unsigned long len);
304int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); 303void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
305int clear_extent_buffer_dirty(struct extent_buffer *eb); 304void clear_extent_buffer_dirty(struct extent_buffer *eb);
306int set_extent_buffer_dirty(struct extent_buffer *eb); 305int set_extent_buffer_dirty(struct extent_buffer *eb);
307int set_extent_buffer_uptodate(struct extent_buffer *eb); 306int set_extent_buffer_uptodate(struct extent_buffer *eb);
308int clear_extent_buffer_uptodate(struct extent_buffer *eb); 307int clear_extent_buffer_uptodate(struct extent_buffer *eb);
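
Read together, the header changes above drop the gfp_t argument from the locking entry points, remove exclusive_bits from the public set_extent_bit(), and turn wait_extent_bit() and clear_extent_buffer_dirty() into void functions since they have nothing to report. The caller-side effect is exactly what the __extent_read_full_page() hunk earlier in this diff shows inside its while (1) ordered-extent loop; the excerpt-style fragment below is a before/after restatement of that call site, not a standalone program.

	/* before: every call site carried an allocation mask */
	lock_extent(tree, start, end, GFP_NOFS);
	ordered = btrfs_lookup_ordered_extent(inode, start);
	if (!ordered)
		break;
	unlock_extent(tree, start, end, GFP_NOFS);

	/* after: GFP_NOFS is fixed inside extent_io.c */
	lock_extent(tree, start, end);
	ordered = btrfs_lookup_ordered_extent(inode, start);
	if (!ordered)
		break;
	unlock_extent(tree, start, end);
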
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index c7fb3a4247d3..a14dbca5974e 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -59,7 +59,7 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
59 sizeof(*item)); 59 sizeof(*item));
60 if (ret < 0) 60 if (ret < 0)
61 goto out; 61 goto out;
62 BUG_ON(ret); 62 BUG_ON(ret); /* Can't happen */
63 leaf = path->nodes[0]; 63 leaf = path->nodes[0];
64 item = btrfs_item_ptr(leaf, path->slots[0], 64 item = btrfs_item_ptr(leaf, path->slots[0],
65 struct btrfs_file_extent_item); 65 struct btrfs_file_extent_item);
@@ -284,6 +284,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
284 struct btrfs_ordered_sum *sums; 284 struct btrfs_ordered_sum *sums;
285 struct btrfs_sector_sum *sector_sum; 285 struct btrfs_sector_sum *sector_sum;
286 struct btrfs_csum_item *item; 286 struct btrfs_csum_item *item;
287 LIST_HEAD(tmplist);
287 unsigned long offset; 288 unsigned long offset;
288 int ret; 289 int ret;
289 size_t size; 290 size_t size;
@@ -358,7 +359,10 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
358 MAX_ORDERED_SUM_BYTES(root)); 359 MAX_ORDERED_SUM_BYTES(root));
359 sums = kzalloc(btrfs_ordered_sum_size(root, size), 360 sums = kzalloc(btrfs_ordered_sum_size(root, size),
360 GFP_NOFS); 361 GFP_NOFS);
361 BUG_ON(!sums); 362 if (!sums) {
363 ret = -ENOMEM;
364 goto fail;
365 }
362 366
363 sector_sum = sums->sums; 367 sector_sum = sums->sums;
364 sums->bytenr = start; 368 sums->bytenr = start;
@@ -380,12 +384,19 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
380 offset += csum_size; 384 offset += csum_size;
381 sector_sum++; 385 sector_sum++;
382 } 386 }
383 list_add_tail(&sums->list, list); 387 list_add_tail(&sums->list, &tmplist);
384 } 388 }
385 path->slots[0]++; 389 path->slots[0]++;
386 } 390 }
387 ret = 0; 391 ret = 0;
388fail: 392fail:
393 while (ret < 0 && !list_empty(&tmplist)) {
394 sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
395 list_del(&sums->list);
396 kfree(sums);
397 }
398 list_splice_tail(&tmplist, list);
399
389 btrfs_free_path(path); 400 btrfs_free_path(path);
390 return ret; 401 return ret;
391} 402}
@@ -420,7 +431,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
420 offset = page_offset(bvec->bv_page) + bvec->bv_offset; 431 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
421 432
422 ordered = btrfs_lookup_ordered_extent(inode, offset); 433 ordered = btrfs_lookup_ordered_extent(inode, offset);
423 BUG_ON(!ordered); 434 BUG_ON(!ordered); /* Logic error */
424 sums->bytenr = ordered->start; 435 sums->bytenr = ordered->start;
425 436
426 while (bio_index < bio->bi_vcnt) { 437 while (bio_index < bio->bi_vcnt) {
@@ -439,11 +450,11 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
439 450
440 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), 451 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
441 GFP_NOFS); 452 GFP_NOFS);
442 BUG_ON(!sums); 453 BUG_ON(!sums); /* -ENOMEM */
443 sector_sum = sums->sums; 454 sector_sum = sums->sums;
444 sums->len = bytes_left; 455 sums->len = bytes_left;
445 ordered = btrfs_lookup_ordered_extent(inode, offset); 456 ordered = btrfs_lookup_ordered_extent(inode, offset);
446 BUG_ON(!ordered); 457 BUG_ON(!ordered); /* Logic error */
447 sums->bytenr = ordered->start; 458 sums->bytenr = ordered->start;
448 } 459 }
449 460
@@ -483,18 +494,17 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
483 * This calls btrfs_truncate_item with the correct args based on the 494 * This calls btrfs_truncate_item with the correct args based on the
484 * overlap, and fixes up the key as required. 495 * overlap, and fixes up the key as required.
485 */ 496 */
486static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, 497static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
487 struct btrfs_root *root, 498 struct btrfs_root *root,
488 struct btrfs_path *path, 499 struct btrfs_path *path,
489 struct btrfs_key *key, 500 struct btrfs_key *key,
490 u64 bytenr, u64 len) 501 u64 bytenr, u64 len)
491{ 502{
492 struct extent_buffer *leaf; 503 struct extent_buffer *leaf;
493 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); 504 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
494 u64 csum_end; 505 u64 csum_end;
495 u64 end_byte = bytenr + len; 506 u64 end_byte = bytenr + len;
496 u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits; 507 u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
497 int ret;
498 508
499 leaf = path->nodes[0]; 509 leaf = path->nodes[0];
500 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; 510 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
@@ -510,7 +520,7 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
510 */ 520 */
511 u32 new_size = (bytenr - key->offset) >> blocksize_bits; 521 u32 new_size = (bytenr - key->offset) >> blocksize_bits;
512 new_size *= csum_size; 522 new_size *= csum_size;
513 ret = btrfs_truncate_item(trans, root, path, new_size, 1); 523 btrfs_truncate_item(trans, root, path, new_size, 1);
514 } else if (key->offset >= bytenr && csum_end > end_byte && 524 } else if (key->offset >= bytenr && csum_end > end_byte &&
515 end_byte > key->offset) { 525 end_byte > key->offset) {
516 /* 526 /*
@@ -522,15 +532,13 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
522 u32 new_size = (csum_end - end_byte) >> blocksize_bits; 532 u32 new_size = (csum_end - end_byte) >> blocksize_bits;
523 new_size *= csum_size; 533 new_size *= csum_size;
524 534
525 ret = btrfs_truncate_item(trans, root, path, new_size, 0); 535 btrfs_truncate_item(trans, root, path, new_size, 0);
526 536
527 key->offset = end_byte; 537 key->offset = end_byte;
528 ret = btrfs_set_item_key_safe(trans, root, path, key); 538 btrfs_set_item_key_safe(trans, root, path, key);
529 BUG_ON(ret);
530 } else { 539 } else {
531 BUG(); 540 BUG();
532 } 541 }
533 return 0;
534} 542}
535 543
536/* 544/*
@@ -635,13 +643,14 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
635 * item changed size or key 643 * item changed size or key
636 */ 644 */
637 ret = btrfs_split_item(trans, root, path, &key, offset); 645 ret = btrfs_split_item(trans, root, path, &key, offset);
638 BUG_ON(ret && ret != -EAGAIN); 646 if (ret && ret != -EAGAIN) {
647 btrfs_abort_transaction(trans, root, ret);
648 goto out;
649 }
639 650
640 key.offset = end_byte - 1; 651 key.offset = end_byte - 1;
641 } else { 652 } else {
642 ret = truncate_one_csum(trans, root, path, 653 truncate_one_csum(trans, root, path, &key, bytenr, len);
643 &key, bytenr, len);
644 BUG_ON(ret);
645 if (key.offset < bytenr) 654 if (key.offset < bytenr)
646 break; 655 break;
647 } 656 }
@@ -772,7 +781,7 @@ again:
772 if (diff != csum_size) 781 if (diff != csum_size)
773 goto insert; 782 goto insert;
774 783
775 ret = btrfs_extend_item(trans, root, path, diff); 784 btrfs_extend_item(trans, root, path, diff);
776 goto csum; 785 goto csum;
777 } 786 }
778 787
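
The most interesting change in file-item.c is in btrfs_lookup_csums_range(): checksum records are now collected on a local tmplist and only spliced onto the caller's list once the whole range has been read, so a mid-loop allocation failure no longer leaves half-built entries on the caller's list. Below is a small userspace model of that accumulate-then-splice pattern; it uses a toy singly linked list and invented names, not the kernel's list_head API.

/* Accumulate results privately, free them all on error, hand them over only
 * on success.  Toy list and hypothetical names; a model of the pattern only. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct sum {
	unsigned long bytenr;
	struct sum *next;
};

/* On success, prepend all new records to *out; on failure, touch nothing. */
static int lookup_sums(unsigned long start, unsigned long end, struct sum **out)
{
	struct sum *tmplist = NULL, **tail = &tmplist;
	unsigned long cur;
	int ret = 0;

	for (cur = start; cur < end; cur += 4096) {
		struct sum *s = calloc(1, sizeof(*s));

		if (!s) {
			ret = -ENOMEM;
			break;
		}
		s->bytenr = cur;
		*tail = s;
		tail = &s->next;
	}

	if (ret < 0) {
		while (tmplist) {		/* drop the partial results */
			struct sum *s = tmplist;

			tmplist = s->next;
			free(s);
		}
		return ret;
	}

	*tail = *out;				/* splice onto the caller's list */
	*out = tmplist;
	return 0;
}

int main(void)
{
	struct sum *list = NULL;

	if (lookup_sums(0, 16384, &list))
		return 1;
	while (list) {
		struct sum *s = list;

		list = s->next;
		printf("csum record at %lu\n", s->bytenr);
		free(s);
	}
	return 0;
}
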
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e8d06b6b9194..d83260d7498f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -452,7 +452,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
452 split = alloc_extent_map(); 452 split = alloc_extent_map();
453 if (!split2) 453 if (!split2)
454 split2 = alloc_extent_map(); 454 split2 = alloc_extent_map();
455 BUG_ON(!split || !split2); 455 BUG_ON(!split || !split2); /* -ENOMEM */
456 456
457 write_lock(&em_tree->lock); 457 write_lock(&em_tree->lock);
458 em = lookup_extent_mapping(em_tree, start, len); 458 em = lookup_extent_mapping(em_tree, start, len);
@@ -494,7 +494,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
494 split->flags = flags; 494 split->flags = flags;
495 split->compress_type = em->compress_type; 495 split->compress_type = em->compress_type;
496 ret = add_extent_mapping(em_tree, split); 496 ret = add_extent_mapping(em_tree, split);
497 BUG_ON(ret); 497 BUG_ON(ret); /* Logic error */
498 free_extent_map(split); 498 free_extent_map(split);
499 split = split2; 499 split = split2;
500 split2 = NULL; 500 split2 = NULL;
@@ -520,7 +520,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
520 } 520 }
521 521
522 ret = add_extent_mapping(em_tree, split); 522 ret = add_extent_mapping(em_tree, split);
523 BUG_ON(ret); 523 BUG_ON(ret); /* Logic error */
524 free_extent_map(split); 524 free_extent_map(split);
525 split = NULL; 525 split = NULL;
526 } 526 }
@@ -679,7 +679,7 @@ next_slot:
679 root->root_key.objectid, 679 root->root_key.objectid,
680 new_key.objectid, 680 new_key.objectid,
681 start - extent_offset, 0); 681 start - extent_offset, 0);
682 BUG_ON(ret); 682 BUG_ON(ret); /* -ENOMEM */
683 *hint_byte = disk_bytenr; 683 *hint_byte = disk_bytenr;
684 } 684 }
685 key.offset = start; 685 key.offset = start;
@@ -754,7 +754,7 @@ next_slot:
754 root->root_key.objectid, 754 root->root_key.objectid,
755 key.objectid, key.offset - 755 key.objectid, key.offset -
756 extent_offset, 0); 756 extent_offset, 0);
757 BUG_ON(ret); 757 BUG_ON(ret); /* -ENOMEM */
758 inode_sub_bytes(inode, 758 inode_sub_bytes(inode,
759 extent_end - key.offset); 759 extent_end - key.offset);
760 *hint_byte = disk_bytenr; 760 *hint_byte = disk_bytenr;
@@ -770,7 +770,10 @@ next_slot:
770 770
771 ret = btrfs_del_items(trans, root, path, del_slot, 771 ret = btrfs_del_items(trans, root, path, del_slot,
772 del_nr); 772 del_nr);
773 BUG_ON(ret); 773 if (ret) {
774 btrfs_abort_transaction(trans, root, ret);
775 goto out;
776 }
774 777
775 del_nr = 0; 778 del_nr = 0;
776 del_slot = 0; 779 del_slot = 0;
@@ -782,11 +785,13 @@ next_slot:
782 BUG_ON(1); 785 BUG_ON(1);
783 } 786 }
784 787
785 if (del_nr > 0) { 788 if (!ret && del_nr > 0) {
786 ret = btrfs_del_items(trans, root, path, del_slot, del_nr); 789 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
787 BUG_ON(ret); 790 if (ret)
791 btrfs_abort_transaction(trans, root, ret);
788 } 792 }
789 793
794out:
790 btrfs_free_path(path); 795 btrfs_free_path(path);
791 return ret; 796 return ret;
792} 797}
@@ -944,7 +949,10 @@ again:
944 btrfs_release_path(path); 949 btrfs_release_path(path);
945 goto again; 950 goto again;
946 } 951 }
947 BUG_ON(ret < 0); 952 if (ret < 0) {
953 btrfs_abort_transaction(trans, root, ret);
954 goto out;
955 }
948 956
949 leaf = path->nodes[0]; 957 leaf = path->nodes[0];
950 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, 958 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
@@ -963,7 +971,7 @@ again:
963 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, 971 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
964 root->root_key.objectid, 972 root->root_key.objectid,
965 ino, orig_offset, 0); 973 ino, orig_offset, 0);
966 BUG_ON(ret); 974 BUG_ON(ret); /* -ENOMEM */
967 975
968 if (split == start) { 976 if (split == start) {
969 key.offset = start; 977 key.offset = start;
@@ -990,7 +998,7 @@ again:
990 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 998 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
991 0, root->root_key.objectid, 999 0, root->root_key.objectid,
992 ino, orig_offset, 0); 1000 ino, orig_offset, 0);
993 BUG_ON(ret); 1001 BUG_ON(ret); /* -ENOMEM */
994 } 1002 }
995 other_start = 0; 1003 other_start = 0;
996 other_end = start; 1004 other_end = start;
@@ -1007,7 +1015,7 @@ again:
1007 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 1015 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1008 0, root->root_key.objectid, 1016 0, root->root_key.objectid,
1009 ino, orig_offset, 0); 1017 ino, orig_offset, 0);
1010 BUG_ON(ret); 1018 BUG_ON(ret); /* -ENOMEM */
1011 } 1019 }
1012 if (del_nr == 0) { 1020 if (del_nr == 0) {
1013 fi = btrfs_item_ptr(leaf, path->slots[0], 1021 fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -1025,7 +1033,10 @@ again:
1025 btrfs_mark_buffer_dirty(leaf); 1033 btrfs_mark_buffer_dirty(leaf);
1026 1034
1027 ret = btrfs_del_items(trans, root, path, del_slot, del_nr); 1035 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1028 BUG_ON(ret); 1036 if (ret < 0) {
1037 btrfs_abort_transaction(trans, root, ret);
1038 goto out;
1039 }
1029 } 1040 }
1030out: 1041out:
1031 btrfs_free_path(path); 1042 btrfs_free_path(path);
@@ -1105,8 +1116,7 @@ again:
1105 if (start_pos < inode->i_size) { 1116 if (start_pos < inode->i_size) {
1106 struct btrfs_ordered_extent *ordered; 1117 struct btrfs_ordered_extent *ordered;
1107 lock_extent_bits(&BTRFS_I(inode)->io_tree, 1118 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1108 start_pos, last_pos - 1, 0, &cached_state, 1119 start_pos, last_pos - 1, 0, &cached_state);
1109 GFP_NOFS);
1110 ordered = btrfs_lookup_first_ordered_extent(inode, 1120 ordered = btrfs_lookup_first_ordered_extent(inode,
1111 last_pos - 1); 1121 last_pos - 1);
1112 if (ordered && 1122 if (ordered &&
@@ -1638,7 +1648,7 @@ static long btrfs_fallocate(struct file *file, int mode,
1638 * transaction 1648 * transaction
1639 */ 1649 */
1640 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, 1650 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
1641 locked_end, 0, &cached_state, GFP_NOFS); 1651 locked_end, 0, &cached_state);
1642 ordered = btrfs_lookup_first_ordered_extent(inode, 1652 ordered = btrfs_lookup_first_ordered_extent(inode,
1643 alloc_end - 1); 1653 alloc_end - 1);
1644 if (ordered && 1654 if (ordered &&
@@ -1667,7 +1677,13 @@ static long btrfs_fallocate(struct file *file, int mode,
1667 1677
1668 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 1678 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
1669 alloc_end - cur_offset, 0); 1679 alloc_end - cur_offset, 0);
1670 BUG_ON(IS_ERR_OR_NULL(em)); 1680 if (IS_ERR_OR_NULL(em)) {
1681 if (!em)
1682 ret = -ENOMEM;
1683 else
1684 ret = PTR_ERR(em);
1685 break;
1686 }
1671 last_byte = min(extent_map_end(em), alloc_end); 1687 last_byte = min(extent_map_end(em), alloc_end);
1672 actual_end = min_t(u64, extent_map_end(em), offset + len); 1688 actual_end = min_t(u64, extent_map_end(em), offset + len);
1673 last_byte = (last_byte + mask) & ~mask; 1689 last_byte = (last_byte + mask) & ~mask;
@@ -1737,7 +1753,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
1737 return -ENXIO; 1753 return -ENXIO;
1738 1754
1739 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, 1755 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
1740 &cached_state, GFP_NOFS); 1756 &cached_state);
1741 1757
1742 /* 1758 /*
1743 * Delalloc is such a pain. If we have a hole and we have pending 1759 * Delalloc is such a pain. If we have a hole and we have pending
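
Several file.c hunks above replace BUG_ON() with abort-or-propagate paths; the btrfs_fallocate() one also decodes the pointer-encoded error from btrfs_get_extent(): a NULL map is treated as -ENOMEM, and an ERR_PTR-encoded map is unwrapped with PTR_ERR() instead of crashing. The self-contained sketch below models that ERR_PTR convention with simplified stand-ins for the kernel macros; toy_get_extent() and its failure modes are invented for the example.

/* Model of the ERR_PTR convention used in the fallocate hunk: a pointer
 * return carries either a real object, NULL, or a small negative errno
 * encoded in the pointer value.  Simplified stand-ins, not the kernel code. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static void *toy_err_ptr(intptr_t err)      { return (void *)err; }
static intptr_t toy_ptr_err(const void *p)  { return (intptr_t)p; }
static int toy_is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}
static int toy_is_err_or_null(const void *p)
{
	return !p || toy_is_err(p);
}

struct toy_extent_map { unsigned long start, len; };

static struct toy_extent_map *toy_get_extent(int mode)
{
	static struct toy_extent_map em = { 0, 4096 };

	if (mode == 1)
		return NULL;			/* allocation failed */
	if (mode == 2)
		return toy_err_ptr(-EIO);	/* lookup failed */
	return &em;
}

static int fallocate_step(int mode)
{
	struct toy_extent_map *em = toy_get_extent(mode);

	if (toy_is_err_or_null(em))	/* was: BUG_ON(IS_ERR_OR_NULL(em)) */
		return em ? (int)toy_ptr_err(em) : -ENOMEM;
	printf("extent [%lu, %lu)\n", em->start, em->start + em->len);
	return 0;
}

int main(void)
{
	printf("ok=%d nomem=%d eio=%d\n",
	       fallocate_step(0), fallocate_step(1), fallocate_step(2));
	return 0;
}
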
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 710ea380c7ed..054707ed5791 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -230,11 +230,13 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
230 230
231 if (ret) { 231 if (ret) {
232 trans->block_rsv = rsv; 232 trans->block_rsv = rsv;
233 WARN_ON(1); 233 btrfs_abort_transaction(trans, root, ret);
234 return ret; 234 return ret;
235 } 235 }
236 236
237 ret = btrfs_update_inode(trans, root, inode); 237 ret = btrfs_update_inode(trans, root, inode);
238 if (ret)
239 btrfs_abort_transaction(trans, root, ret);
238 trans->block_rsv = rsv; 240 trans->block_rsv = rsv;
239 241
240 return ret; 242 return ret;
@@ -869,7 +871,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
869 io_ctl_prepare_pages(&io_ctl, inode, 0); 871 io_ctl_prepare_pages(&io_ctl, inode, 0);
870 872
871 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 873 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
872 0, &cached_state, GFP_NOFS); 874 0, &cached_state);
873 875
874 node = rb_first(&ctl->free_space_offset); 876 node = rb_first(&ctl->free_space_offset);
875 if (!node && cluster) { 877 if (!node && cluster) {
@@ -1948,14 +1950,14 @@ again:
1948 */ 1950 */
1949 ret = btrfs_add_free_space(block_group, old_start, 1951 ret = btrfs_add_free_space(block_group, old_start,
1950 offset - old_start); 1952 offset - old_start);
1951 WARN_ON(ret); 1953 WARN_ON(ret); /* -ENOMEM */
1952 goto out; 1954 goto out;
1953 } 1955 }
1954 1956
1955 ret = remove_from_bitmap(ctl, info, &offset, &bytes); 1957 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1956 if (ret == -EAGAIN) 1958 if (ret == -EAGAIN)
1957 goto again; 1959 goto again;
1958 BUG_ON(ret); 1960 BUG_ON(ret); /* logic error */
1959out_lock: 1961out_lock:
1960 spin_unlock(&ctl->tree_lock); 1962 spin_unlock(&ctl->tree_lock);
1961out: 1963out:
@@ -2346,7 +2348,7 @@ again:
2346 rb_erase(&entry->offset_index, &ctl->free_space_offset); 2348 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2347 ret = tree_insert_offset(&cluster->root, entry->offset, 2349 ret = tree_insert_offset(&cluster->root, entry->offset,
2348 &entry->offset_index, 1); 2350 &entry->offset_index, 1);
2349 BUG_ON(ret); 2351 BUG_ON(ret); /* -EEXIST; Logic error */
2350 2352
2351 trace_btrfs_setup_cluster(block_group, cluster, 2353 trace_btrfs_setup_cluster(block_group, cluster,
2352 total_found * block_group->sectorsize, 1); 2354 total_found * block_group->sectorsize, 1);
@@ -2439,7 +2441,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2439 ret = tree_insert_offset(&cluster->root, entry->offset, 2441 ret = tree_insert_offset(&cluster->root, entry->offset,
2440 &entry->offset_index, 0); 2442 &entry->offset_index, 0);
2441 total_size += entry->bytes; 2443 total_size += entry->bytes;
2442 BUG_ON(ret); 2444 BUG_ON(ret); /* -EEXIST; Logic error */
2443 } while (node && entry != last); 2445 } while (node && entry != last);
2444 2446
2445 cluster->max_size = max_extent; 2447 cluster->max_size = max_extent;
@@ -2830,6 +2832,7 @@ u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2830 int ret; 2832 int ret;
2831 2833
2832 ret = search_bitmap(ctl, entry, &offset, &count); 2834 ret = search_bitmap(ctl, entry, &offset, &count);
2835 /* Logic error; Should be empty if it can't find anything */
2833 BUG_ON(ret); 2836 BUG_ON(ret);
2834 2837
2835 ino = offset; 2838 ino = offset;
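
The free-space-cache.c changes point the same way: a failed truncation now aborts the transaction rather than just warning, the result of btrfs_update_inode() is checked and aborted on as well, and the surviving BUG_ON sites gain comments explaining why they are treated as logic errors or unrecoverable -ENOMEM. Below is a small userspace model of that abort-and-propagate shape, assuming a toy transaction structure; neither the names nor the failure codes are the kernel's.

/* Abort-and-propagate model: on failure, flag the whole transaction as dead
 * and hand the error back instead of warning or crashing.  Toy types only. */
#include <errno.h>
#include <stdio.h>

struct toy_trans { int aborted; };

static void toy_abort_transaction(struct toy_trans *trans, int err)
{
	if (!trans->aborted)
		trans->aborted = err;	/* later commits refuse to run */
	fprintf(stderr, "transaction aborted: %d\n", err);
}

static int toy_truncate_items(int fail)	{ return fail ? -EIO : 0; }
static int toy_update_inode(int fail)	{ return fail ? -ENOSPC : 0; }

static int truncate_cache(struct toy_trans *trans, int fail_truncate,
			  int fail_update)
{
	int ret = toy_truncate_items(fail_truncate);

	if (ret) {			/* was: WARN_ON(1), then return */
		toy_abort_transaction(trans, ret);
		return ret;
	}

	ret = toy_update_inode(fail_update);
	if (ret)			/* was: error returned, no abort */
		toy_abort_transaction(trans, ret);
	return ret;
}

int main(void)
{
	struct toy_trans trans = { 0 };

	printf("clean run: %d\n", truncate_cache(&trans, 0, 0));
	printf("failed update: %d (aborted=%d)\n",
	       truncate_cache(&trans, 0, 1), trans.aborted);
	return 0;
}
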
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 6ea71c60e80a..a13cf1a96c73 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -129,13 +129,14 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
129 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); 129 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
130 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, 130 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
131 item_size - (ptr + sub_item_len - item_start)); 131 item_size - (ptr + sub_item_len - item_start));
132 ret = btrfs_truncate_item(trans, root, path, 132 btrfs_truncate_item(trans, root, path,
133 item_size - sub_item_len, 1); 133 item_size - sub_item_len, 1);
134out: 134out:
135 btrfs_free_path(path); 135 btrfs_free_path(path);
136 return ret; 136 return ret;
137} 137}
138 138
139/* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
139int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, 140int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
140 struct btrfs_root *root, 141 struct btrfs_root *root,
141 const char *name, int name_len, 142 const char *name, int name_len,
@@ -166,7 +167,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
166 goto out; 167 goto out;
167 168
168 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 169 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
169 ret = btrfs_extend_item(trans, root, path, ins_len); 170 btrfs_extend_item(trans, root, path, ins_len);
170 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], 171 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
171 struct btrfs_inode_ref); 172 struct btrfs_inode_ref);
172 ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); 173 ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index ee15d88b33d2..7ca46e6e11ae 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -178,7 +178,7 @@ static void start_caching(struct btrfs_root *root)
178 178
179 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", 179 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
180 root->root_key.objectid); 180 root->root_key.objectid);
181 BUG_ON(IS_ERR(tsk)); 181 BUG_ON(IS_ERR(tsk)); /* -ENOMEM */
182} 182}
183 183
184int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) 184int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
@@ -271,7 +271,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
271 break; 271 break;
272 272
273 info = rb_entry(n, struct btrfs_free_space, offset_index); 273 info = rb_entry(n, struct btrfs_free_space, offset_index);
274 BUG_ON(info->bitmap); 274 BUG_ON(info->bitmap); /* Logic error */
275 275
276 if (info->offset > root->cache_progress) 276 if (info->offset > root->cache_progress)
277 goto free; 277 goto free;
@@ -443,13 +443,13 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
443 trans->bytes_reserved, 1); 443 trans->bytes_reserved, 1);
444again: 444again:
445 inode = lookup_free_ino_inode(root, path); 445 inode = lookup_free_ino_inode(root, path);
446 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 446 if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
447 ret = PTR_ERR(inode); 447 ret = PTR_ERR(inode);
448 goto out_release; 448 goto out_release;
449 } 449 }
450 450
451 if (IS_ERR(inode)) { 451 if (IS_ERR(inode)) {
452 BUG_ON(retry); 452 BUG_ON(retry); /* Logic error */
453 retry = true; 453 retry = true;
454 454
455 ret = create_free_ino_inode(root, trans, path); 455 ret = create_free_ino_inode(root, trans, path);
@@ -460,12 +460,17 @@ again:
460 460
461 BTRFS_I(inode)->generation = 0; 461 BTRFS_I(inode)->generation = 0;
462 ret = btrfs_update_inode(trans, root, inode); 462 ret = btrfs_update_inode(trans, root, inode);
463 WARN_ON(ret); 463 if (ret) {
464 btrfs_abort_transaction(trans, root, ret);
465 goto out_put;
466 }
464 467
465 if (i_size_read(inode) > 0) { 468 if (i_size_read(inode) > 0) {
466 ret = btrfs_truncate_free_space_cache(root, trans, path, inode); 469 ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
467 if (ret) 470 if (ret) {
471 btrfs_abort_transaction(trans, root, ret);
468 goto out_put; 472 goto out_put;
473 }
469 } 474 }
470 475
471 spin_lock(&root->cache_lock); 476 spin_lock(&root->cache_lock);
@@ -532,7 +537,7 @@ static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
532 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 537 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
533 if (ret < 0) 538 if (ret < 0)
534 goto error; 539 goto error;
535 BUG_ON(ret == 0); 540 BUG_ON(ret == 0); /* Corruption */
536 if (path->slots[0] > 0) { 541 if (path->slots[0] > 0) {
537 slot = path->slots[0] - 1; 542 slot = path->slots[0] - 1;
538 l = path->nodes[0]; 543 l = path->nodes[0];
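
One subtle change in btrfs_save_ino_cache() above is the lookup guard: -ENOENT from lookup_free_ino_inode() is tolerated exactly once, where it triggers creation of the cache inode, and any error after the retry, including a second -ENOENT, is returned; the update and truncate failures now abort the transaction. A tiny model of that retry guard follows; the helper names and the boolean plumbing are invented for the sketch.

/* Retry-once model of the guard in the inode-map.c hunk above.  Toy code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int lookup_cache_inode(bool exists)
{
	return exists ? 0 : -ENOENT;
}

static int save_ino_cache(bool exists_after_create)
{
	bool retry = false;
	int err;

again:
	/* First pass models "cache inode not created yet". */
	err = lookup_cache_inode(retry ? exists_after_create : false);
	if (err && (err != -ENOENT || retry))
		return err;		/* real failure, or -ENOENT twice */
	if (err == -ENOENT) {
		retry = true;		/* create the inode, then try again */
		goto again;
	}
	printf("cache inode found\n");
	return 0;
}

int main(void)
{
	printf("found after create: %d\n", save_ino_cache(true));
	printf("still missing:      %d\n", save_ino_cache(false));
	return 0;
}
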
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 341a8670165f..eb6aec7bbacb 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -150,7 +150,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
150 inode_add_bytes(inode, size); 150 inode_add_bytes(inode, size);
151 ret = btrfs_insert_empty_item(trans, root, path, &key, 151 ret = btrfs_insert_empty_item(trans, root, path, &key,
152 datasize); 152 datasize);
153 BUG_ON(ret);
154 if (ret) { 153 if (ret) {
155 err = ret; 154 err = ret;
156 goto fail; 155 goto fail;
@@ -206,9 +205,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
206 * could end up racing with unlink. 205 * could end up racing with unlink.
207 */ 206 */
208 BTRFS_I(inode)->disk_i_size = inode->i_size; 207 BTRFS_I(inode)->disk_i_size = inode->i_size;
209 btrfs_update_inode(trans, root, inode); 208 ret = btrfs_update_inode(trans, root, inode);
210 209
211 return 0; 210 return ret;
212fail: 211fail:
213 btrfs_free_path(path); 212 btrfs_free_path(path);
214 return err; 213 return err;
@@ -250,14 +249,18 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
250 249
251 ret = btrfs_drop_extents(trans, inode, start, aligned_end, 250 ret = btrfs_drop_extents(trans, inode, start, aligned_end,
252 &hint_byte, 1); 251 &hint_byte, 1);
253 BUG_ON(ret); 252 if (ret)
253 return ret;
254 254
255 if (isize > actual_end) 255 if (isize > actual_end)
256 inline_len = min_t(u64, isize, actual_end); 256 inline_len = min_t(u64, isize, actual_end);
257 ret = insert_inline_extent(trans, root, inode, start, 257 ret = insert_inline_extent(trans, root, inode, start,
258 inline_len, compressed_size, 258 inline_len, compressed_size,
259 compress_type, compressed_pages); 259 compress_type, compressed_pages);
260 BUG_ON(ret); 260 if (ret) {
261 btrfs_abort_transaction(trans, root, ret);
262 return ret;
263 }
261 btrfs_delalloc_release_metadata(inode, end + 1 - start); 264 btrfs_delalloc_release_metadata(inode, end + 1 - start);
262 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); 265 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
263 return 0; 266 return 0;
@@ -293,7 +296,7 @@ static noinline int add_async_extent(struct async_cow *cow,
293 struct async_extent *async_extent; 296 struct async_extent *async_extent;
294 297
295 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 298 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
296 BUG_ON(!async_extent); 299 BUG_ON(!async_extent); /* -ENOMEM */
297 async_extent->start = start; 300 async_extent->start = start;
298 async_extent->ram_size = ram_size; 301 async_extent->ram_size = ram_size;
299 async_extent->compressed_size = compressed_size; 302 async_extent->compressed_size = compressed_size;
@@ -433,7 +436,11 @@ again:
433cont: 436cont:
434 if (start == 0) { 437 if (start == 0) {
435 trans = btrfs_join_transaction(root); 438 trans = btrfs_join_transaction(root);
436 BUG_ON(IS_ERR(trans)); 439 if (IS_ERR(trans)) {
440 ret = PTR_ERR(trans);
441 trans = NULL;
442 goto cleanup_and_out;
443 }
437 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 444 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
438 445
439 /* lets try to make an inline extent */ 446 /* lets try to make an inline extent */
@@ -450,11 +457,11 @@ cont:
450 total_compressed, 457 total_compressed,
451 compress_type, pages); 458 compress_type, pages);
452 } 459 }
453 if (ret == 0) { 460 if (ret <= 0) {
454 /* 461 /*
455 * inline extent creation worked, we don't need 462 * inline extent creation worked or returned error,
456 * to create any more async work items. Unlock 463 * we don't need to create any more async work items.
457 * and free up our temp pages. 464 * Unlock and free up our temp pages.
458 */ 465 */
459 extent_clear_unlock_delalloc(inode, 466 extent_clear_unlock_delalloc(inode,
460 &BTRFS_I(inode)->io_tree, 467 &BTRFS_I(inode)->io_tree,
@@ -547,7 +554,7 @@ cleanup_and_bail_uncompressed:
547 } 554 }
548 555
549out: 556out:
550 return 0; 557 return ret;
551 558
552free_pages_out: 559free_pages_out:
553 for (i = 0; i < nr_pages_ret; i++) { 560 for (i = 0; i < nr_pages_ret; i++) {
@@ -557,6 +564,20 @@ free_pages_out:
557 kfree(pages); 564 kfree(pages);
558 565
559 goto out; 566 goto out;
567
568cleanup_and_out:
569 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
570 start, end, NULL,
571 EXTENT_CLEAR_UNLOCK_PAGE |
572 EXTENT_CLEAR_DIRTY |
573 EXTENT_CLEAR_DELALLOC |
574 EXTENT_SET_WRITEBACK |
575 EXTENT_END_WRITEBACK);
576 if (!trans || IS_ERR(trans))
577 btrfs_error(root->fs_info, ret, "Failed to join transaction");
578 else
579 btrfs_abort_transaction(trans, root, ret);
580 goto free_pages_out;
560} 581}
561 582
562/* 583/*
@@ -597,7 +618,7 @@ retry:
597 618
598 lock_extent(io_tree, async_extent->start, 619 lock_extent(io_tree, async_extent->start,
599 async_extent->start + 620 async_extent->start +
600 async_extent->ram_size - 1, GFP_NOFS); 621 async_extent->ram_size - 1);
601 622
602 /* allocate blocks */ 623 /* allocate blocks */
603 ret = cow_file_range(inode, async_cow->locked_page, 624 ret = cow_file_range(inode, async_cow->locked_page,
@@ -606,6 +627,8 @@ retry:
606 async_extent->ram_size - 1, 627 async_extent->ram_size - 1,
607 &page_started, &nr_written, 0); 628 &page_started, &nr_written, 0);
608 629
630 /* JDM XXX */
631
609 /* 632 /*
610 * if page_started, cow_file_range inserted an 633 * if page_started, cow_file_range inserted an
611 * inline extent and took care of all the unlocking 634 * inline extent and took care of all the unlocking
@@ -625,17 +648,21 @@ retry:
625 } 648 }
626 649
627 lock_extent(io_tree, async_extent->start, 650 lock_extent(io_tree, async_extent->start,
628 async_extent->start + async_extent->ram_size - 1, 651 async_extent->start + async_extent->ram_size - 1);
629 GFP_NOFS);
630 652
631 trans = btrfs_join_transaction(root); 653 trans = btrfs_join_transaction(root);
632 BUG_ON(IS_ERR(trans)); 654 if (IS_ERR(trans)) {
633 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 655 ret = PTR_ERR(trans);
634 ret = btrfs_reserve_extent(trans, root, 656 } else {
657 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
658 ret = btrfs_reserve_extent(trans, root,
635 async_extent->compressed_size, 659 async_extent->compressed_size,
636 async_extent->compressed_size, 660 async_extent->compressed_size,
637 0, alloc_hint, &ins, 1); 661 0, alloc_hint, &ins, 1);
638 btrfs_end_transaction(trans, root); 662 if (ret)
663 btrfs_abort_transaction(trans, root, ret);
664 btrfs_end_transaction(trans, root);
665 }
639 666
640 if (ret) { 667 if (ret) {
641 int i; 668 int i;
@@ -648,8 +675,10 @@ retry:
648 async_extent->pages = NULL; 675 async_extent->pages = NULL;
649 unlock_extent(io_tree, async_extent->start, 676 unlock_extent(io_tree, async_extent->start,
650 async_extent->start + 677 async_extent->start +
651 async_extent->ram_size - 1, GFP_NOFS); 678 async_extent->ram_size - 1);
652 goto retry; 679 if (ret == -ENOSPC)
680 goto retry;
681 goto out_free; /* JDM: Requeue? */
653 } 682 }
654 683
655 /* 684 /*
@@ -661,7 +690,7 @@ retry:
661 async_extent->ram_size - 1, 0); 690 async_extent->ram_size - 1, 0);
662 691
663 em = alloc_extent_map(); 692 em = alloc_extent_map();
664 BUG_ON(!em); 693 BUG_ON(!em); /* -ENOMEM */
665 em->start = async_extent->start; 694 em->start = async_extent->start;
666 em->len = async_extent->ram_size; 695 em->len = async_extent->ram_size;
667 em->orig_start = em->start; 696 em->orig_start = em->start;
@@ -693,7 +722,7 @@ retry:
693 ins.offset, 722 ins.offset,
694 BTRFS_ORDERED_COMPRESSED, 723 BTRFS_ORDERED_COMPRESSED,
695 async_extent->compress_type); 724 async_extent->compress_type);
696 BUG_ON(ret); 725 BUG_ON(ret); /* -ENOMEM */
697 726
698 /* 727 /*
699 * clear dirty, set writeback and unlock the pages. 728 * clear dirty, set writeback and unlock the pages.
@@ -715,13 +744,17 @@ retry:
715 ins.offset, async_extent->pages, 744 ins.offset, async_extent->pages,
716 async_extent->nr_pages); 745 async_extent->nr_pages);
717 746
718 BUG_ON(ret); 747 BUG_ON(ret); /* -ENOMEM */
719 alloc_hint = ins.objectid + ins.offset; 748 alloc_hint = ins.objectid + ins.offset;
720 kfree(async_extent); 749 kfree(async_extent);
721 cond_resched(); 750 cond_resched();
722 } 751 }
723 752 ret = 0;
724 return 0; 753out:
754 return ret;
755out_free:
756 kfree(async_extent);
757 goto out;
725} 758}
726 759
727static u64 get_extent_allocation_hint(struct inode *inode, u64 start, 760static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
@@ -790,7 +823,18 @@ static noinline int cow_file_range(struct inode *inode,
790 823
791 BUG_ON(btrfs_is_free_space_inode(root, inode)); 824 BUG_ON(btrfs_is_free_space_inode(root, inode));
792 trans = btrfs_join_transaction(root); 825 trans = btrfs_join_transaction(root);
793 BUG_ON(IS_ERR(trans)); 826 if (IS_ERR(trans)) {
827 extent_clear_unlock_delalloc(inode,
828 &BTRFS_I(inode)->io_tree,
829 start, end, NULL,
830 EXTENT_CLEAR_UNLOCK_PAGE |
831 EXTENT_CLEAR_UNLOCK |
832 EXTENT_CLEAR_DELALLOC |
833 EXTENT_CLEAR_DIRTY |
834 EXTENT_SET_WRITEBACK |
835 EXTENT_END_WRITEBACK);
836 return PTR_ERR(trans);
837 }
794 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 838 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
795 839
796 num_bytes = (end - start + blocksize) & ~(blocksize - 1); 840 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
@@ -820,8 +864,10 @@ static noinline int cow_file_range(struct inode *inode,
820 *nr_written = *nr_written + 864 *nr_written = *nr_written +
821 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 865 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
822 *page_started = 1; 866 *page_started = 1;
823 ret = 0;
824 goto out; 867 goto out;
868 } else if (ret < 0) {
869 btrfs_abort_transaction(trans, root, ret);
870 goto out_unlock;
825 } 871 }
826 } 872 }
827 873
@@ -838,10 +884,13 @@ static noinline int cow_file_range(struct inode *inode,
838 ret = btrfs_reserve_extent(trans, root, cur_alloc_size, 884 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
839 root->sectorsize, 0, alloc_hint, 885 root->sectorsize, 0, alloc_hint,
840 &ins, 1); 886 &ins, 1);
841 BUG_ON(ret); 887 if (ret < 0) {
888 btrfs_abort_transaction(trans, root, ret);
889 goto out_unlock;
890 }
842 891
843 em = alloc_extent_map(); 892 em = alloc_extent_map();
844 BUG_ON(!em); 893 BUG_ON(!em); /* -ENOMEM */
845 em->start = start; 894 em->start = start;
846 em->orig_start = em->start; 895 em->orig_start = em->start;
847 ram_size = ins.offset; 896 ram_size = ins.offset;
@@ -867,13 +916,16 @@ static noinline int cow_file_range(struct inode *inode,
867 cur_alloc_size = ins.offset; 916 cur_alloc_size = ins.offset;
868 ret = btrfs_add_ordered_extent(inode, start, ins.objectid, 917 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
869 ram_size, cur_alloc_size, 0); 918 ram_size, cur_alloc_size, 0);
870 BUG_ON(ret); 919 BUG_ON(ret); /* -ENOMEM */
871 920
872 if (root->root_key.objectid == 921 if (root->root_key.objectid ==
873 BTRFS_DATA_RELOC_TREE_OBJECTID) { 922 BTRFS_DATA_RELOC_TREE_OBJECTID) {
874 ret = btrfs_reloc_clone_csums(inode, start, 923 ret = btrfs_reloc_clone_csums(inode, start,
875 cur_alloc_size); 924 cur_alloc_size);
876 BUG_ON(ret); 925 if (ret) {
926 btrfs_abort_transaction(trans, root, ret);
927 goto out_unlock;
928 }
877 } 929 }
878 930
879 if (disk_num_bytes < cur_alloc_size) 931 if (disk_num_bytes < cur_alloc_size)
@@ -898,11 +950,23 @@ static noinline int cow_file_range(struct inode *inode,
898 alloc_hint = ins.objectid + ins.offset; 950 alloc_hint = ins.objectid + ins.offset;
899 start += cur_alloc_size; 951 start += cur_alloc_size;
900 } 952 }
901out:
902 ret = 0; 953 ret = 0;
954out:
903 btrfs_end_transaction(trans, root); 955 btrfs_end_transaction(trans, root);
904 956
905 return ret; 957 return ret;
958out_unlock:
959 extent_clear_unlock_delalloc(inode,
960 &BTRFS_I(inode)->io_tree,
961 start, end, NULL,
962 EXTENT_CLEAR_UNLOCK_PAGE |
963 EXTENT_CLEAR_UNLOCK |
964 EXTENT_CLEAR_DELALLOC |
965 EXTENT_CLEAR_DIRTY |
966 EXTENT_SET_WRITEBACK |
967 EXTENT_END_WRITEBACK);
968
969 goto out;
906} 970}
907 971
908/* 972/*
@@ -968,7 +1032,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
968 1, 0, NULL, GFP_NOFS); 1032 1, 0, NULL, GFP_NOFS);
969 while (start < end) { 1033 while (start < end) {
970 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 1034 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
971 BUG_ON(!async_cow); 1035 BUG_ON(!async_cow); /* -ENOMEM */
972 async_cow->inode = inode; 1036 async_cow->inode = inode;
973 async_cow->root = root; 1037 async_cow->root = root;
974 async_cow->locked_page = locked_page; 1038 async_cow->locked_page = locked_page;
@@ -1059,7 +1123,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1059 u64 disk_bytenr; 1123 u64 disk_bytenr;
1060 u64 num_bytes; 1124 u64 num_bytes;
1061 int extent_type; 1125 int extent_type;
1062 int ret; 1126 int ret, err;
1063 int type; 1127 int type;
1064 int nocow; 1128 int nocow;
1065 int check_prev = 1; 1129 int check_prev = 1;
@@ -1077,7 +1141,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1077 else 1141 else
1078 trans = btrfs_join_transaction(root); 1142 trans = btrfs_join_transaction(root);
1079 1143
1080 BUG_ON(IS_ERR(trans)); 1144 if (IS_ERR(trans)) {
1145 btrfs_free_path(path);
1146 return PTR_ERR(trans);
1147 }
1148
1081 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1149 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1082 1150
1083 cow_start = (u64)-1; 1151 cow_start = (u64)-1;
@@ -1085,7 +1153,10 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1085 while (1) { 1153 while (1) {
1086 ret = btrfs_lookup_file_extent(trans, root, path, ino, 1154 ret = btrfs_lookup_file_extent(trans, root, path, ino,
1087 cur_offset, 0); 1155 cur_offset, 0);
1088 BUG_ON(ret < 0); 1156 if (ret < 0) {
1157 btrfs_abort_transaction(trans, root, ret);
1158 goto error;
1159 }
1089 if (ret > 0 && path->slots[0] > 0 && check_prev) { 1160 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1090 leaf = path->nodes[0]; 1161 leaf = path->nodes[0];
1091 btrfs_item_key_to_cpu(leaf, &found_key, 1162 btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1099,8 +1170,10 @@ next_slot:
1099 leaf = path->nodes[0]; 1170 leaf = path->nodes[0];
1100 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1171 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1101 ret = btrfs_next_leaf(root, path); 1172 ret = btrfs_next_leaf(root, path);
1102 if (ret < 0) 1173 if (ret < 0) {
1103 BUG_ON(1); 1174 btrfs_abort_transaction(trans, root, ret);
1175 goto error;
1176 }
1104 if (ret > 0) 1177 if (ret > 0)
1105 break; 1178 break;
1106 leaf = path->nodes[0]; 1179 leaf = path->nodes[0];
@@ -1188,7 +1261,10 @@ out_check:
1188 ret = cow_file_range(inode, locked_page, cow_start, 1261 ret = cow_file_range(inode, locked_page, cow_start,
1189 found_key.offset - 1, page_started, 1262 found_key.offset - 1, page_started,
1190 nr_written, 1); 1263 nr_written, 1);
1191 BUG_ON(ret); 1264 if (ret) {
1265 btrfs_abort_transaction(trans, root, ret);
1266 goto error;
1267 }
1192 cow_start = (u64)-1; 1268 cow_start = (u64)-1;
1193 } 1269 }
1194 1270
@@ -1197,7 +1273,7 @@ out_check:
1197 struct extent_map_tree *em_tree; 1273 struct extent_map_tree *em_tree;
1198 em_tree = &BTRFS_I(inode)->extent_tree; 1274 em_tree = &BTRFS_I(inode)->extent_tree;
1199 em = alloc_extent_map(); 1275 em = alloc_extent_map();
1200 BUG_ON(!em); 1276 BUG_ON(!em); /* -ENOMEM */
1201 em->start = cur_offset; 1277 em->start = cur_offset;
1202 em->orig_start = em->start; 1278 em->orig_start = em->start;
1203 em->len = num_bytes; 1279 em->len = num_bytes;
@@ -1223,13 +1299,16 @@ out_check:
1223 1299
1224 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, 1300 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1225 num_bytes, num_bytes, type); 1301 num_bytes, num_bytes, type);
1226 BUG_ON(ret); 1302 BUG_ON(ret); /* -ENOMEM */
1227 1303
1228 if (root->root_key.objectid == 1304 if (root->root_key.objectid ==
1229 BTRFS_DATA_RELOC_TREE_OBJECTID) { 1305 BTRFS_DATA_RELOC_TREE_OBJECTID) {
1230 ret = btrfs_reloc_clone_csums(inode, cur_offset, 1306 ret = btrfs_reloc_clone_csums(inode, cur_offset,
1231 num_bytes); 1307 num_bytes);
1232 BUG_ON(ret); 1308 if (ret) {
1309 btrfs_abort_transaction(trans, root, ret);
1310 goto error;
1311 }
1233 } 1312 }
1234 1313
1235 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 1314 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
@@ -1248,18 +1327,23 @@ out_check:
1248 if (cow_start != (u64)-1) { 1327 if (cow_start != (u64)-1) {
1249 ret = cow_file_range(inode, locked_page, cow_start, end, 1328 ret = cow_file_range(inode, locked_page, cow_start, end,
1250 page_started, nr_written, 1); 1329 page_started, nr_written, 1);
1251 BUG_ON(ret); 1330 if (ret) {
1331 btrfs_abort_transaction(trans, root, ret);
1332 goto error;
1333 }
1252 } 1334 }
1253 1335
1336error:
1254 if (nolock) { 1337 if (nolock) {
1255 ret = btrfs_end_transaction_nolock(trans, root); 1338 err = btrfs_end_transaction_nolock(trans, root);
1256 BUG_ON(ret);
1257 } else { 1339 } else {
1258 ret = btrfs_end_transaction(trans, root); 1340 err = btrfs_end_transaction(trans, root);
1259 BUG_ON(ret);
1260 } 1341 }
1342 if (!ret)
1343 ret = err;
1344
1261 btrfs_free_path(path); 1345 btrfs_free_path(path);
1262 return 0; 1346 return ret;
1263} 1347}
1264 1348
1265/* 1349/*
@@ -1424,10 +1508,11 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1424 map_length = length; 1508 map_length = length;
1425 ret = btrfs_map_block(map_tree, READ, logical, 1509 ret = btrfs_map_block(map_tree, READ, logical,
1426 &map_length, NULL, 0); 1510 &map_length, NULL, 0);
1427 1511 /* Will always return 0 or 1 with map_multi == NULL */
1512 BUG_ON(ret < 0);
1428 if (map_length < length + size) 1513 if (map_length < length + size)
1429 return 1; 1514 return 1;
1430 return ret; 1515 return 0;
1431} 1516}
1432 1517
1433/* 1518/*
@@ -1447,7 +1532,7 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1447 int ret = 0; 1532 int ret = 0;
1448 1533
1449 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); 1534 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1450 BUG_ON(ret); 1535 BUG_ON(ret); /* -ENOMEM */
1451 return 0; 1536 return 0;
1452} 1537}
1453 1538
@@ -1478,14 +1563,16 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1478 struct btrfs_root *root = BTRFS_I(inode)->root; 1563 struct btrfs_root *root = BTRFS_I(inode)->root;
1479 int ret = 0; 1564 int ret = 0;
1480 int skip_sum; 1565 int skip_sum;
1566 int metadata = 0;
1481 1567
1482 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 1568 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1483 1569
1484 if (btrfs_is_free_space_inode(root, inode)) 1570 if (btrfs_is_free_space_inode(root, inode))
1485 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); 1571 metadata = 2;
1486 else 1572
1487 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 1573 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1488 BUG_ON(ret); 1574 if (ret)
1575 return ret;
1489 1576
1490 if (!(rw & REQ_WRITE)) { 1577 if (!(rw & REQ_WRITE)) {
1491 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1578 if (bio_flags & EXTENT_BIO_COMPRESSED) {
@@ -1570,7 +1657,7 @@ again:
1570 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1657 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1571 1658
1572 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, 1659 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1573 &cached_state, GFP_NOFS); 1660 &cached_state);
1574 1661
1575 /* already ordered? We're done */ 1662 /* already ordered? We're done */
1576 if (PagePrivate2(page)) 1663 if (PagePrivate2(page))
@@ -1674,13 +1761,15 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1674 */ 1761 */
1675 ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes, 1762 ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1676 &hint, 0); 1763 &hint, 0);
1677 BUG_ON(ret); 1764 if (ret)
1765 goto out;
1678 1766
1679 ins.objectid = btrfs_ino(inode); 1767 ins.objectid = btrfs_ino(inode);
1680 ins.offset = file_pos; 1768 ins.offset = file_pos;
1681 ins.type = BTRFS_EXTENT_DATA_KEY; 1769 ins.type = BTRFS_EXTENT_DATA_KEY;
1682 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); 1770 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1683 BUG_ON(ret); 1771 if (ret)
1772 goto out;
1684 leaf = path->nodes[0]; 1773 leaf = path->nodes[0];
1685 fi = btrfs_item_ptr(leaf, path->slots[0], 1774 fi = btrfs_item_ptr(leaf, path->slots[0],
1686 struct btrfs_file_extent_item); 1775 struct btrfs_file_extent_item);
@@ -1708,10 +1797,10 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1708 ret = btrfs_alloc_reserved_file_extent(trans, root, 1797 ret = btrfs_alloc_reserved_file_extent(trans, root,
1709 root->root_key.objectid, 1798 root->root_key.objectid,
1710 btrfs_ino(inode), file_pos, &ins); 1799 btrfs_ino(inode), file_pos, &ins);
1711 BUG_ON(ret); 1800out:
1712 btrfs_free_path(path); 1801 btrfs_free_path(path);
1713 1802
1714 return 0; 1803 return ret;
1715} 1804}
1716 1805
1717/* 1806/*
@@ -1739,35 +1828,41 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1739 end - start + 1); 1828 end - start + 1);
1740 if (!ret) 1829 if (!ret)
1741 return 0; 1830 return 0;
1742 BUG_ON(!ordered_extent); 1831 BUG_ON(!ordered_extent); /* Logic error */
1743 1832
1744 nolock = btrfs_is_free_space_inode(root, inode); 1833 nolock = btrfs_is_free_space_inode(root, inode);
1745 1834
1746 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1835 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1747 BUG_ON(!list_empty(&ordered_extent->list)); 1836 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
1748 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1837 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1749 if (!ret) { 1838 if (!ret) {
1750 if (nolock) 1839 if (nolock)
1751 trans = btrfs_join_transaction_nolock(root); 1840 trans = btrfs_join_transaction_nolock(root);
1752 else 1841 else
1753 trans = btrfs_join_transaction(root); 1842 trans = btrfs_join_transaction(root);
1754 BUG_ON(IS_ERR(trans)); 1843 if (IS_ERR(trans))
1844 return PTR_ERR(trans);
1755 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1845 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1756 ret = btrfs_update_inode_fallback(trans, root, inode); 1846 ret = btrfs_update_inode_fallback(trans, root, inode);
1757 BUG_ON(ret); 1847 if (ret) /* -ENOMEM or corruption */
1848 btrfs_abort_transaction(trans, root, ret);
1758 } 1849 }
1759 goto out; 1850 goto out;
1760 } 1851 }
1761 1852
1762 lock_extent_bits(io_tree, ordered_extent->file_offset, 1853 lock_extent_bits(io_tree, ordered_extent->file_offset,
1763 ordered_extent->file_offset + ordered_extent->len - 1, 1854 ordered_extent->file_offset + ordered_extent->len - 1,
1764 0, &cached_state, GFP_NOFS); 1855 0, &cached_state);
1765 1856
1766 if (nolock) 1857 if (nolock)
1767 trans = btrfs_join_transaction_nolock(root); 1858 trans = btrfs_join_transaction_nolock(root);
1768 else 1859 else
1769 trans = btrfs_join_transaction(root); 1860 trans = btrfs_join_transaction(root);
1770 BUG_ON(IS_ERR(trans)); 1861 if (IS_ERR(trans)) {
1862 ret = PTR_ERR(trans);
1863 trans = NULL;
1864 goto out_unlock;
1865 }
1771 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1866 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1772 1867
1773 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 1868 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
@@ -1778,7 +1873,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1778 ordered_extent->file_offset, 1873 ordered_extent->file_offset,
1779 ordered_extent->file_offset + 1874 ordered_extent->file_offset +
1780 ordered_extent->len); 1875 ordered_extent->len);
1781 BUG_ON(ret);
1782 } else { 1876 } else {
1783 BUG_ON(root == root->fs_info->tree_root); 1877 BUG_ON(root == root->fs_info->tree_root);
1784 ret = insert_reserved_file_extent(trans, inode, 1878 ret = insert_reserved_file_extent(trans, inode,
@@ -1792,11 +1886,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1792 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 1886 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1793 ordered_extent->file_offset, 1887 ordered_extent->file_offset,
1794 ordered_extent->len); 1888 ordered_extent->len);
1795 BUG_ON(ret);
1796 } 1889 }
1797 unlock_extent_cached(io_tree, ordered_extent->file_offset, 1890 unlock_extent_cached(io_tree, ordered_extent->file_offset,
1798 ordered_extent->file_offset + 1891 ordered_extent->file_offset +
1799 ordered_extent->len - 1, &cached_state, GFP_NOFS); 1892 ordered_extent->len - 1, &cached_state, GFP_NOFS);
1893 if (ret < 0) {
1894 btrfs_abort_transaction(trans, root, ret);
1895 goto out;
1896 }
1800 1897
1801 add_pending_csums(trans, inode, ordered_extent->file_offset, 1898 add_pending_csums(trans, inode, ordered_extent->file_offset,
1802 &ordered_extent->list); 1899 &ordered_extent->list);
@@ -1804,7 +1901,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1804 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1901 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1805 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 1902 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1806 ret = btrfs_update_inode_fallback(trans, root, inode); 1903 ret = btrfs_update_inode_fallback(trans, root, inode);
1807 BUG_ON(ret); 1904 if (ret) { /* -ENOMEM or corruption */
1905 btrfs_abort_transaction(trans, root, ret);
1906 goto out;
1907 }
1808 } 1908 }
1809 ret = 0; 1909 ret = 0;
1810out: 1910out:
@@ -1823,6 +1923,11 @@ out:
1823 btrfs_put_ordered_extent(ordered_extent); 1923 btrfs_put_ordered_extent(ordered_extent);
1824 1924
1825 return 0; 1925 return 0;
1926out_unlock:
1927 unlock_extent_cached(io_tree, ordered_extent->file_offset,
1928 ordered_extent->file_offset +
1929 ordered_extent->len - 1, &cached_state, GFP_NOFS);
1930 goto out;
1826} 1931}
1827 1932
1828static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 1933static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
@@ -1904,6 +2009,8 @@ struct delayed_iput {
1904 struct inode *inode; 2009 struct inode *inode;
1905}; 2010};
1906 2011
2012/* JDM: If this is fs-wide, why can't we add a pointer to
2013 * btrfs_inode instead and avoid the allocation? */
1907void btrfs_add_delayed_iput(struct inode *inode) 2014void btrfs_add_delayed_iput(struct inode *inode)
1908{ 2015{
1909 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 2016 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
@@ -2050,20 +2157,27 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2050 /* grab metadata reservation from transaction handle */ 2157 /* grab metadata reservation from transaction handle */
2051 if (reserve) { 2158 if (reserve) {
2052 ret = btrfs_orphan_reserve_metadata(trans, inode); 2159 ret = btrfs_orphan_reserve_metadata(trans, inode);
2053 BUG_ON(ret); 2160 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
2054 } 2161 }
2055 2162
2056 /* insert an orphan item to track this unlinked/truncated file */ 2163 /* insert an orphan item to track this unlinked/truncated file */
2057 if (insert >= 1) { 2164 if (insert >= 1) {
2058 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 2165 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2059 BUG_ON(ret && ret != -EEXIST); 2166 if (ret && ret != -EEXIST) {
2167 btrfs_abort_transaction(trans, root, ret);
2168 return ret;
2169 }
2170 ret = 0;
2060 } 2171 }
2061 2172
2062 /* insert an orphan item to track subvolume contains orphan files */ 2173 /* insert an orphan item to track subvolume contains orphan files */
2063 if (insert >= 2) { 2174 if (insert >= 2) {
2064 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, 2175 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2065 root->root_key.objectid); 2176 root->root_key.objectid);
2066 BUG_ON(ret); 2177 if (ret && ret != -EEXIST) {
2178 btrfs_abort_transaction(trans, root, ret);
2179 return ret;
2180 }
2067 } 2181 }
2068 return 0; 2182 return 0;
2069} 2183}
@@ -2093,7 +2207,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2093 2207
2094 if (trans && delete_item) { 2208 if (trans && delete_item) {
2095 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); 2209 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2096 BUG_ON(ret); 2210 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2097 } 2211 }
2098 2212
2099 if (release_rsv) 2213 if (release_rsv)
@@ -2227,7 +2341,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2227 } 2341 }
2228 ret = btrfs_del_orphan_item(trans, root, 2342 ret = btrfs_del_orphan_item(trans, root,
2229 found_key.objectid); 2343 found_key.objectid);
2230 BUG_ON(ret); 2344 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2231 btrfs_end_transaction(trans, root); 2345 btrfs_end_transaction(trans, root);
2232 continue; 2346 continue;
2233 } 2347 }
@@ -2609,16 +2723,22 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2609 printk(KERN_INFO "btrfs failed to delete reference to %.*s, " 2723 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2610 "inode %llu parent %llu\n", name_len, name, 2724 "inode %llu parent %llu\n", name_len, name,
2611 (unsigned long long)ino, (unsigned long long)dir_ino); 2725 (unsigned long long)ino, (unsigned long long)dir_ino);
2726 btrfs_abort_transaction(trans, root, ret);
2612 goto err; 2727 goto err;
2613 } 2728 }
2614 2729
2615 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 2730 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2616 if (ret) 2731 if (ret) {
2732 btrfs_abort_transaction(trans, root, ret);
2617 goto err; 2733 goto err;
2734 }
2618 2735
2619 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, 2736 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2620 inode, dir_ino); 2737 inode, dir_ino);
2621 BUG_ON(ret != 0 && ret != -ENOENT); 2738 if (ret != 0 && ret != -ENOENT) {
2739 btrfs_abort_transaction(trans, root, ret);
2740 goto err;
2741 }
2622 2742
2623 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, 2743 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2624 dir, index); 2744 dir, index);
@@ -2776,7 +2896,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2776 err = ret; 2896 err = ret;
2777 goto out; 2897 goto out;
2778 } 2898 }
2779 BUG_ON(ret == 0); 2899 BUG_ON(ret == 0); /* Corruption */
2780 if (check_path_shared(root, path)) 2900 if (check_path_shared(root, path))
2781 goto out; 2901 goto out;
2782 btrfs_release_path(path); 2902 btrfs_release_path(path);
@@ -2809,7 +2929,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2809 err = PTR_ERR(ref); 2929 err = PTR_ERR(ref);
2810 goto out; 2930 goto out;
2811 } 2931 }
2812 BUG_ON(!ref); 2932 BUG_ON(!ref); /* Logic error */
2813 if (check_path_shared(root, path)) 2933 if (check_path_shared(root, path))
2814 goto out; 2934 goto out;
2815 index = btrfs_inode_ref_index(path->nodes[0], ref); 2935 index = btrfs_inode_ref_index(path->nodes[0], ref);
@@ -2916,23 +3036,42 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2916 3036
2917 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 3037 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2918 name, name_len, -1); 3038 name, name_len, -1);
2919 BUG_ON(IS_ERR_OR_NULL(di)); 3039 if (IS_ERR_OR_NULL(di)) {
3040 if (!di)
3041 ret = -ENOENT;
3042 else
3043 ret = PTR_ERR(di);
3044 goto out;
3045 }
2920 3046
2921 leaf = path->nodes[0]; 3047 leaf = path->nodes[0];
2922 btrfs_dir_item_key_to_cpu(leaf, di, &key); 3048 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2923 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 3049 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2924 ret = btrfs_delete_one_dir_name(trans, root, path, di); 3050 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2925 BUG_ON(ret); 3051 if (ret) {
3052 btrfs_abort_transaction(trans, root, ret);
3053 goto out;
3054 }
2926 btrfs_release_path(path); 3055 btrfs_release_path(path);
2927 3056
2928 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 3057 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2929 objectid, root->root_key.objectid, 3058 objectid, root->root_key.objectid,
2930 dir_ino, &index, name, name_len); 3059 dir_ino, &index, name, name_len);
2931 if (ret < 0) { 3060 if (ret < 0) {
2932 BUG_ON(ret != -ENOENT); 3061 if (ret != -ENOENT) {
3062 btrfs_abort_transaction(trans, root, ret);
3063 goto out;
3064 }
2933 di = btrfs_search_dir_index_item(root, path, dir_ino, 3065 di = btrfs_search_dir_index_item(root, path, dir_ino,
2934 name, name_len); 3066 name, name_len);
2935 BUG_ON(IS_ERR_OR_NULL(di)); 3067 if (IS_ERR_OR_NULL(di)) {
3068 if (!di)
3069 ret = -ENOENT;
3070 else
3071 ret = PTR_ERR(di);
3072 btrfs_abort_transaction(trans, root, ret);
3073 goto out;
3074 }
2936 3075
2937 leaf = path->nodes[0]; 3076 leaf = path->nodes[0];
2938 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3077 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
@@ -2942,15 +3081,19 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2942 btrfs_release_path(path); 3081 btrfs_release_path(path);
2943 3082
2944 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 3083 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2945 BUG_ON(ret); 3084 if (ret) {
3085 btrfs_abort_transaction(trans, root, ret);
3086 goto out;
3087 }
2946 3088
2947 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3089 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2948 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3090 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2949 ret = btrfs_update_inode(trans, root, dir); 3091 ret = btrfs_update_inode(trans, root, dir);
2950 BUG_ON(ret); 3092 if (ret)
2951 3093 btrfs_abort_transaction(trans, root, ret);
3094out:
2952 btrfs_free_path(path); 3095 btrfs_free_path(path);
2953 return 0; 3096 return ret;
2954} 3097}
2955 3098
2956static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 3099static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -3160,8 +3303,8 @@ search_again:
3160 } 3303 }
3161 size = 3304 size =
3162 btrfs_file_extent_calc_inline_size(size); 3305 btrfs_file_extent_calc_inline_size(size);
3163 ret = btrfs_truncate_item(trans, root, path, 3306 btrfs_truncate_item(trans, root, path,
3164 size, 1); 3307 size, 1);
3165 } else if (root->ref_cows) { 3308 } else if (root->ref_cows) {
3166 inode_sub_bytes(inode, item_end + 1 - 3309 inode_sub_bytes(inode, item_end + 1 -
3167 found_key.offset); 3310 found_key.offset);
@@ -3209,7 +3352,11 @@ delete:
3209 ret = btrfs_del_items(trans, root, path, 3352 ret = btrfs_del_items(trans, root, path,
3210 pending_del_slot, 3353 pending_del_slot,
3211 pending_del_nr); 3354 pending_del_nr);
3212 BUG_ON(ret); 3355 if (ret) {
3356 btrfs_abort_transaction(trans,
3357 root, ret);
3358 goto error;
3359 }
3213 pending_del_nr = 0; 3360 pending_del_nr = 0;
3214 } 3361 }
3215 btrfs_release_path(path); 3362 btrfs_release_path(path);
@@ -3222,8 +3369,10 @@ out:
3222 if (pending_del_nr) { 3369 if (pending_del_nr) {
3223 ret = btrfs_del_items(trans, root, path, pending_del_slot, 3370 ret = btrfs_del_items(trans, root, path, pending_del_slot,
3224 pending_del_nr); 3371 pending_del_nr);
3225 BUG_ON(ret); 3372 if (ret)
3373 btrfs_abort_transaction(trans, root, ret);
3226 } 3374 }
3375error:
3227 btrfs_free_path(path); 3376 btrfs_free_path(path);
3228 return err; 3377 return err;
3229} 3378}
@@ -3281,8 +3430,7 @@ again:
3281 } 3430 }
3282 wait_on_page_writeback(page); 3431 wait_on_page_writeback(page);
3283 3432
3284 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, 3433 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
3285 GFP_NOFS);
3286 set_page_extent_mapped(page); 3434 set_page_extent_mapped(page);
3287 3435
3288 ordered = btrfs_lookup_ordered_extent(inode, page_start); 3436 ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -3358,7 +3506,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3358 btrfs_wait_ordered_range(inode, hole_start, 3506 btrfs_wait_ordered_range(inode, hole_start,
3359 block_end - hole_start); 3507 block_end - hole_start);
3360 lock_extent_bits(io_tree, hole_start, block_end - 1, 0, 3508 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3361 &cached_state, GFP_NOFS); 3509 &cached_state);
3362 ordered = btrfs_lookup_ordered_extent(inode, hole_start); 3510 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3363 if (!ordered) 3511 if (!ordered)
3364 break; 3512 break;
@@ -3371,7 +3519,10 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3371 while (1) { 3519 while (1) {
3372 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 3520 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3373 block_end - cur_offset, 0); 3521 block_end - cur_offset, 0);
3374 BUG_ON(IS_ERR_OR_NULL(em)); 3522 if (IS_ERR(em)) {
3523 err = PTR_ERR(em);
3524 break;
3525 }
3375 last_byte = min(extent_map_end(em), block_end); 3526 last_byte = min(extent_map_end(em), block_end);
3376 last_byte = (last_byte + mask) & ~mask; 3527 last_byte = (last_byte + mask) & ~mask;
3377 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3528 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3388,7 +3539,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3388 cur_offset + hole_size, 3539 cur_offset + hole_size,
3389 &hint_byte, 1); 3540 &hint_byte, 1);
3390 if (err) { 3541 if (err) {
3391 btrfs_update_inode(trans, root, inode); 3542 btrfs_abort_transaction(trans, root, err);
3392 btrfs_end_transaction(trans, root); 3543 btrfs_end_transaction(trans, root);
3393 break; 3544 break;
3394 } 3545 }
@@ -3398,7 +3549,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3398 0, hole_size, 0, hole_size, 3549 0, hole_size, 0, hole_size,
3399 0, 0, 0); 3550 0, 0, 0);
3400 if (err) { 3551 if (err) {
3401 btrfs_update_inode(trans, root, inode); 3552 btrfs_abort_transaction(trans, root, err);
3402 btrfs_end_transaction(trans, root); 3553 btrfs_end_transaction(trans, root);
3403 break; 3554 break;
3404 } 3555 }
@@ -3778,7 +3929,7 @@ static void inode_tree_del(struct inode *inode)
3778 } 3929 }
3779} 3930}
3780 3931
3781int btrfs_invalidate_inodes(struct btrfs_root *root) 3932void btrfs_invalidate_inodes(struct btrfs_root *root)
3782{ 3933{
3783 struct rb_node *node; 3934 struct rb_node *node;
3784 struct rb_node *prev; 3935 struct rb_node *prev;
@@ -3838,7 +3989,6 @@ again:
3838 node = rb_next(node); 3989 node = rb_next(node);
3839 } 3990 }
3840 spin_unlock(&root->inode_lock); 3991 spin_unlock(&root->inode_lock);
3841 return 0;
3842} 3992}
3843 3993
3844static int btrfs_init_locked_inode(struct inode *inode, void *p) 3994static int btrfs_init_locked_inode(struct inode *inode, void *p)
@@ -4580,18 +4730,26 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
4580 parent_ino, index); 4730 parent_ino, index);
4581 } 4731 }
4582 4732
4583 if (ret == 0) { 4733 /* Nothing to clean up yet */
4584 ret = btrfs_insert_dir_item(trans, root, name, name_len, 4734 if (ret)
4585 parent_inode, &key, 4735 return ret;
4586 btrfs_inode_type(inode), index);
4587 if (ret)
4588 goto fail_dir_item;
4589 4736
4590 btrfs_i_size_write(parent_inode, parent_inode->i_size + 4737 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4591 name_len * 2); 4738 parent_inode, &key,
4592 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 4739 btrfs_inode_type(inode), index);
4593 ret = btrfs_update_inode(trans, root, parent_inode); 4740 if (ret == -EEXIST)
4741 goto fail_dir_item;
4742 else if (ret) {
4743 btrfs_abort_transaction(trans, root, ret);
4744 return ret;
4594 } 4745 }
4746
4747 btrfs_i_size_write(parent_inode, parent_inode->i_size +
4748 name_len * 2);
4749 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4750 ret = btrfs_update_inode(trans, root, parent_inode);
4751 if (ret)
4752 btrfs_abort_transaction(trans, root, ret);
4595 return ret; 4753 return ret;
4596 4754
4597fail_dir_item: 4755fail_dir_item:
@@ -4805,7 +4963,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4805 } else { 4963 } else {
4806 struct dentry *parent = dentry->d_parent; 4964 struct dentry *parent = dentry->d_parent;
4807 err = btrfs_update_inode(trans, root, inode); 4965 err = btrfs_update_inode(trans, root, inode);
4808 BUG_ON(err); 4966 if (err)
4967 goto fail;
4809 d_instantiate(dentry, inode); 4968 d_instantiate(dentry, inode);
4810 btrfs_log_new_name(trans, inode, NULL, parent); 4969 btrfs_log_new_name(trans, inode, NULL, parent);
4811 } 4970 }
@@ -5136,7 +5295,7 @@ again:
5136 ret = uncompress_inline(path, inode, page, 5295 ret = uncompress_inline(path, inode, page,
5137 pg_offset, 5296 pg_offset,
5138 extent_offset, item); 5297 extent_offset, item);
5139 BUG_ON(ret); 5298 BUG_ON(ret); /* -ENOMEM */
5140 } else { 5299 } else {
5141 map = kmap(page); 5300 map = kmap(page);
5142 read_extent_buffer(leaf, map + pg_offset, ptr, 5301 read_extent_buffer(leaf, map + pg_offset, ptr,
@@ -5251,6 +5410,7 @@ out:
5251 free_extent_map(em); 5410 free_extent_map(em);
5252 return ERR_PTR(err); 5411 return ERR_PTR(err);
5253 } 5412 }
5413 BUG_ON(!em); /* Error is always set */
5254 return em; 5414 return em;
5255} 5415}
5256 5416
@@ -5601,7 +5761,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5601 free_extent_map(em); 5761 free_extent_map(em);
5602 /* DIO will do one hole at a time, so just unlock a sector */ 5762 /* DIO will do one hole at a time, so just unlock a sector */
5603 unlock_extent(&BTRFS_I(inode)->io_tree, start, 5763 unlock_extent(&BTRFS_I(inode)->io_tree, start,
5604 start + root->sectorsize - 1, GFP_NOFS); 5764 start + root->sectorsize - 1);
5605 return 0; 5765 return 0;
5606 } 5766 }
5607 5767
@@ -5742,7 +5902,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5742 } while (bvec <= bvec_end); 5902 } while (bvec <= bvec_end);
5743 5903
5744 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 5904 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
5745 dip->logical_offset + dip->bytes - 1, GFP_NOFS); 5905 dip->logical_offset + dip->bytes - 1);
5746 bio->bi_private = dip->private; 5906 bio->bi_private = dip->private;
5747 5907
5748 kfree(dip->csums); 5908 kfree(dip->csums);
@@ -5793,7 +5953,7 @@ again:
5793 5953
5794 lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5954 lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5795 ordered->file_offset + ordered->len - 1, 0, 5955 ordered->file_offset + ordered->len - 1, 0,
5796 &cached_state, GFP_NOFS); 5956 &cached_state);
5797 5957
5798 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { 5958 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
5799 ret = btrfs_mark_extent_written(trans, inode, 5959 ret = btrfs_mark_extent_written(trans, inode,
@@ -5867,7 +6027,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
5867 int ret; 6027 int ret;
5868 struct btrfs_root *root = BTRFS_I(inode)->root; 6028 struct btrfs_root *root = BTRFS_I(inode)->root;
5869 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); 6029 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
5870 BUG_ON(ret); 6030 BUG_ON(ret); /* -ENOMEM */
5871 return 0; 6031 return 0;
5872} 6032}
5873 6033
@@ -6208,7 +6368,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6208 6368
6209 while (1) { 6369 while (1) {
6210 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6370 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6211 0, &cached_state, GFP_NOFS); 6371 0, &cached_state);
6212 /* 6372 /*
6213 * We're concerned with the entire range that we're going to be 6373 * We're concerned with the entire range that we're going to be
6214 * doing DIO to, so we need to make sure theres no ordered 6374 * doing DIO to, so we need to make sure theres no ordered
@@ -6232,7 +6392,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6232 if (writing) { 6392 if (writing) {
6233 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; 6393 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
6234 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6394 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6235 EXTENT_DELALLOC, 0, NULL, &cached_state, 6395 EXTENT_DELALLOC, NULL, &cached_state,
6236 GFP_NOFS); 6396 GFP_NOFS);
6237 if (ret) { 6397 if (ret) {
6238 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, 6398 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
@@ -6362,8 +6522,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6362 btrfs_releasepage(page, GFP_NOFS); 6522 btrfs_releasepage(page, GFP_NOFS);
6363 return; 6523 return;
6364 } 6524 }
6365 lock_extent_bits(tree, page_start, page_end, 0, &cached_state, 6525 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6366 GFP_NOFS);
6367 ordered = btrfs_lookup_ordered_extent(page->mapping->host, 6526 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
6368 page_offset(page)); 6527 page_offset(page));
6369 if (ordered) { 6528 if (ordered) {
@@ -6385,8 +6544,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6385 } 6544 }
6386 btrfs_put_ordered_extent(ordered); 6545 btrfs_put_ordered_extent(ordered);
6387 cached_state = NULL; 6546 cached_state = NULL;
6388 lock_extent_bits(tree, page_start, page_end, 0, &cached_state, 6547 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6389 GFP_NOFS);
6390 } 6548 }
6391 clear_extent_bit(tree, page_start, page_end, 6549 clear_extent_bit(tree, page_start, page_end,
6392 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 6550 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -6461,8 +6619,7 @@ again:
6461 } 6619 }
6462 wait_on_page_writeback(page); 6620 wait_on_page_writeback(page);
6463 6621
6464 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, 6622 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
6465 GFP_NOFS);
6466 set_page_extent_mapped(page); 6623 set_page_extent_mapped(page);
6467 6624
6468 /* 6625 /*
@@ -6736,10 +6893,9 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6736 btrfs_i_size_write(inode, 0); 6893 btrfs_i_size_write(inode, 0);
6737 6894
6738 err = btrfs_update_inode(trans, new_root, inode); 6895 err = btrfs_update_inode(trans, new_root, inode);
6739 BUG_ON(err);
6740 6896
6741 iput(inode); 6897 iput(inode);
6742 return 0; 6898 return err;
6743} 6899}
6744 6900
6745struct inode *btrfs_alloc_inode(struct super_block *sb) 6901struct inode *btrfs_alloc_inode(struct super_block *sb)
@@ -7073,7 +7229,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7073 if (!ret) 7229 if (!ret)
7074 ret = btrfs_update_inode(trans, root, old_inode); 7230 ret = btrfs_update_inode(trans, root, old_inode);
7075 } 7231 }
7076 BUG_ON(ret); 7232 if (ret) {
7233 btrfs_abort_transaction(trans, root, ret);
7234 goto out_fail;
7235 }
7077 7236
7078 if (new_inode) { 7237 if (new_inode) {
7079 new_inode->i_ctime = CURRENT_TIME; 7238 new_inode->i_ctime = CURRENT_TIME;
@@ -7091,11 +7250,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7091 new_dentry->d_name.name, 7250 new_dentry->d_name.name,
7092 new_dentry->d_name.len); 7251 new_dentry->d_name.len);
7093 } 7252 }
7094 BUG_ON(ret); 7253 if (!ret && new_inode->i_nlink == 0) {
7095 if (new_inode->i_nlink == 0) {
7096 ret = btrfs_orphan_add(trans, new_dentry->d_inode); 7254 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
7097 BUG_ON(ret); 7255 BUG_ON(ret);
7098 } 7256 }
7257 if (ret) {
7258 btrfs_abort_transaction(trans, root, ret);
7259 goto out_fail;
7260 }
7099 } 7261 }
7100 7262
7101 fixup_inode_flags(new_dir, old_inode); 7263 fixup_inode_flags(new_dir, old_inode);
@@ -7103,7 +7265,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7103 ret = btrfs_add_link(trans, new_dir, old_inode, 7265 ret = btrfs_add_link(trans, new_dir, old_inode,
7104 new_dentry->d_name.name, 7266 new_dentry->d_name.name,
7105 new_dentry->d_name.len, 0, index); 7267 new_dentry->d_name.len, 0, index);
7106 BUG_ON(ret); 7268 if (ret) {
7269 btrfs_abort_transaction(trans, root, ret);
7270 goto out_fail;
7271 }
7107 7272
7108 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { 7273 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
7109 struct dentry *parent = new_dentry->d_parent; 7274 struct dentry *parent = new_dentry->d_parent;
@@ -7328,7 +7493,12 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7328 ins.offset, ins.offset, 7493 ins.offset, ins.offset,
7329 ins.offset, 0, 0, 0, 7494 ins.offset, 0, 0, 0,
7330 BTRFS_FILE_EXTENT_PREALLOC); 7495 BTRFS_FILE_EXTENT_PREALLOC);
7331 BUG_ON(ret); 7496 if (ret) {
7497 btrfs_abort_transaction(trans, root, ret);
7498 if (own_trans)
7499 btrfs_end_transaction(trans, root);
7500 break;
7501 }
7332 btrfs_drop_extent_cache(inode, cur_offset, 7502 btrfs_drop_extent_cache(inode, cur_offset,
7333 cur_offset + ins.offset -1, 0); 7503 cur_offset + ins.offset -1, 0);
7334 7504
@@ -7350,7 +7520,13 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7350 } 7520 }
7351 7521
7352 ret = btrfs_update_inode(trans, root, inode); 7522 ret = btrfs_update_inode(trans, root, inode);
7353 BUG_ON(ret); 7523
7524 if (ret) {
7525 btrfs_abort_transaction(trans, root, ret);
7526 if (own_trans)
7527 btrfs_end_transaction(trans, root);
7528 break;
7529 }
7354 7530
7355 if (own_trans) 7531 if (own_trans)
7356 btrfs_end_transaction(trans, root); 7532 btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d8b54715c2de..205809200714 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -425,22 +425,37 @@ static noinline int create_subvol(struct btrfs_root *root,
425 425
426 key.offset = (u64)-1; 426 key.offset = (u64)-1;
427 new_root = btrfs_read_fs_root_no_name(root->fs_info, &key); 427 new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
428 BUG_ON(IS_ERR(new_root)); 428 if (IS_ERR(new_root)) {
429 btrfs_abort_transaction(trans, root, PTR_ERR(new_root));
430 ret = PTR_ERR(new_root);
431 goto fail;
432 }
429 433
430 btrfs_record_root_in_trans(trans, new_root); 434 btrfs_record_root_in_trans(trans, new_root);
431 435
432 ret = btrfs_create_subvol_root(trans, new_root, new_dirid); 436 ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
437 if (ret) {
438 /* We potentially lose an unused inode item here */
439 btrfs_abort_transaction(trans, root, ret);
440 goto fail;
441 }
442
433 /* 443 /*
434 * insert the directory item 444 * insert the directory item
435 */ 445 */
436 ret = btrfs_set_inode_index(dir, &index); 446 ret = btrfs_set_inode_index(dir, &index);
437 BUG_ON(ret); 447 if (ret) {
448 btrfs_abort_transaction(trans, root, ret);
449 goto fail;
450 }
438 451
439 ret = btrfs_insert_dir_item(trans, root, 452 ret = btrfs_insert_dir_item(trans, root,
440 name, namelen, dir, &key, 453 name, namelen, dir, &key,
441 BTRFS_FT_DIR, index); 454 BTRFS_FT_DIR, index);
442 if (ret) 455 if (ret) {
456 btrfs_abort_transaction(trans, root, ret);
443 goto fail; 457 goto fail;
458 }
444 459
445 btrfs_i_size_write(dir, dir->i_size + namelen * 2); 460 btrfs_i_size_write(dir, dir->i_size + namelen * 2);
446 ret = btrfs_update_inode(trans, root, dir); 461 ret = btrfs_update_inode(trans, root, dir);
@@ -797,9 +812,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
797 812
798 if (!em) { 813 if (!em) {
799 /* get the big lock and read metadata off disk */ 814 /* get the big lock and read metadata off disk */
800 lock_extent(io_tree, start, start + len - 1, GFP_NOFS); 815 lock_extent(io_tree, start, start + len - 1);
801 em = btrfs_get_extent(inode, NULL, 0, start, len, 0); 816 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
802 unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); 817 unlock_extent(io_tree, start, start + len - 1);
803 818
804 if (IS_ERR(em)) 819 if (IS_ERR(em))
805 return 0; 820 return 0;
@@ -887,10 +902,10 @@ again:
887 page_start = page_offset(page); 902 page_start = page_offset(page);
888 page_end = page_start + PAGE_CACHE_SIZE - 1; 903 page_end = page_start + PAGE_CACHE_SIZE - 1;
889 while (1) { 904 while (1) {
890 lock_extent(tree, page_start, page_end, GFP_NOFS); 905 lock_extent(tree, page_start, page_end);
891 ordered = btrfs_lookup_ordered_extent(inode, 906 ordered = btrfs_lookup_ordered_extent(inode,
892 page_start); 907 page_start);
893 unlock_extent(tree, page_start, page_end, GFP_NOFS); 908 unlock_extent(tree, page_start, page_end);
894 if (!ordered) 909 if (!ordered)
895 break; 910 break;
896 911
@@ -946,8 +961,7 @@ again:
946 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; 961 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
947 962
948 lock_extent_bits(&BTRFS_I(inode)->io_tree, 963 lock_extent_bits(&BTRFS_I(inode)->io_tree,
949 page_start, page_end - 1, 0, &cached_state, 964 page_start, page_end - 1, 0, &cached_state);
950 GFP_NOFS);
951 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, 965 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
952 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | 966 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
953 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, 967 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
@@ -1966,7 +1980,11 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1966 dest->root_key.objectid, 1980 dest->root_key.objectid,
1967 dentry->d_name.name, 1981 dentry->d_name.name,
1968 dentry->d_name.len); 1982 dentry->d_name.len);
1969 BUG_ON(ret); 1983 if (ret) {
1984 err = ret;
1985 btrfs_abort_transaction(trans, root, ret);
1986 goto out_end_trans;
1987 }
1970 1988
1971 btrfs_record_root_in_trans(trans, dest); 1989 btrfs_record_root_in_trans(trans, dest);
1972 1990
@@ -1979,11 +1997,16 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1979 ret = btrfs_insert_orphan_item(trans, 1997 ret = btrfs_insert_orphan_item(trans,
1980 root->fs_info->tree_root, 1998 root->fs_info->tree_root,
1981 dest->root_key.objectid); 1999 dest->root_key.objectid);
1982 BUG_ON(ret); 2000 if (ret) {
2001 btrfs_abort_transaction(trans, root, ret);
2002 err = ret;
2003 goto out_end_trans;
2004 }
1983 } 2005 }
1984 2006out_end_trans:
1985 ret = btrfs_end_transaction(trans, root); 2007 ret = btrfs_end_transaction(trans, root);
1986 BUG_ON(ret); 2008 if (ret && !err)
2009 err = ret;
1987 inode->i_flags |= S_DEAD; 2010 inode->i_flags |= S_DEAD;
1988out_up_write: 2011out_up_write:
1989 up_write(&root->fs_info->subvol_sem); 2012 up_write(&root->fs_info->subvol_sem);
@@ -2326,13 +2349,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2326 another, and lock file content */ 2349 another, and lock file content */
2327 while (1) { 2350 while (1) {
2328 struct btrfs_ordered_extent *ordered; 2351 struct btrfs_ordered_extent *ordered;
2329 lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2352 lock_extent(&BTRFS_I(src)->io_tree, off, off+len);
2330 ordered = btrfs_lookup_first_ordered_extent(src, off+len); 2353 ordered = btrfs_lookup_first_ordered_extent(src, off+len);
2331 if (!ordered && 2354 if (!ordered &&
2332 !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, 2355 !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,
2333 EXTENT_DELALLOC, 0, NULL)) 2356 EXTENT_DELALLOC, 0, NULL))
2334 break; 2357 break;
2335 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2358 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
2336 if (ordered) 2359 if (ordered)
2337 btrfs_put_ordered_extent(ordered); 2360 btrfs_put_ordered_extent(ordered);
2338 btrfs_wait_ordered_range(src, off, len); 2361 btrfs_wait_ordered_range(src, off, len);
@@ -2447,11 +2470,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2447 new_key.offset, 2470 new_key.offset,
2448 new_key.offset + datal, 2471 new_key.offset + datal,
2449 &hint_byte, 1); 2472 &hint_byte, 1);
2450 BUG_ON(ret); 2473 if (ret) {
2474 btrfs_abort_transaction(trans, root,
2475 ret);
2476 btrfs_end_transaction(trans, root);
2477 goto out;
2478 }
2451 2479
2452 ret = btrfs_insert_empty_item(trans, root, path, 2480 ret = btrfs_insert_empty_item(trans, root, path,
2453 &new_key, size); 2481 &new_key, size);
2454 BUG_ON(ret); 2482 if (ret) {
2483 btrfs_abort_transaction(trans, root,
2484 ret);
2485 btrfs_end_transaction(trans, root);
2486 goto out;
2487 }
2455 2488
2456 leaf = path->nodes[0]; 2489 leaf = path->nodes[0];
2457 slot = path->slots[0]; 2490 slot = path->slots[0];
@@ -2478,7 +2511,15 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2478 btrfs_ino(inode), 2511 btrfs_ino(inode),
2479 new_key.offset - datao, 2512 new_key.offset - datao,
2480 0); 2513 0);
2481 BUG_ON(ret); 2514 if (ret) {
2515 btrfs_abort_transaction(trans,
2516 root,
2517 ret);
2518 btrfs_end_transaction(trans,
2519 root);
2520 goto out;
2521
2522 }
2482 } 2523 }
2483 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 2524 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
2484 u64 skip = 0; 2525 u64 skip = 0;
@@ -2503,11 +2544,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2503 new_key.offset, 2544 new_key.offset,
2504 new_key.offset + datal, 2545 new_key.offset + datal,
2505 &hint_byte, 1); 2546 &hint_byte, 1);
2506 BUG_ON(ret); 2547 if (ret) {
2548 btrfs_abort_transaction(trans, root,
2549 ret);
2550 btrfs_end_transaction(trans, root);
2551 goto out;
2552 }
2507 2553
2508 ret = btrfs_insert_empty_item(trans, root, path, 2554 ret = btrfs_insert_empty_item(trans, root, path,
2509 &new_key, size); 2555 &new_key, size);
2510 BUG_ON(ret); 2556 if (ret) {
2557 btrfs_abort_transaction(trans, root,
2558 ret);
2559 btrfs_end_transaction(trans, root);
2560 goto out;
2561 }
2511 2562
2512 if (skip) { 2563 if (skip) {
2513 u32 start = 2564 u32 start =
@@ -2541,8 +2592,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2541 btrfs_i_size_write(inode, endoff); 2592 btrfs_i_size_write(inode, endoff);
2542 2593
2543 ret = btrfs_update_inode(trans, root, inode); 2594 ret = btrfs_update_inode(trans, root, inode);
2544 BUG_ON(ret); 2595 if (ret) {
2545 btrfs_end_transaction(trans, root); 2596 btrfs_abort_transaction(trans, root, ret);
2597 btrfs_end_transaction(trans, root);
2598 goto out;
2599 }
2600 ret = btrfs_end_transaction(trans, root);
2546 } 2601 }
2547next: 2602next:
2548 btrfs_release_path(path); 2603 btrfs_release_path(path);
@@ -2551,7 +2606,7 @@ next:
2551 ret = 0; 2606 ret = 0;
2552out: 2607out:
2553 btrfs_release_path(path); 2608 btrfs_release_path(path);
2554 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2609 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
2555out_unlock: 2610out_unlock:
2556 mutex_unlock(&src->i_mutex); 2611 mutex_unlock(&src->i_mutex);
2557 mutex_unlock(&inode->i_mutex); 2612 mutex_unlock(&inode->i_mutex);
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5e178d8f7167..272f911203ff 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -208,7 +208,7 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
208 * take a spinning write lock. This will wait for both 208 * take a spinning write lock. This will wait for both
209 * blocking readers or writers 209 * blocking readers or writers
210 */ 210 */
211int btrfs_tree_lock(struct extent_buffer *eb) 211void btrfs_tree_lock(struct extent_buffer *eb)
212{ 212{
213again: 213again:
214 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); 214 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
@@ -230,13 +230,12 @@ again:
230 atomic_inc(&eb->spinning_writers); 230 atomic_inc(&eb->spinning_writers);
231 atomic_inc(&eb->write_locks); 231 atomic_inc(&eb->write_locks);
232 eb->lock_owner = current->pid; 232 eb->lock_owner = current->pid;
233 return 0;
234} 233}
235 234
236/* 235/*
237 * drop a spinning or a blocking write lock. 236 * drop a spinning or a blocking write lock.
238 */ 237 */
239int btrfs_tree_unlock(struct extent_buffer *eb) 238void btrfs_tree_unlock(struct extent_buffer *eb)
240{ 239{
241 int blockers = atomic_read(&eb->blocking_writers); 240 int blockers = atomic_read(&eb->blocking_writers);
242 241
@@ -255,7 +254,6 @@ int btrfs_tree_unlock(struct extent_buffer *eb)
255 atomic_dec(&eb->spinning_writers); 254 atomic_dec(&eb->spinning_writers);
256 write_unlock(&eb->lock); 255 write_unlock(&eb->lock);
257 } 256 }
258 return 0;
259} 257}
260 258
261void btrfs_assert_tree_locked(struct extent_buffer *eb) 259void btrfs_assert_tree_locked(struct extent_buffer *eb)
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 17247ddb81a0..ca52681e5f40 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -24,8 +24,8 @@
24#define BTRFS_WRITE_LOCK_BLOCKING 3 24#define BTRFS_WRITE_LOCK_BLOCKING 3
25#define BTRFS_READ_LOCK_BLOCKING 4 25#define BTRFS_READ_LOCK_BLOCKING 4
26 26
27int btrfs_tree_lock(struct extent_buffer *eb); 27void btrfs_tree_lock(struct extent_buffer *eb);
28int btrfs_tree_unlock(struct extent_buffer *eb); 28void btrfs_tree_unlock(struct extent_buffer *eb);
29int btrfs_try_spin_lock(struct extent_buffer *eb); 29int btrfs_try_spin_lock(struct extent_buffer *eb);
30 30
31void btrfs_tree_read_lock(struct extent_buffer *eb); 31void btrfs_tree_read_lock(struct extent_buffer *eb);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a1c940425307..bbf6d0d9aebe 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -59,6 +59,14 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
59 return NULL; 59 return NULL;
60} 60}
61 61
62static void ordered_data_tree_panic(struct inode *inode, int errno,
63 u64 offset)
64{
65 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
66 btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
67 "%llu\n", (unsigned long long)offset);
68}
69
62/* 70/*
63 * look for a given offset in the tree, and if it can't be found return the 71 * look for a given offset in the tree, and if it can't be found return the
64 * first lesser offset 72 * first lesser offset
@@ -207,7 +215,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
207 spin_lock(&tree->lock); 215 spin_lock(&tree->lock);
208 node = tree_insert(&tree->tree, file_offset, 216 node = tree_insert(&tree->tree, file_offset,
209 &entry->rb_node); 217 &entry->rb_node);
210 BUG_ON(node); 218 if (node)
219 ordered_data_tree_panic(inode, -EEXIST, file_offset);
211 spin_unlock(&tree->lock); 220 spin_unlock(&tree->lock);
212 221
213 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); 222 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
@@ -215,7 +224,6 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
215 &BTRFS_I(inode)->root->fs_info->ordered_extents); 224 &BTRFS_I(inode)->root->fs_info->ordered_extents);
216 spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); 225 spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
217 226
218 BUG_ON(node);
219 return 0; 227 return 0;
220} 228}
221 229
@@ -249,9 +257,9 @@ int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
249 * when an ordered extent is finished. If the list covers more than one 257 * when an ordered extent is finished. If the list covers more than one
250 * ordered extent, it is split across multiples. 258 * ordered extent, it is split across multiples.
251 */ 259 */
252int btrfs_add_ordered_sum(struct inode *inode, 260void btrfs_add_ordered_sum(struct inode *inode,
253 struct btrfs_ordered_extent *entry, 261 struct btrfs_ordered_extent *entry,
254 struct btrfs_ordered_sum *sum) 262 struct btrfs_ordered_sum *sum)
255{ 263{
256 struct btrfs_ordered_inode_tree *tree; 264 struct btrfs_ordered_inode_tree *tree;
257 265
@@ -259,7 +267,6 @@ int btrfs_add_ordered_sum(struct inode *inode,
259 spin_lock(&tree->lock); 267 spin_lock(&tree->lock);
260 list_add_tail(&sum->list, &entry->list); 268 list_add_tail(&sum->list, &entry->list);
261 spin_unlock(&tree->lock); 269 spin_unlock(&tree->lock);
262 return 0;
263} 270}
264 271
265/* 272/*
@@ -384,7 +391,7 @@ out:
384 * used to drop a reference on an ordered extent. This will free 391 * used to drop a reference on an ordered extent. This will free
385 * the extent if the last reference is dropped 392 * the extent if the last reference is dropped
386 */ 393 */
387int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) 394void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
388{ 395{
389 struct list_head *cur; 396 struct list_head *cur;
390 struct btrfs_ordered_sum *sum; 397 struct btrfs_ordered_sum *sum;
@@ -400,7 +407,6 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
400 } 407 }
401 kfree(entry); 408 kfree(entry);
402 } 409 }
403 return 0;
404} 410}
405 411
406/* 412/*
@@ -408,8 +414,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
408 * and you must wake_up entry->wait. You must hold the tree lock 414 * and you must wake_up entry->wait. You must hold the tree lock
409 * while you call this function. 415 * while you call this function.
410 */ 416 */
411static int __btrfs_remove_ordered_extent(struct inode *inode, 417static void __btrfs_remove_ordered_extent(struct inode *inode,
412 struct btrfs_ordered_extent *entry) 418 struct btrfs_ordered_extent *entry)
413{ 419{
414 struct btrfs_ordered_inode_tree *tree; 420 struct btrfs_ordered_inode_tree *tree;
415 struct btrfs_root *root = BTRFS_I(inode)->root; 421 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -436,35 +442,30 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
436 list_del_init(&BTRFS_I(inode)->ordered_operations); 442 list_del_init(&BTRFS_I(inode)->ordered_operations);
437 } 443 }
438 spin_unlock(&root->fs_info->ordered_extent_lock); 444 spin_unlock(&root->fs_info->ordered_extent_lock);
439
440 return 0;
441} 445}
442 446
443/* 447/*
444 * remove an ordered extent from the tree. No references are dropped 448 * remove an ordered extent from the tree. No references are dropped
445 * but any waiters are woken. 449 * but any waiters are woken.
446 */ 450 */
447int btrfs_remove_ordered_extent(struct inode *inode, 451void btrfs_remove_ordered_extent(struct inode *inode,
448 struct btrfs_ordered_extent *entry) 452 struct btrfs_ordered_extent *entry)
449{ 453{
450 struct btrfs_ordered_inode_tree *tree; 454 struct btrfs_ordered_inode_tree *tree;
451 int ret;
452 455
453 tree = &BTRFS_I(inode)->ordered_tree; 456 tree = &BTRFS_I(inode)->ordered_tree;
454 spin_lock(&tree->lock); 457 spin_lock(&tree->lock);
455 ret = __btrfs_remove_ordered_extent(inode, entry); 458 __btrfs_remove_ordered_extent(inode, entry);
456 spin_unlock(&tree->lock); 459 spin_unlock(&tree->lock);
457 wake_up(&entry->wait); 460 wake_up(&entry->wait);
458
459 return ret;
460} 461}
461 462
462/* 463/*
463 * wait for all the ordered extents in a root. This is done when balancing 464 * wait for all the ordered extents in a root. This is done when balancing
464 * space between drives. 465 * space between drives.
465 */ 466 */
466int btrfs_wait_ordered_extents(struct btrfs_root *root, 467void btrfs_wait_ordered_extents(struct btrfs_root *root,
467 int nocow_only, int delay_iput) 468 int nocow_only, int delay_iput)
468{ 469{
469 struct list_head splice; 470 struct list_head splice;
470 struct list_head *cur; 471 struct list_head *cur;
@@ -512,7 +513,6 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root,
512 spin_lock(&root->fs_info->ordered_extent_lock); 513 spin_lock(&root->fs_info->ordered_extent_lock);
513 } 514 }
514 spin_unlock(&root->fs_info->ordered_extent_lock); 515 spin_unlock(&root->fs_info->ordered_extent_lock);
515 return 0;
516} 516}
517 517
518/* 518/*
@@ -525,7 +525,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root,
525 * extra check to make sure the ordered operation list really is empty 525 * extra check to make sure the ordered operation list really is empty
526 * before we return 526 * before we return
527 */ 527 */
528int btrfs_run_ordered_operations(struct btrfs_root *root, int wait) 528void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
529{ 529{
530 struct btrfs_inode *btrfs_inode; 530 struct btrfs_inode *btrfs_inode;
531 struct inode *inode; 531 struct inode *inode;
@@ -573,8 +573,6 @@ again:
573 573
574 spin_unlock(&root->fs_info->ordered_extent_lock); 574 spin_unlock(&root->fs_info->ordered_extent_lock);
575 mutex_unlock(&root->fs_info->ordered_operations_mutex); 575 mutex_unlock(&root->fs_info->ordered_operations_mutex);
576
577 return 0;
578} 576}
579 577
580/* 578/*
@@ -609,7 +607,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
609/* 607/*
610 * Used to wait on ordered extents across a large range of bytes. 608 * Used to wait on ordered extents across a large range of bytes.
611 */ 609 */
612int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) 610void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
613{ 611{
614 u64 end; 612 u64 end;
615 u64 orig_end; 613 u64 orig_end;
@@ -664,7 +662,6 @@ again:
664 schedule_timeout(1); 662 schedule_timeout(1);
665 goto again; 663 goto again;
666 } 664 }
667 return 0;
668} 665}
669 666
670/* 667/*
@@ -948,9 +945,8 @@ out:
948 * If trans is not null, we'll do a friendly check for a transaction that 945 * If trans is not null, we'll do a friendly check for a transaction that
949 * is already flushing things and force the IO down ourselves. 946 * is already flushing things and force the IO down ourselves.
950 */ 947 */
951int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, 948void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
952 struct btrfs_root *root, 949 struct btrfs_root *root, struct inode *inode)
953 struct inode *inode)
954{ 950{
955 u64 last_mod; 951 u64 last_mod;
956 952
@@ -961,7 +957,7 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
961 * commit, we can safely return without doing anything 957 * commit, we can safely return without doing anything
962 */ 958 */
963 if (last_mod < root->fs_info->last_trans_committed) 959 if (last_mod < root->fs_info->last_trans_committed)
964 return 0; 960 return;
965 961
966 /* 962 /*
967 * the transaction is already committing. Just start the IO and 963 * the transaction is already committing. Just start the IO and
@@ -969,7 +965,7 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
969 */ 965 */
970 if (trans && root->fs_info->running_transaction->blocked) { 966 if (trans && root->fs_info->running_transaction->blocked) {
971 btrfs_wait_ordered_range(inode, 0, (u64)-1); 967 btrfs_wait_ordered_range(inode, 0, (u64)-1);
972 return 0; 968 return;
973 } 969 }
974 970
975 spin_lock(&root->fs_info->ordered_extent_lock); 971 spin_lock(&root->fs_info->ordered_extent_lock);
@@ -978,6 +974,4 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
978 &root->fs_info->ordered_operations); 974 &root->fs_info->ordered_operations);
979 } 975 }
980 spin_unlock(&root->fs_info->ordered_extent_lock); 976 spin_unlock(&root->fs_info->ordered_extent_lock);
981
982 return 0;
983} 977}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index ff1f69aa1883..c355ad4dc1a6 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -138,8 +138,8 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
138 t->last = NULL; 138 t->last = NULL;
139} 139}
140 140
141int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry); 141void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
142int btrfs_remove_ordered_extent(struct inode *inode, 142void btrfs_remove_ordered_extent(struct inode *inode,
143 struct btrfs_ordered_extent *entry); 143 struct btrfs_ordered_extent *entry);
144int btrfs_dec_test_ordered_pending(struct inode *inode, 144int btrfs_dec_test_ordered_pending(struct inode *inode,
145 struct btrfs_ordered_extent **cached, 145 struct btrfs_ordered_extent **cached,
@@ -154,14 +154,14 @@ int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
154int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, 154int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
155 u64 start, u64 len, u64 disk_len, 155 u64 start, u64 len, u64 disk_len,
156 int type, int compress_type); 156 int type, int compress_type);
157int btrfs_add_ordered_sum(struct inode *inode, 157void btrfs_add_ordered_sum(struct inode *inode,
158 struct btrfs_ordered_extent *entry, 158 struct btrfs_ordered_extent *entry,
159 struct btrfs_ordered_sum *sum); 159 struct btrfs_ordered_sum *sum);
160struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, 160struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
161 u64 file_offset); 161 u64 file_offset);
162void btrfs_start_ordered_extent(struct inode *inode, 162void btrfs_start_ordered_extent(struct inode *inode,
163 struct btrfs_ordered_extent *entry, int wait); 163 struct btrfs_ordered_extent *entry, int wait);
164int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); 164void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
165struct btrfs_ordered_extent * 165struct btrfs_ordered_extent *
166btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); 166btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
167struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, 167struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
@@ -170,10 +170,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
170int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, 170int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
171 struct btrfs_ordered_extent *ordered); 171 struct btrfs_ordered_extent *ordered);
172int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); 172int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
173int btrfs_run_ordered_operations(struct btrfs_root *root, int wait); 173void btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
174int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, 174void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
175 struct btrfs_root *root, 175 struct btrfs_root *root,
176 struct inode *inode); 176 struct inode *inode);
177int btrfs_wait_ordered_extents(struct btrfs_root *root, 177void btrfs_wait_ordered_extents(struct btrfs_root *root,
178 int nocow_only, int delay_iput); 178 int nocow_only, int delay_iput);
179#endif 179#endif
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index f8be250963a0..24cad1695af7 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -58,7 +58,7 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
58 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 58 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
59 if (ret < 0) 59 if (ret < 0)
60 goto out; 60 goto out;
61 if (ret) { 61 if (ret) { /* JDM: Really? */
62 ret = -ENOENT; 62 ret = -ENOENT;
63 goto out; 63 goto out;
64 } 64 }
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8c1aae2c845d..017281dbb2a7 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -326,6 +326,19 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
326 return NULL; 326 return NULL;
327} 327}
328 328
329void backref_tree_panic(struct rb_node *rb_node, int errno,
330 u64 bytenr)
331{
332
333 struct btrfs_fs_info *fs_info = NULL;
334 struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
335 rb_node);
336 if (bnode->root)
337 fs_info = bnode->root->fs_info;
338 btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
339 "found at offset %llu\n", (unsigned long long)bytenr);
340}
341
329/* 342/*
330 * walk up backref nodes until reach node presents tree root 343 * walk up backref nodes until reach node presents tree root
331 */ 344 */
@@ -452,7 +465,8 @@ static void update_backref_node(struct backref_cache *cache,
452 rb_erase(&node->rb_node, &cache->rb_root); 465 rb_erase(&node->rb_node, &cache->rb_root);
453 node->bytenr = bytenr; 466 node->bytenr = bytenr;
454 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); 467 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
455 BUG_ON(rb_node); 468 if (rb_node)
469 backref_tree_panic(rb_node, -EEXIST, bytenr);
456} 470}
457 471
458/* 472/*
@@ -999,7 +1013,8 @@ next:
999 if (!cowonly) { 1013 if (!cowonly) {
1000 rb_node = tree_insert(&cache->rb_root, node->bytenr, 1014 rb_node = tree_insert(&cache->rb_root, node->bytenr,
1001 &node->rb_node); 1015 &node->rb_node);
1002 BUG_ON(rb_node); 1016 if (rb_node)
1017 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1003 list_add_tail(&node->lower, &cache->leaves); 1018 list_add_tail(&node->lower, &cache->leaves);
1004 } 1019 }
1005 1020
@@ -1034,7 +1049,9 @@ next:
1034 if (!cowonly) { 1049 if (!cowonly) {
1035 rb_node = tree_insert(&cache->rb_root, upper->bytenr, 1050 rb_node = tree_insert(&cache->rb_root, upper->bytenr,
1036 &upper->rb_node); 1051 &upper->rb_node);
1037 BUG_ON(rb_node); 1052 if (rb_node)
1053 backref_tree_panic(rb_node, -EEXIST,
1054 upper->bytenr);
1038 } 1055 }
1039 1056
1040 list_add_tail(&edge->list[UPPER], &upper->lower); 1057 list_add_tail(&edge->list[UPPER], &upper->lower);
@@ -1180,7 +1197,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
1180 1197
1181 rb_node = tree_insert(&cache->rb_root, new_node->bytenr, 1198 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1182 &new_node->rb_node); 1199 &new_node->rb_node);
1183 BUG_ON(rb_node); 1200 if (rb_node)
1201 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
1184 1202
1185 if (!new_node->lowest) { 1203 if (!new_node->lowest) {
1186 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { 1204 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
@@ -1203,14 +1221,15 @@ fail:
1203/* 1221/*
1204 * helper to add 'address of tree root -> reloc tree' mapping 1222 * helper to add 'address of tree root -> reloc tree' mapping
1205 */ 1223 */
1206static int __add_reloc_root(struct btrfs_root *root) 1224static int __must_check __add_reloc_root(struct btrfs_root *root)
1207{ 1225{
1208 struct rb_node *rb_node; 1226 struct rb_node *rb_node;
1209 struct mapping_node *node; 1227 struct mapping_node *node;
1210 struct reloc_control *rc = root->fs_info->reloc_ctl; 1228 struct reloc_control *rc = root->fs_info->reloc_ctl;
1211 1229
1212 node = kmalloc(sizeof(*node), GFP_NOFS); 1230 node = kmalloc(sizeof(*node), GFP_NOFS);
1213 BUG_ON(!node); 1231 if (!node)
1232 return -ENOMEM;
1214 1233
1215 node->bytenr = root->node->start; 1234 node->bytenr = root->node->start;
1216 node->data = root; 1235 node->data = root;
@@ -1219,7 +1238,12 @@ static int __add_reloc_root(struct btrfs_root *root)
1219 rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 1238 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1220 node->bytenr, &node->rb_node); 1239 node->bytenr, &node->rb_node);
1221 spin_unlock(&rc->reloc_root_tree.lock); 1240 spin_unlock(&rc->reloc_root_tree.lock);
1222 BUG_ON(rb_node); 1241 if (rb_node) {
1242 kfree(node);
1243 btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
1244 "for start=%llu while inserting into relocation "
1245 "tree\n");
1246 }
1223 1247
1224 list_add_tail(&root->root_list, &rc->reloc_roots); 1248 list_add_tail(&root->root_list, &rc->reloc_roots);
1225 return 0; 1249 return 0;
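
Two things happen to __add_reloc_root() above: the kmalloc failure turns into -ENOMEM instead of BUG_ON(!node), and __must_check makes it a compile-time warning for a caller to ignore that result. A hedged userspace sketch of the same contract, with __must_check spelled out as GCC/Clang's warn_unused_result attribute and struct mapping_node reduced to two fields (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define __must_check __attribute__((warn_unused_result))

struct mapping_node {
	unsigned long long bytenr;
	void *data;
};

/* Allocation failure becomes -ENOMEM for the caller to handle; the
 * attribute turns "add_mapping(...);" with no check into a warning. */
static __must_check int add_mapping(unsigned long long bytenr, void *data,
				    struct mapping_node **out)
{
	struct mapping_node *node = malloc(sizeof(*node));

	if (!node)
		return -ENOMEM;
	node->bytenr = bytenr;
	node->data = data;
	*out = node;
	return 0;
}

int main(void)
{
	struct mapping_node *n = NULL;
	int ret = add_mapping(4096, NULL, &n);

	if (ret < 0) {
		fprintf(stderr, "add_mapping failed: %d\n", ret);
		return 1;
	}
	free(n);
	return 0;
}

Callers that genuinely cannot recover, such as btrfs_init_reloc_root() in the next hunk, still assert on the result, but the decision now lives at the call site rather than inside the helper.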
@@ -1252,7 +1276,8 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
1252 rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 1276 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1253 node->bytenr, &node->rb_node); 1277 node->bytenr, &node->rb_node);
1254 spin_unlock(&rc->reloc_root_tree.lock); 1278 spin_unlock(&rc->reloc_root_tree.lock);
1255 BUG_ON(rb_node); 1279 if (rb_node)
1280 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1256 } else { 1281 } else {
1257 list_del_init(&root->root_list); 1282 list_del_init(&root->root_list);
1258 kfree(node); 1283 kfree(node);
@@ -1334,6 +1359,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1334 struct btrfs_root *reloc_root; 1359 struct btrfs_root *reloc_root;
1335 struct reloc_control *rc = root->fs_info->reloc_ctl; 1360 struct reloc_control *rc = root->fs_info->reloc_ctl;
1336 int clear_rsv = 0; 1361 int clear_rsv = 0;
1362 int ret;
1337 1363
1338 if (root->reloc_root) { 1364 if (root->reloc_root) {
1339 reloc_root = root->reloc_root; 1365 reloc_root = root->reloc_root;
@@ -1353,7 +1379,8 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1353 if (clear_rsv) 1379 if (clear_rsv)
1354 trans->block_rsv = NULL; 1380 trans->block_rsv = NULL;
1355 1381
1356 __add_reloc_root(reloc_root); 1382 ret = __add_reloc_root(reloc_root);
1383 BUG_ON(ret < 0);
1357 root->reloc_root = reloc_root; 1384 root->reloc_root = reloc_root;
1358 return 0; 1385 return 0;
1359} 1386}
@@ -1577,15 +1604,14 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
1577 WARN_ON(!IS_ALIGNED(end, root->sectorsize)); 1604 WARN_ON(!IS_ALIGNED(end, root->sectorsize));
1578 end--; 1605 end--;
1579 ret = try_lock_extent(&BTRFS_I(inode)->io_tree, 1606 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1580 key.offset, end, 1607 key.offset, end);
1581 GFP_NOFS);
1582 if (!ret) 1608 if (!ret)
1583 continue; 1609 continue;
1584 1610
1585 btrfs_drop_extent_cache(inode, key.offset, end, 1611 btrfs_drop_extent_cache(inode, key.offset, end,
1586 1); 1612 1);
1587 unlock_extent(&BTRFS_I(inode)->io_tree, 1613 unlock_extent(&BTRFS_I(inode)->io_tree,
1588 key.offset, end, GFP_NOFS); 1614 key.offset, end);
1589 } 1615 }
1590 } 1616 }
1591 1617
@@ -1956,9 +1982,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,
1956 } 1982 }
1957 1983
1958 /* the lock_extent waits for readpage to complete */ 1984 /* the lock_extent waits for readpage to complete */
1959 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 1985 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
1960 btrfs_drop_extent_cache(inode, start, end, 1); 1986 btrfs_drop_extent_cache(inode, start, end, 1);
1961 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 1987 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
1962 } 1988 }
1963 return 0; 1989 return 0;
1964} 1990}
@@ -2246,7 +2272,8 @@ again:
2246 } else { 2272 } else {
2247 list_del_init(&reloc_root->root_list); 2273 list_del_init(&reloc_root->root_list);
2248 } 2274 }
2249 btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1); 2275 ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
2276 BUG_ON(ret < 0);
2250 } 2277 }
2251 2278
2252 if (found) { 2279 if (found) {
@@ -2862,12 +2889,12 @@ int prealloc_file_extent_cluster(struct inode *inode,
2862 else 2889 else
2863 end = cluster->end - offset; 2890 end = cluster->end - offset;
2864 2891
2865 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2892 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2866 num_bytes = end + 1 - start; 2893 num_bytes = end + 1 - start;
2867 ret = btrfs_prealloc_file_range(inode, 0, start, 2894 ret = btrfs_prealloc_file_range(inode, 0, start,
2868 num_bytes, num_bytes, 2895 num_bytes, num_bytes,
2869 end + 1, &alloc_hint); 2896 end + 1, &alloc_hint);
2870 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2897 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2871 if (ret) 2898 if (ret)
2872 break; 2899 break;
2873 nr++; 2900 nr++;
@@ -2899,7 +2926,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2899 em->bdev = root->fs_info->fs_devices->latest_bdev; 2926 em->bdev = root->fs_info->fs_devices->latest_bdev;
2900 set_bit(EXTENT_FLAG_PINNED, &em->flags); 2927 set_bit(EXTENT_FLAG_PINNED, &em->flags);
2901 2928
2902 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2929 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2903 while (1) { 2930 while (1) {
2904 write_lock(&em_tree->lock); 2931 write_lock(&em_tree->lock);
2905 ret = add_extent_mapping(em_tree, em); 2932 ret = add_extent_mapping(em_tree, em);
@@ -2910,7 +2937,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2910 } 2937 }
2911 btrfs_drop_extent_cache(inode, start, end, 0); 2938 btrfs_drop_extent_cache(inode, start, end, 0);
2912 } 2939 }
2913 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2940 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2914 return ret; 2941 return ret;
2915} 2942}
2916 2943
@@ -2990,8 +3017,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
2990 page_start = (u64)page->index << PAGE_CACHE_SHIFT; 3017 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2991 page_end = page_start + PAGE_CACHE_SIZE - 1; 3018 page_end = page_start + PAGE_CACHE_SIZE - 1;
2992 3019
2993 lock_extent(&BTRFS_I(inode)->io_tree, 3020 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
2994 page_start, page_end, GFP_NOFS);
2995 3021
2996 set_page_extent_mapped(page); 3022 set_page_extent_mapped(page);
2997 3023
@@ -3007,7 +3033,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3007 set_page_dirty(page); 3033 set_page_dirty(page);
3008 3034
3009 unlock_extent(&BTRFS_I(inode)->io_tree, 3035 unlock_extent(&BTRFS_I(inode)->io_tree,
3010 page_start, page_end, GFP_NOFS); 3036 page_start, page_end);
3011 unlock_page(page); 3037 unlock_page(page);
3012 page_cache_release(page); 3038 page_cache_release(page);
3013 3039
@@ -3154,7 +3180,8 @@ static int add_tree_block(struct reloc_control *rc,
3154 block->key_ready = 0; 3180 block->key_ready = 0;
3155 3181
3156 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); 3182 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
3157 BUG_ON(rb_node); 3183 if (rb_node)
3184 backref_tree_panic(rb_node, -EEXIST, block->bytenr);
3158 3185
3159 return 0; 3186 return 0;
3160} 3187}
@@ -3426,7 +3453,9 @@ static int find_data_references(struct reloc_control *rc,
3426 block->key_ready = 1; 3453 block->key_ready = 1;
3427 rb_node = tree_insert(blocks, block->bytenr, 3454 rb_node = tree_insert(blocks, block->bytenr,
3428 &block->rb_node); 3455 &block->rb_node);
3429 BUG_ON(rb_node); 3456 if (rb_node)
3457 backref_tree_panic(rb_node, -EEXIST,
3458 block->bytenr);
3430 } 3459 }
3431 if (counted) 3460 if (counted)
3432 added = 1; 3461 added = 1;
@@ -4073,10 +4102,11 @@ out:
4073static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) 4102static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4074{ 4103{
4075 struct btrfs_trans_handle *trans; 4104 struct btrfs_trans_handle *trans;
4076 int ret; 4105 int ret, err;
4077 4106
4078 trans = btrfs_start_transaction(root->fs_info->tree_root, 0); 4107 trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
4079 BUG_ON(IS_ERR(trans)); 4108 if (IS_ERR(trans))
4109 return PTR_ERR(trans);
4080 4110
4081 memset(&root->root_item.drop_progress, 0, 4111 memset(&root->root_item.drop_progress, 0,
4082 sizeof(root->root_item.drop_progress)); 4112 sizeof(root->root_item.drop_progress));
@@ -4084,11 +4114,11 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4084 btrfs_set_root_refs(&root->root_item, 0); 4114 btrfs_set_root_refs(&root->root_item, 0);
4085 ret = btrfs_update_root(trans, root->fs_info->tree_root, 4115 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4086 &root->root_key, &root->root_item); 4116 &root->root_key, &root->root_item);
4087 BUG_ON(ret);
4088 4117
4089 ret = btrfs_end_transaction(trans, root->fs_info->tree_root); 4118 err = btrfs_end_transaction(trans, root->fs_info->tree_root);
4090 BUG_ON(ret); 4119 if (err)
4091 return 0; 4120 return err;
4121 return ret;
4092} 4122}
4093 4123
4094/* 4124/*
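
mark_garbage_root() now carries two status variables so that btrfs_end_transaction() always runs: its failure, if any, is what gets reported, otherwise the earlier btrfs_update_root() result is returned unchanged. A compressed sketch of that convention (the two step functions are stubs, not the btrfs calls):

#include <stdio.h>

static int do_update(void) { return -5; }	/* pretend the update hit -EIO */
static int do_end(void)    { return 0; }	/* ending the transaction worked */

/* Both calls always run; a failure from do_end() is reported, otherwise
 * the earlier do_update() status comes back unchanged. */
static int mark_garbage(void)
{
	int ret, err;

	ret = do_update();
	err = do_end();
	if (err)
		return err;
	return ret;
}

int main(void)
{
	printf("mark_garbage() = %d\n", mark_garbage());
	return 0;
}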
@@ -4156,7 +4186,11 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4156 err = ret; 4186 err = ret;
4157 goto out; 4187 goto out;
4158 } 4188 }
4159 mark_garbage_root(reloc_root); 4189 ret = mark_garbage_root(reloc_root);
4190 if (ret < 0) {
4191 err = ret;
4192 goto out;
4193 }
4160 } 4194 }
4161 } 4195 }
4162 4196
@@ -4202,13 +4236,19 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4202 4236
4203 fs_root = read_fs_root(root->fs_info, 4237 fs_root = read_fs_root(root->fs_info,
4204 reloc_root->root_key.offset); 4238 reloc_root->root_key.offset);
4205 BUG_ON(IS_ERR(fs_root)); 4239 if (IS_ERR(fs_root)) {
4240 err = PTR_ERR(fs_root);
4241 goto out_free;
4242 }
4206 4243
4207 __add_reloc_root(reloc_root); 4244 err = __add_reloc_root(reloc_root);
4245 BUG_ON(err < 0); /* -ENOMEM or logic error */
4208 fs_root->reloc_root = reloc_root; 4246 fs_root->reloc_root = reloc_root;
4209 } 4247 }
4210 4248
4211 btrfs_commit_transaction(trans, rc->extent_root); 4249 err = btrfs_commit_transaction(trans, rc->extent_root);
4250 if (err)
4251 goto out_free;
4212 4252
4213 merge_reloc_roots(rc); 4253 merge_reloc_roots(rc);
4214 4254
@@ -4218,7 +4258,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4218 if (IS_ERR(trans)) 4258 if (IS_ERR(trans))
4219 err = PTR_ERR(trans); 4259 err = PTR_ERR(trans);
4220 else 4260 else
4221 btrfs_commit_transaction(trans, rc->extent_root); 4261 err = btrfs_commit_transaction(trans, rc->extent_root);
4222out_free: 4262out_free:
4223 kfree(rc); 4263 kfree(rc);
4224out: 4264out:
@@ -4267,6 +4307,8 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4267 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; 4307 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
4268 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, 4308 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
4269 disk_bytenr + len - 1, &list, 0); 4309 disk_bytenr + len - 1, &list, 0);
4310 if (ret)
4311 goto out;
4270 4312
4271 while (!list_empty(&list)) { 4313 while (!list_empty(&list)) {
4272 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 4314 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
@@ -4284,6 +4326,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4284 4326
4285 btrfs_add_ordered_sum(inode, ordered, sums); 4327 btrfs_add_ordered_sum(inode, ordered, sums);
4286 } 4328 }
4329out:
4287 btrfs_put_ordered_extent(ordered); 4330 btrfs_put_ordered_extent(ordered);
4288 return ret; 4331 return ret;
4289} 4332}
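
In btrfs_reloc_clone_csums() the new early exit jumps to the out label rather than returning, so the btrfs_put_ordered_extent() at the bottom still drops the reference exactly once. A minimal illustration of that single-exit pattern, with a toy refcount in place of the ordered extent (every name here is invented):

#include <stdio.h>

struct ordered { int refs; };

static void ordered_get(struct ordered *o) { o->refs++; }
static void ordered_put(struct ordered *o) { o->refs--; }

static int lookup_csums(int fail) { return fail ? -5 : 0; }

/* Error paths jump to "out" so the reference taken at the top is always
 * dropped on the way out, matching the shape of the hunk above. */
static int clone_csums(struct ordered *o, int fail)
{
	int ret;

	ordered_get(o);
	ret = lookup_csums(fail);
	if (ret)
		goto out;

	/* ... walk the checksum list here ... */
out:
	ordered_put(o);
	return ret;
}

int main(void)
{
	struct ordered o = { .refs = 0 };

	clone_csums(&o, 1);
	printf("refs after failed call: %d (expect 0)\n", o.refs);
	return 0;
}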
@@ -4380,7 +4423,7 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
4380 * called after snapshot is created. migrate block reservation 4423 * called after snapshot is created. migrate block reservation
4381 * and create reloc root for the newly created snapshot 4424 * and create reloc root for the newly created snapshot
4382 */ 4425 */
4383void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, 4426int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4384 struct btrfs_pending_snapshot *pending) 4427 struct btrfs_pending_snapshot *pending)
4385{ 4428{
4386 struct btrfs_root *root = pending->root; 4429 struct btrfs_root *root = pending->root;
@@ -4390,7 +4433,7 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4390 int ret; 4433 int ret;
4391 4434
4392 if (!root->reloc_root) 4435 if (!root->reloc_root)
4393 return; 4436 return 0;
4394 4437
4395 rc = root->fs_info->reloc_ctl; 4438 rc = root->fs_info->reloc_ctl;
4396 rc->merging_rsv_size += rc->nodes_relocated; 4439 rc->merging_rsv_size += rc->nodes_relocated;
@@ -4399,18 +4442,21 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4399 ret = btrfs_block_rsv_migrate(&pending->block_rsv, 4442 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4400 rc->block_rsv, 4443 rc->block_rsv,
4401 rc->nodes_relocated); 4444 rc->nodes_relocated);
4402 BUG_ON(ret); 4445 if (ret)
4446 return ret;
4403 } 4447 }
4404 4448
4405 new_root = pending->snap; 4449 new_root = pending->snap;
4406 reloc_root = create_reloc_root(trans, root->reloc_root, 4450 reloc_root = create_reloc_root(trans, root->reloc_root,
4407 new_root->root_key.objectid); 4451 new_root->root_key.objectid);
4452 if (IS_ERR(reloc_root))
4453 return PTR_ERR(reloc_root);
4408 4454
4409 __add_reloc_root(reloc_root); 4455 ret = __add_reloc_root(reloc_root);
4456 BUG_ON(ret < 0);
4410 new_root->reloc_root = reloc_root; 4457 new_root->reloc_root = reloc_root;
4411 4458
4412 if (rc->create_reloc_tree) { 4459 if (rc->create_reloc_tree)
4413 ret = clone_backref_node(trans, rc, root, reloc_root); 4460 ret = clone_backref_node(trans, rc, root, reloc_root);
4414 BUG_ON(ret); 4461 return ret;
4415 }
4416} 4462}
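
btrfs_reloc_post_snapshot() now returns int and propagates PTR_ERR() from create_reloc_root() instead of asserting on IS_ERR(). For readers less familiar with that convention, here is a self-contained userspace imitation of it, assuming the usual trick of encoding small negative errnos in the top page of the address space (MAX_ERRNO and both functions below are stand-ins, not the kernel definitions):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

/* Minimal imitation of the kernel's ERR_PTR helpers. */
#define MAX_ERRNO	4095
static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct reloc_root { int id; };

static struct reloc_root *create_reloc_root(int fail)
{
	struct reloc_root *r;

	if (fail)
		return ERR_PTR(-ENOSPC);	/* simulated failure */
	r = malloc(sizeof(*r));
	if (!r)
		return ERR_PTR(-ENOMEM);
	r->id = 1;
	return r;
}

/* Instead of BUG_ON(IS_ERR(...)), hand the encoded errno back up. */
static int post_snapshot(int fail)
{
	struct reloc_root *r = create_reloc_root(fail);

	if (IS_ERR(r))
		return (int)PTR_ERR(r);
	/* ... attach r to the new root here ... */
	free(r);
	return 0;
}

int main(void)
{
	printf("post_snapshot(ok)   = %d\n", post_snapshot(0));
	printf("post_snapshot(fail) = %d\n", post_snapshot(1));
	return 0;
}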
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index f4099904565a..24fb8ce4e071 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -93,10 +93,14 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
93 unsigned long ptr; 93 unsigned long ptr;
94 94
95 path = btrfs_alloc_path(); 95 path = btrfs_alloc_path();
96 BUG_ON(!path); 96 if (!path)
97 return -ENOMEM;
98
97 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 99 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
98 if (ret < 0) 100 if (ret < 0) {
101 btrfs_abort_transaction(trans, root, ret);
99 goto out; 102 goto out;
103 }
100 104
101 if (ret != 0) { 105 if (ret != 0) {
102 btrfs_print_leaf(root, path->nodes[0]); 106 btrfs_print_leaf(root, path->nodes[0]);
@@ -116,13 +120,10 @@ out:
116 return ret; 120 return ret;
117} 121}
118 122
119int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root 123int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
120 *root, struct btrfs_key *key, struct btrfs_root_item 124 struct btrfs_key *key, struct btrfs_root_item *item)
121 *item)
122{ 125{
123 int ret; 126 return btrfs_insert_item(trans, root, key, item, sizeof(*item));
124 ret = btrfs_insert_item(trans, root, key, item, sizeof(*item));
125 return ret;
126} 127}
127 128
128/* 129/*
@@ -384,6 +385,8 @@ int btrfs_find_root_ref(struct btrfs_root *tree_root,
384 * 385 *
385 * For a back ref the root_id is the id of the subvol or snapshot and 386 * For a back ref the root_id is the id of the subvol or snapshot and
386 * ref_id is the id of the tree referencing it. 387 * ref_id is the id of the tree referencing it.
388 *
389 * Will return 0, -ENOMEM, or anything from the CoW path
387 */ 390 */
388int btrfs_add_root_ref(struct btrfs_trans_handle *trans, 391int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
389 struct btrfs_root *tree_root, 392 struct btrfs_root *tree_root,
@@ -407,7 +410,11 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
407again: 410again:
408 ret = btrfs_insert_empty_item(trans, tree_root, path, &key, 411 ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
409 sizeof(*ref) + name_len); 412 sizeof(*ref) + name_len);
410 BUG_ON(ret); 413 if (ret) {
414 btrfs_abort_transaction(trans, tree_root, ret);
415 btrfs_free_path(path);
416 return ret;
417 }
411 418
412 leaf = path->nodes[0]; 419 leaf = path->nodes[0];
413 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 420 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 5221e072bb65..07e59d97551a 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2157,6 +2157,9 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
2157 struct btrfs_device *device = sdev->dev; 2157 struct btrfs_device *device = sdev->dev;
2158 struct btrfs_root *root = device->dev_root; 2158 struct btrfs_root *root = device->dev_root;
2159 2159
2160 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2161 return -EIO;
2162
2160 gen = root->fs_info->last_trans_committed; 2163 gen = root->fs_info->last_trans_committed;
2161 2164
2162 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2165 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -2317,7 +2320,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
2317 return ret; 2320 return ret;
2318} 2321}
2319 2322
2320int btrfs_scrub_pause(struct btrfs_root *root) 2323void btrfs_scrub_pause(struct btrfs_root *root)
2321{ 2324{
2322 struct btrfs_fs_info *fs_info = root->fs_info; 2325 struct btrfs_fs_info *fs_info = root->fs_info;
2323 2326
@@ -2332,34 +2335,28 @@ int btrfs_scrub_pause(struct btrfs_root *root)
2332 mutex_lock(&fs_info->scrub_lock); 2335 mutex_lock(&fs_info->scrub_lock);
2333 } 2336 }
2334 mutex_unlock(&fs_info->scrub_lock); 2337 mutex_unlock(&fs_info->scrub_lock);
2335
2336 return 0;
2337} 2338}
2338 2339
2339int btrfs_scrub_continue(struct btrfs_root *root) 2340void btrfs_scrub_continue(struct btrfs_root *root)
2340{ 2341{
2341 struct btrfs_fs_info *fs_info = root->fs_info; 2342 struct btrfs_fs_info *fs_info = root->fs_info;
2342 2343
2343 atomic_dec(&fs_info->scrub_pause_req); 2344 atomic_dec(&fs_info->scrub_pause_req);
2344 wake_up(&fs_info->scrub_pause_wait); 2345 wake_up(&fs_info->scrub_pause_wait);
2345 return 0;
2346} 2346}
2347 2347
2348int btrfs_scrub_pause_super(struct btrfs_root *root) 2348void btrfs_scrub_pause_super(struct btrfs_root *root)
2349{ 2349{
2350 down_write(&root->fs_info->scrub_super_lock); 2350 down_write(&root->fs_info->scrub_super_lock);
2351 return 0;
2352} 2351}
2353 2352
2354int btrfs_scrub_continue_super(struct btrfs_root *root) 2353void btrfs_scrub_continue_super(struct btrfs_root *root)
2355{ 2354{
2356 up_write(&root->fs_info->scrub_super_lock); 2355 up_write(&root->fs_info->scrub_super_lock);
2357 return 0;
2358} 2356}
2359 2357
2360int btrfs_scrub_cancel(struct btrfs_root *root) 2358int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2361{ 2359{
2362 struct btrfs_fs_info *fs_info = root->fs_info;
2363 2360
2364 mutex_lock(&fs_info->scrub_lock); 2361 mutex_lock(&fs_info->scrub_lock);
2365 if (!atomic_read(&fs_info->scrubs_running)) { 2362 if (!atomic_read(&fs_info->scrubs_running)) {
@@ -2380,6 +2377,11 @@ int btrfs_scrub_cancel(struct btrfs_root *root)
2380 return 0; 2377 return 0;
2381} 2378}
2382 2379
2380int btrfs_scrub_cancel(struct btrfs_root *root)
2381{
2382 return __btrfs_scrub_cancel(root->fs_info);
2383}
2384
2383int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev) 2385int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
2384{ 2386{
2385 struct btrfs_fs_info *fs_info = root->fs_info; 2387 struct btrfs_fs_info *fs_info = root->fs_info;
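
The scrub change splits cancellation into __btrfs_scrub_cancel(), which needs only the fs_info, and keeps the old root-based btrfs_scrub_cancel() as a thin wrapper; that split is what lets btrfs_handle_error() in super.c below cancel scrubs when the filesystem goes read-only. A sketch of the wrapper pattern (the struct layout and use of -ENOTCONN are illustrative only):

#include <stdio.h>
#include <errno.h>

struct fs_info { int scrubs_running; };
struct root    { struct fs_info *fs_info; };

/* Core takes only what it needs: the per-filesystem state. */
static int scrub_cancel_fs(struct fs_info *fs_info)
{
	if (!fs_info->scrubs_running)
		return -ENOTCONN;		/* nothing to cancel */
	fs_info->scrubs_running = 0;
	return 0;
}

/* The old entry point survives as a wrapper, so existing callers that
 * hold a root keep working unchanged. */
static int scrub_cancel(struct root *root)
{
	return scrub_cancel_fs(root->fs_info);
}

int main(void)
{
	struct fs_info fi = { .scrubs_running = 1 };
	struct root r = { .fs_info = &fi };

	printf("cancel via root:    %d\n", scrub_cancel(&r));
	printf("cancel via fs_info: %d\n", scrub_cancel_fs(&fi));
	return 0;
}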
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3ce97b217cbe..9db64165123a 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -76,6 +76,9 @@ static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
76 case -EROFS: 76 case -EROFS:
77 errstr = "Readonly filesystem"; 77 errstr = "Readonly filesystem";
78 break; 78 break;
79 case -EEXIST:
80 errstr = "Object already exists";
81 break;
79 default: 82 default:
80 if (nbuf) { 83 if (nbuf) {
81 if (snprintf(nbuf, 16, "error %d", -errno) >= 0) 84 if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
@@ -116,6 +119,8 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
116 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 119 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
117 sb->s_flags |= MS_RDONLY; 120 sb->s_flags |= MS_RDONLY;
118 printk(KERN_INFO "btrfs is forced readonly\n"); 121 printk(KERN_INFO "btrfs is forced readonly\n");
122 __btrfs_scrub_cancel(fs_info);
123// WARN_ON(1);
119 } 124 }
120} 125}
121 126
@@ -124,25 +129,132 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
 124 * invokes the appropriate error response. 129 * invokes the appropriate error response.
 125 */ 130 */
125 */ 130 */
126void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, 131void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
127 unsigned int line, int errno) 132 unsigned int line, int errno, const char *fmt, ...)
128{ 133{
129 struct super_block *sb = fs_info->sb; 134 struct super_block *sb = fs_info->sb;
130 char nbuf[16]; 135 char nbuf[16];
131 const char *errstr; 136 const char *errstr;
137 va_list args;
138 va_start(args, fmt);
132 139
133 /* 140 /*
134 * Special case: if the error is EROFS, and we're already 141 * Special case: if the error is EROFS, and we're already
135 * under MS_RDONLY, then it is safe here. 142 * under MS_RDONLY, then it is safe here.
136 */ 143 */
137 if (errno == -EROFS && (sb->s_flags & MS_RDONLY)) 144 if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
145 return;
146
147 errstr = btrfs_decode_error(fs_info, errno, nbuf);
148 if (fmt) {
149 struct va_format vaf = {
150 .fmt = fmt,
151 .va = &args,
152 };
153
154 printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
155 sb->s_id, function, line, errstr, &vaf);
156 } else {
157 printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
158 sb->s_id, function, line, errstr);
159 }
160
161 /* Don't go through full error handling during mount */
162 if (sb->s_flags & MS_BORN) {
163 save_error_info(fs_info);
164 btrfs_handle_error(fs_info);
165 }
166 va_end(args);
167}
168
169const char *logtypes[] = {
170 "emergency",
171 "alert",
172 "critical",
173 "error",
174 "warning",
175 "notice",
176 "info",
177 "debug",
178};
179
180void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
181{
182 struct super_block *sb = fs_info->sb;
183 char lvl[4];
184 struct va_format vaf;
185 va_list args;
186 const char *type = logtypes[4];
187
188 va_start(args, fmt);
189
190 if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') {
 191 strncpy(lvl, fmt, 3);
 192 type = logtypes[fmt[1] - '0'];
 193 fmt += 3;
194 } else
195 *lvl = '\0';
196
197 vaf.fmt = fmt;
198 vaf.va = &args;
199 printk("%sBTRFS %s (device %s): %pV", lvl, type, sb->s_id, &vaf);
200}
201
202/*
203 * We only mark the transaction aborted and then set the file system read-only.
204 * This will prevent new transactions from starting or trying to join this
205 * one.
206 *
207 * This means that error recovery at the call site is limited to freeing
208 * any local memory allocations and passing the error code up without
209 * further cleanup. The transaction should complete as it normally would
210 * in the call path but will return -EIO.
211 *
212 * We'll complete the cleanup in btrfs_end_transaction and
213 * btrfs_commit_transaction.
214 */
215void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
216 struct btrfs_root *root, const char *function,
217 unsigned int line, int errno)
218{
219 WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted");
220 trans->aborted = errno;
221 /* Nothing used. The other threads that have joined this
222 * transaction may be able to continue. */
223 if (!trans->blocks_used) {
224 btrfs_printk(root->fs_info, "Aborting unused transaction.\n");
138 return; 225 return;
226 }
227 trans->transaction->aborted = errno;
228 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
229}
230/*
231 * __btrfs_panic decodes unexpected, fatal errors from the caller,
232 * issues an alert, and either panics or BUGs, depending on mount options.
233 */
234void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
235 unsigned int line, int errno, const char *fmt, ...)
236{
237 char nbuf[16];
238 char *s_id = "<unknown>";
239 const char *errstr;
240 struct va_format vaf = { .fmt = fmt };
241 va_list args;
139 242
140 errstr = btrfs_decode_error(fs_info, errno, nbuf); 243 if (fs_info)
141 printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n", 244 s_id = fs_info->sb->s_id;
142 sb->s_id, function, line, errstr); 245
143 save_error_info(fs_info); 246 va_start(args, fmt);
247 vaf.va = &args;
144 248
145 btrfs_handle_error(fs_info); 249 errstr = btrfs_decode_error(fs_info, errno, nbuf);
250 if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)
251 panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
252 s_id, function, line, &vaf, errstr);
253
254 printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
255 s_id, function, line, &vaf, errstr);
256 va_end(args);
257 /* Caller calls BUG() */
146} 258}
147 259
148static void btrfs_put_super(struct super_block *sb) 260static void btrfs_put_super(struct super_block *sb)
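
__btrfs_std_error() and btrfs_printk() above fold an optional printf-style tail into a single log line through struct va_format and %pV, which only exist inside the kernel. Outside it, the usual equivalent is vsnprintf() plus a macro that records __func__ and __LINE__ at the call site. A hedged sketch follows; it relies on the GNU ##__VA_ARGS__ extension, and report_error/std_error are invented names:

#include <stdio.h>
#include <stdarg.h>

/* Capture call-site information the way the btrfs error macros do. */
#define std_error(dev, err, fmt, ...) \
	report_error(dev, __func__, __LINE__, err, fmt, ##__VA_ARGS__)

static void report_error(const char *dev, const char *function,
			 unsigned int line, int err, const char *fmt, ...)
{
	char msg[256] = "";
	va_list args;

	if (fmt) {
		va_start(args, fmt);
		vsnprintf(msg, sizeof(msg), fmt, args);
		va_end(args);
	}
	fprintf(stderr, "FS error (device %s) in %s:%u: error %d%s%s\n",
		dev, function, line, err, fmt ? ": " : "", msg);
}

int main(void)
{
	std_error("sda1", -17, "duplicate item at offset %llu", 123456ULL);
	std_error("sda1", -30, NULL);	/* no extra context */
	return 0;
}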
@@ -166,7 +278,7 @@ enum {
166 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache, 278 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache,
167 Opt_no_space_cache, Opt_recovery, Opt_skip_balance, 279 Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
168 Opt_check_integrity, Opt_check_integrity_including_extent_data, 280 Opt_check_integrity, Opt_check_integrity_including_extent_data,
169 Opt_check_integrity_print_mask, 281 Opt_check_integrity_print_mask, Opt_fatal_errors,
170 Opt_err, 282 Opt_err,
171}; 283};
172 284
@@ -206,12 +318,14 @@ static match_table_t tokens = {
206 {Opt_check_integrity, "check_int"}, 318 {Opt_check_integrity, "check_int"},
207 {Opt_check_integrity_including_extent_data, "check_int_data"}, 319 {Opt_check_integrity_including_extent_data, "check_int_data"},
208 {Opt_check_integrity_print_mask, "check_int_print_mask=%d"}, 320 {Opt_check_integrity_print_mask, "check_int_print_mask=%d"},
321 {Opt_fatal_errors, "fatal_errors=%s"},
209 {Opt_err, NULL}, 322 {Opt_err, NULL},
210}; 323};
211 324
212/* 325/*
213 * Regular mount options parser. Everything that is needed only when 326 * Regular mount options parser. Everything that is needed only when
214 * reading in a new superblock is parsed here. 327 * reading in a new superblock is parsed here.
328 * XXX JDM: This needs to be cleaned up for remount.
215 */ 329 */
216int btrfs_parse_options(struct btrfs_root *root, char *options) 330int btrfs_parse_options(struct btrfs_root *root, char *options)
217{ 331{
@@ -438,6 +552,18 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
438 ret = -EINVAL; 552 ret = -EINVAL;
439 goto out; 553 goto out;
440#endif 554#endif
555 case Opt_fatal_errors:
556 if (strcmp(args[0].from, "panic") == 0)
557 btrfs_set_opt(info->mount_opt,
558 PANIC_ON_FATAL_ERROR);
559 else if (strcmp(args[0].from, "bug") == 0)
560 btrfs_clear_opt(info->mount_opt,
561 PANIC_ON_FATAL_ERROR);
562 else {
563 ret = -EINVAL;
564 goto out;
565 }
566 break;
441 case Opt_err: 567 case Opt_err:
442 printk(KERN_INFO "btrfs: unrecognized mount option " 568 printk(KERN_INFO "btrfs: unrecognized mount option "
443 "'%s'\n", p); 569 "'%s'\n", p);
@@ -766,6 +892,8 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
766 seq_puts(seq, ",inode_cache"); 892 seq_puts(seq, ",inode_cache");
767 if (btrfs_test_opt(root, SKIP_BALANCE)) 893 if (btrfs_test_opt(root, SKIP_BALANCE))
768 seq_puts(seq, ",skip_balance"); 894 seq_puts(seq, ",skip_balance");
895 if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR))
896 seq_puts(seq, ",fatal_errors=panic");
769 return 0; 897 return 0;
770} 898}
771 899
@@ -999,11 +1127,20 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
999{ 1127{
1000 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1128 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1001 struct btrfs_root *root = fs_info->tree_root; 1129 struct btrfs_root *root = fs_info->tree_root;
1130 unsigned old_flags = sb->s_flags;
1131 unsigned long old_opts = fs_info->mount_opt;
1132 unsigned long old_compress_type = fs_info->compress_type;
1133 u64 old_max_inline = fs_info->max_inline;
1134 u64 old_alloc_start = fs_info->alloc_start;
1135 int old_thread_pool_size = fs_info->thread_pool_size;
1136 unsigned int old_metadata_ratio = fs_info->metadata_ratio;
1002 int ret; 1137 int ret;
1003 1138
1004 ret = btrfs_parse_options(root, data); 1139 ret = btrfs_parse_options(root, data);
1005 if (ret) 1140 if (ret) {
1006 return -EINVAL; 1141 ret = -EINVAL;
1142 goto restore;
1143 }
1007 1144
1008 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) 1145 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
1009 return 0; 1146 return 0;
@@ -1011,26 +1148,44 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1011 if (*flags & MS_RDONLY) { 1148 if (*flags & MS_RDONLY) {
1012 sb->s_flags |= MS_RDONLY; 1149 sb->s_flags |= MS_RDONLY;
1013 1150
1014 ret = btrfs_commit_super(root); 1151 ret = btrfs_commit_super(root);
1015 WARN_ON(ret); 1152 if (ret)
1153 goto restore;
1016 } else { 1154 } else {
1017 if (fs_info->fs_devices->rw_devices == 0) 1155 if (fs_info->fs_devices->rw_devices == 0)
1018 return -EACCES; 1156 ret = -EACCES;
1157 goto restore;
1019 1158
1020 if (btrfs_super_log_root(fs_info->super_copy) != 0) 1159 if (btrfs_super_log_root(fs_info->super_copy) != 0)
1021 return -EINVAL; 1160 ret = -EINVAL;
1161 goto restore;
1022 1162
1023 ret = btrfs_cleanup_fs_roots(fs_info); 1163 ret = btrfs_cleanup_fs_roots(fs_info);
1024 WARN_ON(ret); 1164 if (ret)
1165 goto restore;
1025 1166
1026 /* recover relocation */ 1167 /* recover relocation */
1027 ret = btrfs_recover_relocation(root); 1168 ret = btrfs_recover_relocation(root);
1028 WARN_ON(ret); 1169 if (ret)
1170 goto restore;
1029 1171
1030 sb->s_flags &= ~MS_RDONLY; 1172 sb->s_flags &= ~MS_RDONLY;
1031 } 1173 }
1032 1174
1033 return 0; 1175 return 0;
1176
1177restore:
1178 /* We've hit an error - don't reset MS_RDONLY */
1179 if (sb->s_flags & MS_RDONLY)
1180 old_flags |= MS_RDONLY;
1181 sb->s_flags = old_flags;
1182 fs_info->mount_opt = old_opts;
1183 fs_info->compress_type = old_compress_type;
1184 fs_info->max_inline = old_max_inline;
1185 fs_info->alloc_start = old_alloc_start;
1186 fs_info->thread_pool_size = old_thread_pool_size;
1187 fs_info->metadata_ratio = old_metadata_ratio;
1188 return ret;
1034} 1189}
1035 1190
1036/* Used to sort the devices by max_avail(descending sort) */ 1191/* Used to sort the devices by max_avail(descending sort) */
@@ -1360,9 +1515,7 @@ static int __init init_btrfs_fs(void)
1360 if (err) 1515 if (err)
1361 return err; 1516 return err;
1362 1517
1363 err = btrfs_init_compress(); 1518 btrfs_init_compress();
1364 if (err)
1365 goto free_sysfs;
1366 1519
1367 err = btrfs_init_cachep(); 1520 err = btrfs_init_cachep();
1368 if (err) 1521 if (err)
@@ -1403,7 +1556,6 @@ free_cachep:
1403 btrfs_destroy_cachep(); 1556 btrfs_destroy_cachep();
1404free_compress: 1557free_compress:
1405 btrfs_exit_compress(); 1558 btrfs_exit_compress();
1406free_sysfs:
1407 btrfs_exit_sysfs(); 1559 btrfs_exit_sysfs();
1408 return err; 1560 return err;
1409} 1561}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 04b77e3ceb7a..63f835aa9788 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -31,7 +31,7 @@
31 31
32#define BTRFS_ROOT_TRANS_TAG 0 32#define BTRFS_ROOT_TRANS_TAG 0
33 33
34static noinline void put_transaction(struct btrfs_transaction *transaction) 34void put_transaction(struct btrfs_transaction *transaction)
35{ 35{
36 WARN_ON(atomic_read(&transaction->use_count) == 0); 36 WARN_ON(atomic_read(&transaction->use_count) == 0);
37 if (atomic_dec_and_test(&transaction->use_count)) { 37 if (atomic_dec_and_test(&transaction->use_count)) {
@@ -58,6 +58,12 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
58 58
59 spin_lock(&root->fs_info->trans_lock); 59 spin_lock(&root->fs_info->trans_lock);
60loop: 60loop:
61 /* The file system has been taken offline. No new transactions. */
62 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
63 spin_unlock(&root->fs_info->trans_lock);
64 return -EROFS;
65 }
66
61 if (root->fs_info->trans_no_join) { 67 if (root->fs_info->trans_no_join) {
62 if (!nofail) { 68 if (!nofail) {
63 spin_unlock(&root->fs_info->trans_lock); 69 spin_unlock(&root->fs_info->trans_lock);
@@ -67,6 +73,8 @@ loop:
67 73
68 cur_trans = root->fs_info->running_transaction; 74 cur_trans = root->fs_info->running_transaction;
69 if (cur_trans) { 75 if (cur_trans) {
76 if (cur_trans->aborted)
77 return cur_trans->aborted;
70 atomic_inc(&cur_trans->use_count); 78 atomic_inc(&cur_trans->use_count);
71 atomic_inc(&cur_trans->num_writers); 79 atomic_inc(&cur_trans->num_writers);
72 cur_trans->num_joined++; 80 cur_trans->num_joined++;
@@ -123,6 +131,7 @@ loop:
123 root->fs_info->generation++; 131 root->fs_info->generation++;
124 cur_trans->transid = root->fs_info->generation; 132 cur_trans->transid = root->fs_info->generation;
125 root->fs_info->running_transaction = cur_trans; 133 root->fs_info->running_transaction = cur_trans;
134 cur_trans->aborted = 0;
126 spin_unlock(&root->fs_info->trans_lock); 135 spin_unlock(&root->fs_info->trans_lock);
127 136
128 return 0; 137 return 0;
@@ -318,6 +327,7 @@ again:
318 h->use_count = 1; 327 h->use_count = 1;
319 h->block_rsv = NULL; 328 h->block_rsv = NULL;
320 h->orig_rsv = NULL; 329 h->orig_rsv = NULL;
330 h->aborted = 0;
321 331
322 smp_mb(); 332 smp_mb();
323 if (cur_trans->blocked && may_wait_transaction(root, type)) { 333 if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -440,6 +450,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
440 struct btrfs_transaction *cur_trans = trans->transaction; 450 struct btrfs_transaction *cur_trans = trans->transaction;
441 struct btrfs_block_rsv *rsv = trans->block_rsv; 451 struct btrfs_block_rsv *rsv = trans->block_rsv;
442 int updates; 452 int updates;
453 int err;
443 454
444 smp_mb(); 455 smp_mb();
445 if (cur_trans->blocked || cur_trans->delayed_refs.flushing) 456 if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
@@ -453,8 +464,11 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
453 464
454 updates = trans->delayed_ref_updates; 465 updates = trans->delayed_ref_updates;
455 trans->delayed_ref_updates = 0; 466 trans->delayed_ref_updates = 0;
456 if (updates) 467 if (updates) {
457 btrfs_run_delayed_refs(trans, root, updates); 468 err = btrfs_run_delayed_refs(trans, root, updates);
469 if (err) /* Error code will also eval true */
470 return err;
471 }
458 472
459 trans->block_rsv = rsv; 473 trans->block_rsv = rsv;
460 474
@@ -525,6 +539,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
525 if (throttle) 539 if (throttle)
526 btrfs_run_delayed_iputs(root); 540 btrfs_run_delayed_iputs(root);
527 541
542 if (trans->aborted ||
543 root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
544 return -EIO;
545 }
546
528 return 0; 547 return 0;
529} 548}
530 549
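
Taken together, the transaction.c changes above give aborts a simple lifecycle: join_transaction() refuses new work once the filesystem is errored or the running transaction is already marked aborted, and __btrfs_end_transaction() converts an abort into -EIO for whoever finishes the handle. A toy model of that state machine (struct txn and its fields are invented, not the btrfs types):

#include <stdio.h>
#include <errno.h>

struct txn {
	int aborted;		/* errno recorded when something failed */
	int fs_errored;		/* filesystem has been forced read-only */
};

/* No new writers once things have gone bad. */
static int join_transaction(struct txn *t)
{
	if (t->fs_errored)
		return -EROFS;
	if (t->aborted)
		return t->aborted;
	return 0;
}

/* Remember the errno so later joiners and the final end see it. */
static void abort_transaction(struct txn *t, int err)
{
	t->aborted = err;
}

/* The abort surfaces as -EIO when the handle is ended. */
static int end_transaction(struct txn *t)
{
	if (t->aborted || t->fs_errored)
		return -EIO;
	return 0;
}

int main(void)
{
	struct txn t = { 0, 0 };

	abort_transaction(&t, -ENOSPC);
	printf("join: %d, end: %d\n", join_transaction(&t), end_transaction(&t));
	return 0;
}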
@@ -690,11 +709,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
690 ret = btrfs_update_root(trans, tree_root, 709 ret = btrfs_update_root(trans, tree_root,
691 &root->root_key, 710 &root->root_key,
692 &root->root_item); 711 &root->root_item);
693 BUG_ON(ret); 712 if (ret)
713 return ret;
694 714
695 old_root_used = btrfs_root_used(&root->root_item); 715 old_root_used = btrfs_root_used(&root->root_item);
696 ret = btrfs_write_dirty_block_groups(trans, root); 716 ret = btrfs_write_dirty_block_groups(trans, root);
697 BUG_ON(ret); 717 if (ret)
718 return ret;
698 } 719 }
699 720
700 if (root != root->fs_info->extent_root) 721 if (root != root->fs_info->extent_root)
@@ -705,6 +726,10 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
705 726
706/* 727/*
707 * update all the cowonly tree roots on disk 728 * update all the cowonly tree roots on disk
729 *
730 * The error handling in this function may not be obvious. Any of the
731 * failures will cause the file system to go offline. We still need
732 * to clean up the delayed refs.
708 */ 733 */
709static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, 734static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
710 struct btrfs_root *root) 735 struct btrfs_root *root)
@@ -715,22 +740,30 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
715 int ret; 740 int ret;
716 741
717 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 742 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
718 BUG_ON(ret); 743 if (ret)
744 return ret;
719 745
720 eb = btrfs_lock_root_node(fs_info->tree_root); 746 eb = btrfs_lock_root_node(fs_info->tree_root);
721 btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb); 747 ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
748 0, &eb);
722 btrfs_tree_unlock(eb); 749 btrfs_tree_unlock(eb);
723 free_extent_buffer(eb); 750 free_extent_buffer(eb);
724 751
752 if (ret)
753 return ret;
754
725 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 755 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
726 BUG_ON(ret); 756 if (ret)
757 return ret;
727 758
728 while (!list_empty(&fs_info->dirty_cowonly_roots)) { 759 while (!list_empty(&fs_info->dirty_cowonly_roots)) {
729 next = fs_info->dirty_cowonly_roots.next; 760 next = fs_info->dirty_cowonly_roots.next;
730 list_del_init(next); 761 list_del_init(next);
731 root = list_entry(next, struct btrfs_root, dirty_list); 762 root = list_entry(next, struct btrfs_root, dirty_list);
732 763
733 update_cowonly_root(trans, root); 764 ret = update_cowonly_root(trans, root);
765 if (ret)
766 return ret;
734 } 767 }
735 768
736 down_write(&fs_info->extent_commit_sem); 769 down_write(&fs_info->extent_commit_sem);
@@ -874,7 +907,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
874 907
875 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); 908 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
876 if (!new_root_item) { 909 if (!new_root_item) {
877 pending->error = -ENOMEM; 910 ret = pending->error = -ENOMEM;
878 goto fail; 911 goto fail;
879 } 912 }
880 913
@@ -911,21 +944,24 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
911 * insert the directory item 944 * insert the directory item
912 */ 945 */
913 ret = btrfs_set_inode_index(parent_inode, &index); 946 ret = btrfs_set_inode_index(parent_inode, &index);
914 BUG_ON(ret); 947 BUG_ON(ret); /* -ENOMEM */
915 ret = btrfs_insert_dir_item(trans, parent_root, 948 ret = btrfs_insert_dir_item(trans, parent_root,
916 dentry->d_name.name, dentry->d_name.len, 949 dentry->d_name.name, dentry->d_name.len,
917 parent_inode, &key, 950 parent_inode, &key,
918 BTRFS_FT_DIR, index); 951 BTRFS_FT_DIR, index);
919 if (ret) { 952 if (ret == -EEXIST) {
920 pending->error = -EEXIST; 953 pending->error = -EEXIST;
921 dput(parent); 954 dput(parent);
922 goto fail; 955 goto fail;
956 } else if (ret) {
957 goto abort_trans_dput;
923 } 958 }
924 959
925 btrfs_i_size_write(parent_inode, parent_inode->i_size + 960 btrfs_i_size_write(parent_inode, parent_inode->i_size +
926 dentry->d_name.len * 2); 961 dentry->d_name.len * 2);
927 ret = btrfs_update_inode(trans, parent_root, parent_inode); 962 ret = btrfs_update_inode(trans, parent_root, parent_inode);
928 BUG_ON(ret); 963 if (ret)
964 goto abort_trans_dput;
929 965
930 /* 966 /*
931 * pull in the delayed directory update 967 * pull in the delayed directory update
@@ -934,7 +970,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
934 * snapshot 970 * snapshot
935 */ 971 */
936 ret = btrfs_run_delayed_items(trans, root); 972 ret = btrfs_run_delayed_items(trans, root);
937 BUG_ON(ret); 973 if (ret) { /* Transaction aborted */
974 dput(parent);
975 goto fail;
976 }
938 977
939 record_root_in_trans(trans, root); 978 record_root_in_trans(trans, root);
940 btrfs_set_root_last_snapshot(&root->root_item, trans->transid); 979 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
@@ -949,12 +988,21 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
949 btrfs_set_root_flags(new_root_item, root_flags); 988 btrfs_set_root_flags(new_root_item, root_flags);
950 989
951 old = btrfs_lock_root_node(root); 990 old = btrfs_lock_root_node(root);
952 btrfs_cow_block(trans, root, old, NULL, 0, &old); 991 ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
992 if (ret) {
993 btrfs_tree_unlock(old);
994 free_extent_buffer(old);
995 goto abort_trans_dput;
996 }
997
953 btrfs_set_lock_blocking(old); 998 btrfs_set_lock_blocking(old);
954 999
955 btrfs_copy_root(trans, root, old, &tmp, objectid); 1000 ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1001 /* clean up in any case */
956 btrfs_tree_unlock(old); 1002 btrfs_tree_unlock(old);
957 free_extent_buffer(old); 1003 free_extent_buffer(old);
1004 if (ret)
1005 goto abort_trans_dput;
958 1006
959 /* see comments in should_cow_block() */ 1007 /* see comments in should_cow_block() */
960 root->force_cow = 1; 1008 root->force_cow = 1;
@@ -966,7 +1014,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
966 ret = btrfs_insert_root(trans, tree_root, &key, new_root_item); 1014 ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
967 btrfs_tree_unlock(tmp); 1015 btrfs_tree_unlock(tmp);
968 free_extent_buffer(tmp); 1016 free_extent_buffer(tmp);
969 BUG_ON(ret); 1017 if (ret)
1018 goto abort_trans_dput;
970 1019
971 /* 1020 /*
972 * insert root back/forward references 1021 * insert root back/forward references
@@ -975,19 +1024,32 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
975 parent_root->root_key.objectid, 1024 parent_root->root_key.objectid,
976 btrfs_ino(parent_inode), index, 1025 btrfs_ino(parent_inode), index,
977 dentry->d_name.name, dentry->d_name.len); 1026 dentry->d_name.name, dentry->d_name.len);
978 BUG_ON(ret);
979 dput(parent); 1027 dput(parent);
1028 if (ret)
1029 goto fail;
980 1030
981 key.offset = (u64)-1; 1031 key.offset = (u64)-1;
982 pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key); 1032 pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
983 BUG_ON(IS_ERR(pending->snap)); 1033 if (IS_ERR(pending->snap)) {
1034 ret = PTR_ERR(pending->snap);
1035 goto abort_trans;
1036 }
984 1037
985 btrfs_reloc_post_snapshot(trans, pending); 1038 ret = btrfs_reloc_post_snapshot(trans, pending);
1039 if (ret)
1040 goto abort_trans;
1041 ret = 0;
986fail: 1042fail:
987 kfree(new_root_item); 1043 kfree(new_root_item);
988 trans->block_rsv = rsv; 1044 trans->block_rsv = rsv;
989 btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1); 1045 btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
990 return 0; 1046 return ret;
1047
1048abort_trans_dput:
1049 dput(parent);
1050abort_trans:
1051 btrfs_abort_transaction(trans, root, ret);
1052 goto fail;
991} 1053}
992 1054
993/* 1055/*
@@ -1124,6 +1186,33 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1124 return 0; 1186 return 0;
1125} 1187}
1126 1188
1189
1190static void cleanup_transaction(struct btrfs_trans_handle *trans,
1191 struct btrfs_root *root)
1192{
1193 struct btrfs_transaction *cur_trans = trans->transaction;
1194
1195 WARN_ON(trans->use_count > 1);
1196
1197 spin_lock(&root->fs_info->trans_lock);
1198 list_del_init(&cur_trans->list);
1199 spin_unlock(&root->fs_info->trans_lock);
1200
1201 btrfs_cleanup_one_transaction(trans->transaction, root);
1202
1203 put_transaction(cur_trans);
1204 put_transaction(cur_trans);
1205
1206 trace_btrfs_transaction_commit(root);
1207
1208 btrfs_scrub_continue(root);
1209
1210 if (current->journal_info == trans)
1211 current->journal_info = NULL;
1212
1213 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1214}
1215
1127/* 1216/*
1128 * btrfs_transaction state sequence: 1217 * btrfs_transaction state sequence:
1129 * in_commit = 0, blocked = 0 (initial) 1218 * in_commit = 0, blocked = 0 (initial)
@@ -1135,10 +1224,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1135 struct btrfs_root *root) 1224 struct btrfs_root *root)
1136{ 1225{
1137 unsigned long joined = 0; 1226 unsigned long joined = 0;
1138 struct btrfs_transaction *cur_trans; 1227 struct btrfs_transaction *cur_trans = trans->transaction;
1139 struct btrfs_transaction *prev_trans = NULL; 1228 struct btrfs_transaction *prev_trans = NULL;
1140 DEFINE_WAIT(wait); 1229 DEFINE_WAIT(wait);
1141 int ret; 1230 int ret = -EIO;
1142 int should_grow = 0; 1231 int should_grow = 0;
1143 unsigned long now = get_seconds(); 1232 unsigned long now = get_seconds();
1144 int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT); 1233 int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
@@ -1148,13 +1237,18 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1148 btrfs_trans_release_metadata(trans, root); 1237 btrfs_trans_release_metadata(trans, root);
1149 trans->block_rsv = NULL; 1238 trans->block_rsv = NULL;
1150 1239
1240 if (cur_trans->aborted)
1241 goto cleanup_transaction;
1242
1151 /* make a pass through all the delayed refs we have so far 1243 /* make a pass through all the delayed refs we have so far
1152	 * any running procs may add more while we are here 1244
1153 */ 1245 */
1154 ret = btrfs_run_delayed_refs(trans, root, 0); 1246 ret = btrfs_run_delayed_refs(trans, root, 0);
1155 BUG_ON(ret); 1247 if (ret)
1248 goto cleanup_transaction;
1156 1249
1157 cur_trans = trans->transaction; 1250 cur_trans = trans->transaction;
1251
1158 /* 1252 /*
1159 * set the flushing flag so procs in this transaction have to 1253 * set the flushing flag so procs in this transaction have to
1160 * start sending their work down. 1254 * start sending their work down.
@@ -1162,19 +1256,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1162 cur_trans->delayed_refs.flushing = 1; 1256 cur_trans->delayed_refs.flushing = 1;
1163 1257
1164 ret = btrfs_run_delayed_refs(trans, root, 0); 1258 ret = btrfs_run_delayed_refs(trans, root, 0);
1165 BUG_ON(ret); 1259 if (ret)
1260 goto cleanup_transaction;
1166 1261
1167 spin_lock(&cur_trans->commit_lock); 1262 spin_lock(&cur_trans->commit_lock);
1168 if (cur_trans->in_commit) { 1263 if (cur_trans->in_commit) {
1169 spin_unlock(&cur_trans->commit_lock); 1264 spin_unlock(&cur_trans->commit_lock);
1170 atomic_inc(&cur_trans->use_count); 1265 atomic_inc(&cur_trans->use_count);
1171 btrfs_end_transaction(trans, root); 1266 ret = btrfs_end_transaction(trans, root);
1172 1267
1173 wait_for_commit(root, cur_trans); 1268 wait_for_commit(root, cur_trans);
1174 1269
1175 put_transaction(cur_trans); 1270 put_transaction(cur_trans);
1176 1271
1177 return 0; 1272 return ret;
1178 } 1273 }
1179 1274
1180 trans->transaction->in_commit = 1; 1275 trans->transaction->in_commit = 1;
@@ -1214,12 +1309,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1214 1309
1215 if (flush_on_commit || snap_pending) { 1310 if (flush_on_commit || snap_pending) {
1216 btrfs_start_delalloc_inodes(root, 1); 1311 btrfs_start_delalloc_inodes(root, 1);
1217 ret = btrfs_wait_ordered_extents(root, 0, 1); 1312 btrfs_wait_ordered_extents(root, 0, 1);
1218 BUG_ON(ret);
1219 } 1313 }
1220 1314
1221 ret = btrfs_run_delayed_items(trans, root); 1315 ret = btrfs_run_delayed_items(trans, root);
1222 BUG_ON(ret); 1316 if (ret)
1317 goto cleanup_transaction;
1223 1318
1224 /* 1319 /*
1225 * rename don't use btrfs_join_transaction, so, once we 1320 * rename don't use btrfs_join_transaction, so, once we
@@ -1261,13 +1356,22 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1261 mutex_lock(&root->fs_info->reloc_mutex); 1356 mutex_lock(&root->fs_info->reloc_mutex);
1262 1357
1263 ret = btrfs_run_delayed_items(trans, root); 1358 ret = btrfs_run_delayed_items(trans, root);
1264 BUG_ON(ret); 1359 if (ret) {
1360 mutex_unlock(&root->fs_info->reloc_mutex);
1361 goto cleanup_transaction;
1362 }
1265 1363
1266 ret = create_pending_snapshots(trans, root->fs_info); 1364 ret = create_pending_snapshots(trans, root->fs_info);
1267 BUG_ON(ret); 1365 if (ret) {
1366 mutex_unlock(&root->fs_info->reloc_mutex);
1367 goto cleanup_transaction;
1368 }
1268 1369
1269 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1370 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1270 BUG_ON(ret); 1371 if (ret) {
1372 mutex_unlock(&root->fs_info->reloc_mutex);
1373 goto cleanup_transaction;
1374 }
1271 1375
1272 /* 1376 /*
1273 * make sure none of the code above managed to slip in a 1377 * make sure none of the code above managed to slip in a
@@ -1294,7 +1398,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1294 mutex_lock(&root->fs_info->tree_log_mutex); 1398 mutex_lock(&root->fs_info->tree_log_mutex);
1295 1399
1296 ret = commit_fs_roots(trans, root); 1400 ret = commit_fs_roots(trans, root);
1297 BUG_ON(ret); 1401 if (ret) {
1402 mutex_unlock(&root->fs_info->tree_log_mutex);
1403 goto cleanup_transaction;
1404 }
1298 1405
1299 /* commit_fs_roots gets rid of all the tree log roots, it is now 1406 /* commit_fs_roots gets rid of all the tree log roots, it is now
1300 * safe to free the root of tree log roots 1407 * safe to free the root of tree log roots
@@ -1302,7 +1409,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1302 btrfs_free_log_root_tree(trans, root->fs_info); 1409 btrfs_free_log_root_tree(trans, root->fs_info);
1303 1410
1304 ret = commit_cowonly_roots(trans, root); 1411 ret = commit_cowonly_roots(trans, root);
1305 BUG_ON(ret); 1412 if (ret) {
1413 mutex_unlock(&root->fs_info->tree_log_mutex);
1414 goto cleanup_transaction;
1415 }
1306 1416
1307 btrfs_prepare_extent_commit(trans, root); 1417 btrfs_prepare_extent_commit(trans, root);
1308 1418
@@ -1336,8 +1446,18 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1336 wake_up(&root->fs_info->transaction_wait); 1446 wake_up(&root->fs_info->transaction_wait);
1337 1447
1338 ret = btrfs_write_and_wait_transaction(trans, root); 1448 ret = btrfs_write_and_wait_transaction(trans, root);
1339 BUG_ON(ret); 1449 if (ret) {
1340 write_ctree_super(trans, root, 0); 1450 btrfs_error(root->fs_info, ret,
1451 "Error while writing out transaction.");
1452 mutex_unlock(&root->fs_info->tree_log_mutex);
1453 goto cleanup_transaction;
1454 }
1455
1456 ret = write_ctree_super(trans, root, 0);
1457 if (ret) {
1458 mutex_unlock(&root->fs_info->tree_log_mutex);
1459 goto cleanup_transaction;
1460 }
1341 1461
1342 /* 1462 /*
1343 * the super is written, we can safely allow the tree-loggers 1463 * the super is written, we can safely allow the tree-loggers
@@ -1373,6 +1493,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1373 btrfs_run_delayed_iputs(root); 1493 btrfs_run_delayed_iputs(root);
1374 1494
1375 return ret; 1495 return ret;
1496
1497cleanup_transaction:
1498 btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
1499// WARN_ON(1);
1500 if (current->journal_info == trans)
1501 current->journal_info = NULL;
1502 cleanup_transaction(trans, root);
1503
1504 return ret;
1376} 1505}
1377 1506
1378/* 1507/*
@@ -1388,6 +1517,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1388 spin_unlock(&fs_info->trans_lock); 1517 spin_unlock(&fs_info->trans_lock);
1389 1518
1390 while (!list_empty(&list)) { 1519 while (!list_empty(&list)) {
1520 int ret;
1521
1391 root = list_entry(list.next, struct btrfs_root, root_list); 1522 root = list_entry(list.next, struct btrfs_root, root_list);
1392 list_del(&root->root_list); 1523 list_del(&root->root_list);
1393 1524
@@ -1395,9 +1526,10 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1395 1526
1396 if (btrfs_header_backref_rev(root->node) < 1527 if (btrfs_header_backref_rev(root->node) <
1397 BTRFS_MIXED_BACKREF_REV) 1528 BTRFS_MIXED_BACKREF_REV)
1398 btrfs_drop_snapshot(root, NULL, 0, 0); 1529 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
1399 else 1530 else
1400 			btrfs_drop_snapshot(root, NULL, 1, 0); 1531 			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
1532 BUG_ON(ret < 0);
1401 } 1533 }
1402 return 0; 1534 return 0;
1403} 1535}
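
The transaction.c hunks above are the heart of the conversion: instead of BUG_ON() on recoverable failures, create_pending_snapshot() now unwinds through staged labels (fail for early failures, abort_trans_dput/abort_trans once the transaction must be poisoned via btrfs_abort_transaction()), and btrfs_commit_transaction() funnels every error into the new cleanup_transaction() helper. The following is a minimal user-space sketch of that staged-goto unwind, assuming invented helpers (do_step_a, do_step_b, abort_transaction) rather than the btrfs code itself:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for steps that may fail with a negative errno. */
static int do_step_a(void) { return 0; }
static int do_step_b(void) { return -EIO; }	/* pretend this step fails */

static void abort_transaction(int err)
{
	fprintf(stderr, "aborting transaction: %d\n", err);
}

static int do_pending_work(void)
{
	char *scratch;		/* plays the role of new_root_item */
	int ret;

	scratch = malloc(64);
	if (!scratch)
		return -ENOMEM;

	ret = do_step_a();
	if (ret)
		goto fail;		/* early failures only need the common cleanup */

	ret = do_step_b();
	if (ret)
		goto abort_trans;	/* later failures also poison the transaction */

fail:
	free(scratch);			/* common cleanup runs on every path */
	return ret;

abort_trans:
	abort_transaction(ret);
	goto fail;
}

int main(void)
{
	printf("result: %d\n", do_pending_work());	/* reports the -EIO from do_step_b() */
	return 0;
}
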
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 02564e6230ac..fe27379e368b 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -43,6 +43,7 @@ struct btrfs_transaction {
43 wait_queue_head_t commit_wait; 43 wait_queue_head_t commit_wait;
44 struct list_head pending_snapshots; 44 struct list_head pending_snapshots;
45 struct btrfs_delayed_ref_root delayed_refs; 45 struct btrfs_delayed_ref_root delayed_refs;
46 int aborted;
46}; 47};
47 48
48struct btrfs_trans_handle { 49struct btrfs_trans_handle {
@@ -55,6 +56,7 @@ struct btrfs_trans_handle {
55 struct btrfs_transaction *transaction; 56 struct btrfs_transaction *transaction;
56 struct btrfs_block_rsv *block_rsv; 57 struct btrfs_block_rsv *block_rsv;
57 struct btrfs_block_rsv *orig_rsv; 58 struct btrfs_block_rsv *orig_rsv;
59 int aborted;
58}; 60};
59 61
60struct btrfs_pending_snapshot { 62struct btrfs_pending_snapshot {
@@ -114,4 +116,5 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
114 struct extent_io_tree *dirty_pages, int mark); 116 struct extent_io_tree *dirty_pages, int mark);
115int btrfs_transaction_blocked(struct btrfs_fs_info *info); 117int btrfs_transaction_blocked(struct btrfs_fs_info *info);
116int btrfs_transaction_in_commit(struct btrfs_fs_info *info); 118int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
119void put_transaction(struct btrfs_transaction *transaction);
117#endif 120#endif
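
transaction.h backs this up with an aborted flag on both struct btrfs_transaction and struct btrfs_trans_handle, and exports put_transaction() so the new cleanup path can drop the extra references. A hedged sketch of how such a flag is typically consumed; struct transaction and commit_transaction() here are illustrative stand-ins, not the btrfs definitions:

#include <errno.h>
#include <stdio.h>

struct transaction {
	int aborted;		/* set once any step fails; commit refuses to run */
};

/* Illustrative commit: bail out early instead of writing out a broken tree. */
static int commit_transaction(struct transaction *t)
{
	if (t->aborted)
		return -EIO;	/* mirrors the "goto cleanup_transaction" path above */
	/* ... normal commit work would go here ... */
	return 0;
}

int main(void)
{
	struct transaction t = { .aborted = 1 };
	printf("commit: %d\n", commit_transaction(&t));
	return 0;
}
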
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 966cc74f5d6c..d017283ae6f5 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -212,14 +212,13 @@ int btrfs_pin_log_trans(struct btrfs_root *root)
212 * indicate we're done making changes to the log tree 212 * indicate we're done making changes to the log tree
213 * and wake up anyone waiting to do a sync 213 * and wake up anyone waiting to do a sync
214 */ 214 */
215int btrfs_end_log_trans(struct btrfs_root *root) 215void btrfs_end_log_trans(struct btrfs_root *root)
216{ 216{
217 if (atomic_dec_and_test(&root->log_writers)) { 217 if (atomic_dec_and_test(&root->log_writers)) {
218 smp_mb(); 218 smp_mb();
219 if (waitqueue_active(&root->log_writer_wait)) 219 if (waitqueue_active(&root->log_writer_wait))
220 wake_up(&root->log_writer_wait); 220 wake_up(&root->log_writer_wait);
221 } 221 }
222 return 0;
223} 222}
224 223
225 224
@@ -378,12 +377,11 @@ insert:
378 u32 found_size; 377 u32 found_size;
379 found_size = btrfs_item_size_nr(path->nodes[0], 378 found_size = btrfs_item_size_nr(path->nodes[0],
380 path->slots[0]); 379 path->slots[0]);
381 if (found_size > item_size) { 380 if (found_size > item_size)
382 btrfs_truncate_item(trans, root, path, item_size, 1); 381 btrfs_truncate_item(trans, root, path, item_size, 1);
383 } else if (found_size < item_size) { 382 else if (found_size < item_size)
384 ret = btrfs_extend_item(trans, root, path, 383 btrfs_extend_item(trans, root, path,
385 item_size - found_size); 384 item_size - found_size);
386 }
387 } else if (ret) { 385 } else if (ret) {
388 return ret; 386 return ret;
389 } 387 }
@@ -1763,7 +1761,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1763 BTRFS_TREE_LOG_OBJECTID); 1761 BTRFS_TREE_LOG_OBJECTID);
1764 ret = btrfs_free_and_pin_reserved_extent(root, 1762 ret = btrfs_free_and_pin_reserved_extent(root,
1765 bytenr, blocksize); 1763 bytenr, blocksize);
1766 BUG_ON(ret); 1764 BUG_ON(ret); /* -ENOMEM or logic errors */
1767 } 1765 }
1768 free_extent_buffer(next); 1766 free_extent_buffer(next);
1769 continue; 1767 continue;
@@ -1871,20 +1869,26 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1871 wret = walk_down_log_tree(trans, log, path, &level, wc); 1869 wret = walk_down_log_tree(trans, log, path, &level, wc);
1872 if (wret > 0) 1870 if (wret > 0)
1873 break; 1871 break;
1874 if (wret < 0) 1872 if (wret < 0) {
1875 ret = wret; 1873 ret = wret;
1874 goto out;
1875 }
1876 1876
1877 wret = walk_up_log_tree(trans, log, path, &level, wc); 1877 wret = walk_up_log_tree(trans, log, path, &level, wc);
1878 if (wret > 0) 1878 if (wret > 0)
1879 break; 1879 break;
1880 if (wret < 0) 1880 if (wret < 0) {
1881 ret = wret; 1881 ret = wret;
1882 goto out;
1883 }
1882 } 1884 }
1883 1885
1884 /* was the root node processed? if not, catch it here */ 1886 /* was the root node processed? if not, catch it here */
1885 if (path->nodes[orig_level]) { 1887 if (path->nodes[orig_level]) {
1886 wc->process_func(log, path->nodes[orig_level], wc, 1888 ret = wc->process_func(log, path->nodes[orig_level], wc,
1887 btrfs_header_generation(path->nodes[orig_level])); 1889 btrfs_header_generation(path->nodes[orig_level]));
1890 if (ret)
1891 goto out;
1888 if (wc->free) { 1892 if (wc->free) {
1889 struct extent_buffer *next; 1893 struct extent_buffer *next;
1890 1894
@@ -1900,10 +1904,11 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1900 BTRFS_TREE_LOG_OBJECTID); 1904 BTRFS_TREE_LOG_OBJECTID);
1901 ret = btrfs_free_and_pin_reserved_extent(log, next->start, 1905 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
1902 next->len); 1906 next->len);
1903 BUG_ON(ret); 1907 BUG_ON(ret); /* -ENOMEM or logic errors */
1904 } 1908 }
1905 } 1909 }
1906 1910
1911out:
1907 for (i = 0; i <= orig_level; i++) { 1912 for (i = 0; i <= orig_level; i++) {
1908 if (path->nodes[i]) { 1913 if (path->nodes[i]) {
1909 free_extent_buffer(path->nodes[i]); 1914 free_extent_buffer(path->nodes[i]);
@@ -1963,8 +1968,8 @@ static int wait_log_commit(struct btrfs_trans_handle *trans,
1963 return 0; 1968 return 0;
1964} 1969}
1965 1970
1966static int wait_for_writer(struct btrfs_trans_handle *trans, 1971static void wait_for_writer(struct btrfs_trans_handle *trans,
1967 struct btrfs_root *root) 1972 struct btrfs_root *root)
1968{ 1973{
1969 DEFINE_WAIT(wait); 1974 DEFINE_WAIT(wait);
1970 while (root->fs_info->last_trans_log_full_commit != 1975 while (root->fs_info->last_trans_log_full_commit !=
@@ -1978,7 +1983,6 @@ static int wait_for_writer(struct btrfs_trans_handle *trans,
1978 mutex_lock(&root->log_mutex); 1983 mutex_lock(&root->log_mutex);
1979 finish_wait(&root->log_writer_wait, &wait); 1984 finish_wait(&root->log_writer_wait, &wait);
1980 } 1985 }
1981 return 0;
1982} 1986}
1983 1987
1984/* 1988/*
@@ -2046,7 +2050,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2046 * wait for them until later. 2050 * wait for them until later.
2047 */ 2051 */
2048 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark); 2052 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2049 BUG_ON(ret); 2053 if (ret) {
2054 btrfs_abort_transaction(trans, root, ret);
2055 mutex_unlock(&root->log_mutex);
2056 goto out;
2057 }
2050 2058
2051 btrfs_set_root_node(&log->root_item, log->node); 2059 btrfs_set_root_node(&log->root_item, log->node);
2052 2060
@@ -2077,7 +2085,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2077 } 2085 }
2078 2086
2079 if (ret) { 2087 if (ret) {
2080 BUG_ON(ret != -ENOSPC); 2088 if (ret != -ENOSPC) {
2089 btrfs_abort_transaction(trans, root, ret);
2090 mutex_unlock(&log_root_tree->log_mutex);
2091 goto out;
2092 }
2081 root->fs_info->last_trans_log_full_commit = trans->transid; 2093 root->fs_info->last_trans_log_full_commit = trans->transid;
2082 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2094 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2083 mutex_unlock(&log_root_tree->log_mutex); 2095 mutex_unlock(&log_root_tree->log_mutex);
@@ -2117,7 +2129,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2117 ret = btrfs_write_and_wait_marked_extents(log_root_tree, 2129 ret = btrfs_write_and_wait_marked_extents(log_root_tree,
2118 &log_root_tree->dirty_log_pages, 2130 &log_root_tree->dirty_log_pages,
2119 EXTENT_DIRTY | EXTENT_NEW); 2131 EXTENT_DIRTY | EXTENT_NEW);
2120 BUG_ON(ret); 2132 if (ret) {
2133 btrfs_abort_transaction(trans, root, ret);
2134 mutex_unlock(&log_root_tree->log_mutex);
2135 goto out_wake_log_root;
2136 }
2121 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2137 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2122 2138
2123 btrfs_set_super_log_root(root->fs_info->super_for_commit, 2139 btrfs_set_super_log_root(root->fs_info->super_for_commit,
@@ -2326,7 +2342,9 @@ out_unlock:
2326 if (ret == -ENOSPC) { 2342 if (ret == -ENOSPC) {
2327 root->fs_info->last_trans_log_full_commit = trans->transid; 2343 root->fs_info->last_trans_log_full_commit = trans->transid;
2328 ret = 0; 2344 ret = 0;
2329 } 2345 } else if (ret < 0)
2346 btrfs_abort_transaction(trans, root, ret);
2347
2330 btrfs_end_log_trans(root); 2348 btrfs_end_log_trans(root);
2331 2349
2332 return err; 2350 return err;
@@ -2357,7 +2375,8 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2357 if (ret == -ENOSPC) { 2375 if (ret == -ENOSPC) {
2358 root->fs_info->last_trans_log_full_commit = trans->transid; 2376 root->fs_info->last_trans_log_full_commit = trans->transid;
2359 ret = 0; 2377 ret = 0;
2360 } 2378 } else if (ret < 0 && ret != -ENOENT)
2379 btrfs_abort_transaction(trans, root, ret);
2361 btrfs_end_log_trans(root); 2380 btrfs_end_log_trans(root);
2362 2381
2363 return ret; 2382 return ret;
@@ -3169,13 +3188,20 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
3169 fs_info->log_root_recovering = 1; 3188 fs_info->log_root_recovering = 1;
3170 3189
3171 trans = btrfs_start_transaction(fs_info->tree_root, 0); 3190 trans = btrfs_start_transaction(fs_info->tree_root, 0);
3172 BUG_ON(IS_ERR(trans)); 3191 if (IS_ERR(trans)) {
3192 ret = PTR_ERR(trans);
3193 goto error;
3194 }
3173 3195
3174 wc.trans = trans; 3196 wc.trans = trans;
3175 wc.pin = 1; 3197 wc.pin = 1;
3176 3198
3177 ret = walk_log_tree(trans, log_root_tree, &wc); 3199 ret = walk_log_tree(trans, log_root_tree, &wc);
3178 BUG_ON(ret); 3200 if (ret) {
3201 btrfs_error(fs_info, ret, "Failed to pin buffers while "
3202 "recovering log root tree.");
3203 goto error;
3204 }
3179 3205
3180again: 3206again:
3181 key.objectid = BTRFS_TREE_LOG_OBJECTID; 3207 key.objectid = BTRFS_TREE_LOG_OBJECTID;
@@ -3184,8 +3210,12 @@ again:
3184 3210
3185 while (1) { 3211 while (1) {
3186 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); 3212 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
3187 if (ret < 0) 3213
3188 break; 3214 if (ret < 0) {
3215 btrfs_error(fs_info, ret,
3216 "Couldn't find tree log root.");
3217 goto error;
3218 }
3189 if (ret > 0) { 3219 if (ret > 0) {
3190 if (path->slots[0] == 0) 3220 if (path->slots[0] == 0)
3191 break; 3221 break;
@@ -3199,14 +3229,24 @@ again:
3199 3229
3200 log = btrfs_read_fs_root_no_radix(log_root_tree, 3230 log = btrfs_read_fs_root_no_radix(log_root_tree,
3201 &found_key); 3231 &found_key);
3202 BUG_ON(IS_ERR(log)); 3232 if (IS_ERR(log)) {
3233 ret = PTR_ERR(log);
3234 btrfs_error(fs_info, ret,
3235 "Couldn't read tree log root.");
3236 goto error;
3237 }
3203 3238
3204 tmp_key.objectid = found_key.offset; 3239 tmp_key.objectid = found_key.offset;
3205 tmp_key.type = BTRFS_ROOT_ITEM_KEY; 3240 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
3206 tmp_key.offset = (u64)-1; 3241 tmp_key.offset = (u64)-1;
3207 3242
3208 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 3243 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
3209 BUG_ON(IS_ERR_OR_NULL(wc.replay_dest)); 3244 if (IS_ERR(wc.replay_dest)) {
3245 ret = PTR_ERR(wc.replay_dest);
3246 btrfs_error(fs_info, ret, "Couldn't read target root "
3247 "for tree log recovery.");
3248 goto error;
3249 }
3210 3250
3211 wc.replay_dest->log_root = log; 3251 wc.replay_dest->log_root = log;
3212 btrfs_record_root_in_trans(trans, wc.replay_dest); 3252 btrfs_record_root_in_trans(trans, wc.replay_dest);
@@ -3254,6 +3294,10 @@ again:
3254 3294
3255 kfree(log_root_tree); 3295 kfree(log_root_tree);
3256 return 0; 3296 return 0;
3297
3298error:
3299 btrfs_free_path(path);
3300 return ret;
3257} 3301}
3258 3302
3259/* 3303/*
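
Two recurring conversions run through the tree-log.c hunks above: functions whose return value was always zero (btrfs_end_log_trans(), wait_for_writer()) become void, and walkers such as walk_log_tree() stop discarding callee errors, jumping instead to a shared out: label that still frees the path buffers. Below is a minimal sketch of the second pattern, with walk_down()/walk_up() as invented stand-ins for the real walkers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical walkers: >0 means "done", <0 is an error, 0 means keep going. */
static int walk_down(int *level) { return (*level)-- > 0 ? 0 : 1; }
static int walk_up(int *level)   { (void)level; return 0; }

static int walk_tree(int start_level)
{
	int *scratch = malloc(sizeof(*scratch));  /* must be freed on every path */
	int level = start_level;
	int ret = 0;
	int wret;

	if (!scratch)
		return -ENOMEM;

	while (1) {
		wret = walk_down(&level);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;	/* propagate instead of silently dropping it */
		}

		wret = walk_up(&level);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

out:
	free(scratch);
	return ret;
}

int main(void)
{
	printf("walk: %d\n", walk_tree(3));
	return 0;
}
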
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 2270ac58d746..862ac813f6b8 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -38,7 +38,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
38 struct btrfs_root *root, 38 struct btrfs_root *root,
39 const char *name, int name_len, 39 const char *name, int name_len,
40 struct inode *inode, u64 dirid); 40 struct inode *inode, u64 dirid);
41int btrfs_end_log_trans(struct btrfs_root *root); 41void btrfs_end_log_trans(struct btrfs_root *root);
42int btrfs_pin_log_trans(struct btrfs_root *root); 42int btrfs_pin_log_trans(struct btrfs_root *root);
43int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 43int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
44 struct btrfs_root *root, struct inode *inode, 44 struct btrfs_root *root, struct inode *inode,
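
The header change mirrors the conversion: once btrfs_end_log_trans() cannot fail, its prototype says so, and any caller still testing the old return value stops compiling instead of silently checking a constant. A trivial sketch of that shape, with end_log_trans() as an illustrative name only:

#include <stdio.h>

/*
 * Before the conversion the prototype would have read
 *   int end_log_trans(void);
 * even though every return path handed back 0. Making it void removes the
 * temptation to "handle" an error that cannot happen.
 */
static void end_log_trans(void)
{
	puts("log transaction ended");	/* stand-in for the wake-up logic */
}

int main(void)
{
	end_log_trans();
	return 0;
}
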
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 58aad63e1ad3..d64cd6cbdbb6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -67,7 +67,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
67 kfree(fs_devices); 67 kfree(fs_devices);
68} 68}
69 69
70int btrfs_cleanup_fs_uuids(void) 70void btrfs_cleanup_fs_uuids(void)
71{ 71{
72 struct btrfs_fs_devices *fs_devices; 72 struct btrfs_fs_devices *fs_devices;
73 73
@@ -77,7 +77,6 @@ int btrfs_cleanup_fs_uuids(void)
77 list_del(&fs_devices->list); 77 list_del(&fs_devices->list);
78 free_fs_devices(fs_devices); 78 free_fs_devices(fs_devices);
79 } 79 }
80 return 0;
81} 80}
82 81
83static noinline struct btrfs_device *__find_device(struct list_head *head, 82static noinline struct btrfs_device *__find_device(struct list_head *head,
@@ -130,7 +129,7 @@ static void requeue_list(struct btrfs_pending_bios *pending_bios,
130 * the list if the block device is congested. This way, multiple devices 129 * the list if the block device is congested. This way, multiple devices
131 * can make progress from a single worker thread. 130 * can make progress from a single worker thread.
132 */ 131 */
133static noinline int run_scheduled_bios(struct btrfs_device *device) 132static noinline void run_scheduled_bios(struct btrfs_device *device)
134{ 133{
135 struct bio *pending; 134 struct bio *pending;
136 struct backing_dev_info *bdi; 135 struct backing_dev_info *bdi;
@@ -316,7 +315,6 @@ loop_lock:
316 315
317done: 316done:
318 blk_finish_plug(&plug); 317 blk_finish_plug(&plug);
319 return 0;
320} 318}
321 319
322static void pending_bios_fn(struct btrfs_work *work) 320static void pending_bios_fn(struct btrfs_work *work)
@@ -455,7 +453,7 @@ error:
455 return ERR_PTR(-ENOMEM); 453 return ERR_PTR(-ENOMEM);
456} 454}
457 455
458int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) 456void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
459{ 457{
460 struct btrfs_device *device, *next; 458 struct btrfs_device *device, *next;
461 459
@@ -503,7 +501,6 @@ again:
503 fs_devices->latest_trans = latest_transid; 501 fs_devices->latest_trans = latest_transid;
504 502
505 mutex_unlock(&uuid_mutex); 503 mutex_unlock(&uuid_mutex);
506 return 0;
507} 504}
508 505
509static void __free_device(struct work_struct *work) 506static void __free_device(struct work_struct *work)
@@ -552,10 +549,10 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
552 fs_devices->num_can_discard--; 549 fs_devices->num_can_discard--;
553 550
554 new_device = kmalloc(sizeof(*new_device), GFP_NOFS); 551 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
555 BUG_ON(!new_device); 552 BUG_ON(!new_device); /* -ENOMEM */
556 memcpy(new_device, device, sizeof(*new_device)); 553 memcpy(new_device, device, sizeof(*new_device));
557 new_device->name = kstrdup(device->name, GFP_NOFS); 554 new_device->name = kstrdup(device->name, GFP_NOFS);
558 BUG_ON(device->name && !new_device->name); 555 BUG_ON(device->name && !new_device->name); /* -ENOMEM */
559 new_device->bdev = NULL; 556 new_device->bdev = NULL;
560 new_device->writeable = 0; 557 new_device->writeable = 0;
561 new_device->in_fs_metadata = 0; 558 new_device->in_fs_metadata = 0;
@@ -1039,8 +1036,10 @@ again:
1039 leaf = path->nodes[0]; 1036 leaf = path->nodes[0];
1040 extent = btrfs_item_ptr(leaf, path->slots[0], 1037 extent = btrfs_item_ptr(leaf, path->slots[0],
1041 struct btrfs_dev_extent); 1038 struct btrfs_dev_extent);
1039 } else {
1040 btrfs_error(root->fs_info, ret, "Slot search failed");
1041 goto out;
1042 } 1042 }
1043 BUG_ON(ret);
1044 1043
1045 if (device->bytes_used > 0) { 1044 if (device->bytes_used > 0) {
1046 u64 len = btrfs_dev_extent_length(leaf, extent); 1045 u64 len = btrfs_dev_extent_length(leaf, extent);
@@ -1050,7 +1049,10 @@ again:
1050 spin_unlock(&root->fs_info->free_chunk_lock); 1049 spin_unlock(&root->fs_info->free_chunk_lock);
1051 } 1050 }
1052 ret = btrfs_del_item(trans, root, path); 1051 ret = btrfs_del_item(trans, root, path);
1053 1052 if (ret) {
1053 btrfs_error(root->fs_info, ret,
1054 "Failed to remove dev extent item");
1055 }
1054out: 1056out:
1055 btrfs_free_path(path); 1057 btrfs_free_path(path);
1056 return ret; 1058 return ret;
@@ -1078,7 +1080,8 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1078 key.type = BTRFS_DEV_EXTENT_KEY; 1080 key.type = BTRFS_DEV_EXTENT_KEY;
1079 ret = btrfs_insert_empty_item(trans, root, path, &key, 1081 ret = btrfs_insert_empty_item(trans, root, path, &key,
1080 sizeof(*extent)); 1082 sizeof(*extent));
1081 BUG_ON(ret); 1083 if (ret)
1084 goto out;
1082 1085
1083 leaf = path->nodes[0]; 1086 leaf = path->nodes[0];
1084 extent = btrfs_item_ptr(leaf, path->slots[0], 1087 extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -1093,6 +1096,7 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1093 1096
1094 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 1097 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1095 btrfs_mark_buffer_dirty(leaf); 1098 btrfs_mark_buffer_dirty(leaf);
1099out:
1096 btrfs_free_path(path); 1100 btrfs_free_path(path);
1097 return ret; 1101 return ret;
1098} 1102}
@@ -1118,7 +1122,7 @@ static noinline int find_next_chunk(struct btrfs_root *root,
1118 if (ret < 0) 1122 if (ret < 0)
1119 goto error; 1123 goto error;
1120 1124
1121 BUG_ON(ret == 0); 1125 BUG_ON(ret == 0); /* Corruption */
1122 1126
1123 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY); 1127 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1124 if (ret) { 1128 if (ret) {
@@ -1162,7 +1166,7 @@ static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1162 if (ret < 0) 1166 if (ret < 0)
1163 goto error; 1167 goto error;
1164 1168
1165 BUG_ON(ret == 0); 1169 BUG_ON(ret == 0); /* Corruption */
1166 1170
1167 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID, 1171 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1168 BTRFS_DEV_ITEM_KEY); 1172 BTRFS_DEV_ITEM_KEY);
@@ -1596,7 +1600,7 @@ next_slot:
1596 (unsigned long)btrfs_device_fsid(dev_item), 1600 (unsigned long)btrfs_device_fsid(dev_item),
1597 BTRFS_UUID_SIZE); 1601 BTRFS_UUID_SIZE);
1598 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); 1602 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1599 BUG_ON(!device); 1603 BUG_ON(!device); /* Logic error */
1600 1604
1601 if (device->fs_devices->seeding) { 1605 if (device->fs_devices->seeding) {
1602 btrfs_set_device_generation(leaf, dev_item, 1606 btrfs_set_device_generation(leaf, dev_item,
@@ -1706,7 +1710,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1706 if (seeding_dev) { 1710 if (seeding_dev) {
1707 sb->s_flags &= ~MS_RDONLY; 1711 sb->s_flags &= ~MS_RDONLY;
1708 ret = btrfs_prepare_sprout(root); 1712 ret = btrfs_prepare_sprout(root);
1709 BUG_ON(ret); 1713 BUG_ON(ret); /* -ENOMEM */
1710 } 1714 }
1711 1715
1712 device->fs_devices = root->fs_info->fs_devices; 1716 device->fs_devices = root->fs_info->fs_devices;
@@ -1744,11 +1748,15 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1744 1748
1745 if (seeding_dev) { 1749 if (seeding_dev) {
1746 ret = init_first_rw_device(trans, root, device); 1750 ret = init_first_rw_device(trans, root, device);
1747 BUG_ON(ret); 1751 if (ret)
1752 goto error_trans;
1748 ret = btrfs_finish_sprout(trans, root); 1753 ret = btrfs_finish_sprout(trans, root);
1749 BUG_ON(ret); 1754 if (ret)
1755 goto error_trans;
1750 } else { 1756 } else {
1751 ret = btrfs_add_device(trans, root, device); 1757 ret = btrfs_add_device(trans, root, device);
1758 if (ret)
1759 goto error_trans;
1752 } 1760 }
1753 1761
1754 /* 1762 /*
@@ -1758,17 +1766,31 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1758 btrfs_clear_space_info_full(root->fs_info); 1766 btrfs_clear_space_info_full(root->fs_info);
1759 1767
1760 unlock_chunks(root); 1768 unlock_chunks(root);
1761 btrfs_commit_transaction(trans, root); 1769 ret = btrfs_commit_transaction(trans, root);
1762 1770
1763 if (seeding_dev) { 1771 if (seeding_dev) {
1764 mutex_unlock(&uuid_mutex); 1772 mutex_unlock(&uuid_mutex);
1765 up_write(&sb->s_umount); 1773 up_write(&sb->s_umount);
1766 1774
1775 if (ret) /* transaction commit */
1776 return ret;
1777
1767 ret = btrfs_relocate_sys_chunks(root); 1778 ret = btrfs_relocate_sys_chunks(root);
1768 BUG_ON(ret); 1779 if (ret < 0)
1780 btrfs_error(root->fs_info, ret,
1781 "Failed to relocate sys chunks after "
1782 "device initialization. This can be fixed "
1783 "using the \"btrfs balance\" command.");
1769 } 1784 }
1770 1785
1771 return ret; 1786 return ret;
1787
1788error_trans:
1789 unlock_chunks(root);
1790 btrfs_abort_transaction(trans, root, ret);
1791 btrfs_end_transaction(trans, root);
1792 kfree(device->name);
1793 kfree(device);
1772error: 1794error:
1773 blkdev_put(bdev, FMODE_EXCL); 1795 blkdev_put(bdev, FMODE_EXCL);
1774 if (seeding_dev) { 1796 if (seeding_dev) {
@@ -1876,10 +1898,20 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1876 key.type = BTRFS_CHUNK_ITEM_KEY; 1898 key.type = BTRFS_CHUNK_ITEM_KEY;
1877 1899
1878 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1900 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1879 BUG_ON(ret); 1901 if (ret < 0)
1902 goto out;
1903 else if (ret > 0) { /* Logic error or corruption */
1904 btrfs_error(root->fs_info, -ENOENT,
1905 "Failed lookup while freeing chunk.");
1906 ret = -ENOENT;
1907 goto out;
1908 }
1880 1909
1881 ret = btrfs_del_item(trans, root, path); 1910 ret = btrfs_del_item(trans, root, path);
1882 1911 if (ret < 0)
1912 btrfs_error(root->fs_info, ret,
1913 "Failed to delete chunk item.");
1914out:
1883 btrfs_free_path(path); 1915 btrfs_free_path(path);
1884 return ret; 1916 return ret;
1885} 1917}
@@ -2041,7 +2073,7 @@ again:
2041 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2073 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2042 if (ret < 0) 2074 if (ret < 0)
2043 goto error; 2075 goto error;
2044 BUG_ON(ret == 0); 2076 BUG_ON(ret == 0); /* Corruption */
2045 2077
2046 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2078 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2047 key.type); 2079 key.type);
@@ -3328,13 +3360,15 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3328 write_lock(&em_tree->lock); 3360 write_lock(&em_tree->lock);
3329 ret = add_extent_mapping(em_tree, em); 3361 ret = add_extent_mapping(em_tree, em);
3330 write_unlock(&em_tree->lock); 3362 write_unlock(&em_tree->lock);
3331 BUG_ON(ret);
3332 free_extent_map(em); 3363 free_extent_map(em);
3364 if (ret)
3365 goto error;
3333 3366
3334 ret = btrfs_make_block_group(trans, extent_root, 0, type, 3367 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3335 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 3368 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3336 start, num_bytes); 3369 start, num_bytes);
3337 BUG_ON(ret); 3370 if (ret)
3371 goto error;
3338 3372
3339 for (i = 0; i < map->num_stripes; ++i) { 3373 for (i = 0; i < map->num_stripes; ++i) {
3340 struct btrfs_device *device; 3374 struct btrfs_device *device;
@@ -3347,7 +3381,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3347 info->chunk_root->root_key.objectid, 3381 info->chunk_root->root_key.objectid,
3348 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 3382 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3349 start, dev_offset, stripe_size); 3383 start, dev_offset, stripe_size);
3350 BUG_ON(ret); 3384 if (ret) {
3385 btrfs_abort_transaction(trans, extent_root, ret);
3386 goto error;
3387 }
3351 } 3388 }
3352 3389
3353 kfree(devices_info); 3390 kfree(devices_info);
@@ -3383,7 +3420,8 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3383 device = map->stripes[index].dev; 3420 device = map->stripes[index].dev;
3384 device->bytes_used += stripe_size; 3421 device->bytes_used += stripe_size;
3385 ret = btrfs_update_device(trans, device); 3422 ret = btrfs_update_device(trans, device);
3386 BUG_ON(ret); 3423 if (ret)
3424 goto out_free;
3387 index++; 3425 index++;
3388 } 3426 }
3389 3427
@@ -3420,16 +3458,19 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3420 key.offset = chunk_offset; 3458 key.offset = chunk_offset;
3421 3459
3422 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 3460 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3423 BUG_ON(ret);
3424 3461
3425 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3462 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3463 /*
3464 * TODO: Cleanup of inserted chunk root in case of
3465 * failure.
3466 */
3426 ret = btrfs_add_system_chunk(chunk_root, &key, chunk, 3467 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3427 item_size); 3468 item_size);
3428 BUG_ON(ret);
3429 } 3469 }
3430 3470
3471out_free:
3431 kfree(chunk); 3472 kfree(chunk);
3432 return 0; 3473 return ret;
3433} 3474}
3434 3475
3435/* 3476/*
@@ -3461,7 +3502,8 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3461 3502
3462 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, 3503 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3463 chunk_size, stripe_size); 3504 chunk_size, stripe_size);
3464 BUG_ON(ret); 3505 if (ret)
3506 return ret;
3465 return 0; 3507 return 0;
3466} 3508}
3467 3509
@@ -3493,7 +3535,8 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3493 3535
3494 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size, 3536 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3495 &stripe_size, chunk_offset, alloc_profile); 3537 &stripe_size, chunk_offset, alloc_profile);
3496 BUG_ON(ret); 3538 if (ret)
3539 return ret;
3497 3540
3498 sys_chunk_offset = chunk_offset + chunk_size; 3541 sys_chunk_offset = chunk_offset + chunk_size;
3499 3542
@@ -3504,10 +3547,12 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3504 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map, 3547 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3505 &sys_chunk_size, &sys_stripe_size, 3548 &sys_chunk_size, &sys_stripe_size,
3506 sys_chunk_offset, alloc_profile); 3549 sys_chunk_offset, alloc_profile);
3507 BUG_ON(ret); 3550 if (ret)
3551 goto abort;
3508 3552
3509 ret = btrfs_add_device(trans, fs_info->chunk_root, device); 3553 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3510 BUG_ON(ret); 3554 if (ret)
3555 goto abort;
3511 3556
3512 /* 3557 /*
3513 * Modifying chunk tree needs allocating new blocks from both 3558 * Modifying chunk tree needs allocating new blocks from both
@@ -3517,13 +3562,20 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3517 */ 3562 */
3518 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, 3563 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3519 chunk_size, stripe_size); 3564 chunk_size, stripe_size);
3520 BUG_ON(ret); 3565 if (ret)
3566 goto abort;
3521 3567
3522 ret = __finish_chunk_alloc(trans, extent_root, sys_map, 3568 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3523 sys_chunk_offset, sys_chunk_size, 3569 sys_chunk_offset, sys_chunk_size,
3524 sys_stripe_size); 3570 sys_stripe_size);
3525 BUG_ON(ret); 3571 if (ret)
3572 goto abort;
3573
3526 return 0; 3574 return 0;
3575
3576abort:
3577 btrfs_abort_transaction(trans, root, ret);
3578 return ret;
3527} 3579}
3528 3580
3529int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) 3581int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
@@ -3874,7 +3926,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3874 do_div(length, map->num_stripes); 3926 do_div(length, map->num_stripes);
3875 3927
3876 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); 3928 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3877 BUG_ON(!buf); 3929 BUG_ON(!buf); /* -ENOMEM */
3878 3930
3879 for (i = 0; i < map->num_stripes; i++) { 3931 for (i = 0; i < map->num_stripes; i++) {
3880 if (devid && map->stripes[i].dev->devid != devid) 3932 if (devid && map->stripes[i].dev->devid != devid)
@@ -3967,7 +4019,7 @@ struct async_sched {
3967 * This will add one bio to the pending list for a device and make sure 4019 * This will add one bio to the pending list for a device and make sure
3968 * the work struct is scheduled. 4020 * the work struct is scheduled.
3969 */ 4021 */
3970static noinline int schedule_bio(struct btrfs_root *root, 4022static noinline void schedule_bio(struct btrfs_root *root,
3971 struct btrfs_device *device, 4023 struct btrfs_device *device,
3972 int rw, struct bio *bio) 4024 int rw, struct bio *bio)
3973{ 4025{
@@ -3979,7 +4031,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
3979 bio_get(bio); 4031 bio_get(bio);
3980 btrfsic_submit_bio(rw, bio); 4032 btrfsic_submit_bio(rw, bio);
3981 bio_put(bio); 4033 bio_put(bio);
3982 return 0; 4034 return;
3983 } 4035 }
3984 4036
3985 /* 4037 /*
@@ -4013,7 +4065,6 @@ static noinline int schedule_bio(struct btrfs_root *root,
4013 if (should_queue) 4065 if (should_queue)
4014 btrfs_queue_worker(&root->fs_info->submit_workers, 4066 btrfs_queue_worker(&root->fs_info->submit_workers,
4015 &device->work); 4067 &device->work);
4016 return 0;
4017} 4068}
4018 4069
4019int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, 4070int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
@@ -4036,7 +4087,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4036 4087
4037 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio, 4088 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4038 mirror_num); 4089 mirror_num);
4039 BUG_ON(ret); 4090 if (ret) /* -ENOMEM */
4091 return ret;
4040 4092
4041 total_devs = bbio->num_stripes; 4093 total_devs = bbio->num_stripes;
4042 if (map_length < length) { 4094 if (map_length < length) {
@@ -4055,7 +4107,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4055 while (dev_nr < total_devs) { 4107 while (dev_nr < total_devs) {
4056 if (dev_nr < total_devs - 1) { 4108 if (dev_nr < total_devs - 1) {
4057 bio = bio_clone(first_bio, GFP_NOFS); 4109 bio = bio_clone(first_bio, GFP_NOFS);
4058 BUG_ON(!bio); 4110 BUG_ON(!bio); /* -ENOMEM */
4059 } else { 4111 } else {
4060 bio = first_bio; 4112 bio = first_bio;
4061 } 4113 }
@@ -4209,13 +4261,13 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4209 write_lock(&map_tree->map_tree.lock); 4261 write_lock(&map_tree->map_tree.lock);
4210 ret = add_extent_mapping(&map_tree->map_tree, em); 4262 ret = add_extent_mapping(&map_tree->map_tree, em);
4211 write_unlock(&map_tree->map_tree.lock); 4263 write_unlock(&map_tree->map_tree.lock);
4212 BUG_ON(ret); 4264 BUG_ON(ret); /* Tree corruption */
4213 free_extent_map(em); 4265 free_extent_map(em);
4214 4266
4215 return 0; 4267 return 0;
4216} 4268}
4217 4269
4218static int fill_device_from_item(struct extent_buffer *leaf, 4270static void fill_device_from_item(struct extent_buffer *leaf,
4219 struct btrfs_dev_item *dev_item, 4271 struct btrfs_dev_item *dev_item,
4220 struct btrfs_device *device) 4272 struct btrfs_device *device)
4221{ 4273{
@@ -4232,8 +4284,6 @@ static int fill_device_from_item(struct extent_buffer *leaf,
4232 4284
4233 ptr = (unsigned long)btrfs_device_uuid(dev_item); 4285 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4234 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 4286 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4235
4236 return 0;
4237} 4287}
4238 4288
4239static int open_seed_devices(struct btrfs_root *root, u8 *fsid) 4289static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
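
volumes.c splits its failures three ways: helpers that genuinely cannot fail become void (run_scheduled_bios(), schedule_bio(), fill_device_from_item(), btrfs_cleanup_fs_uuids()), checks that only allocation failure or on-disk corruption can trip keep their BUG_ON() but gain a comment naming the cause, and everything else is propagated, with btrfs_init_new_device() and init_first_rw_device() unwinding through error_trans/abort labels. A small sketch of the "annotate the unrecoverable, return the rest" split, using an invented add_device()/update_device_item() pair rather than the btrfs functions:

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical on-disk update that may fail with a negative errno. */
static int update_device_item(const char *name)
{
	return name[0] ? 0 : -EIO;
}

static int add_device(const char *name)
{
	char *copy;
	int ret;

	copy = malloc(strlen(name) + 1);
	/*
	 * No sane recovery from allocation failure in this sketch, so it keeps
	 * an assert-style check annotated with the reason, the same convention
	 * as the annotated BUG_ON(ret) hunks above.
	 */
	assert(copy);				/* -ENOMEM */
	strcpy(copy, name);

	ret = update_device_item(copy);		/* ordinary failures are returned */
	if (ret)
		goto out;

	printf("added %s\n", copy);
out:
	free(copy);
	return ret;
}

int main(void)
{
	return add_device("sdb") ? 1 : 0;
}
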
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 19ac95048b88..bb6b03f97aaa 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -260,12 +260,12 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
260int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, 260int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
261 struct btrfs_fs_devices **fs_devices_ret); 261 struct btrfs_fs_devices **fs_devices_ret);
262int btrfs_close_devices(struct btrfs_fs_devices *fs_devices); 262int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
263int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices); 263void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);
264int btrfs_add_device(struct btrfs_trans_handle *trans, 264int btrfs_add_device(struct btrfs_trans_handle *trans,
265 struct btrfs_root *root, 265 struct btrfs_root *root,
266 struct btrfs_device *device); 266 struct btrfs_device *device);
267int btrfs_rm_device(struct btrfs_root *root, char *device_path); 267int btrfs_rm_device(struct btrfs_root *root, char *device_path);
268int btrfs_cleanup_fs_uuids(void); 268void btrfs_cleanup_fs_uuids(void);
269int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len); 269int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
270int btrfs_grow_device(struct btrfs_trans_handle *trans, 270int btrfs_grow_device(struct btrfs_trans_handle *trans,
271 struct btrfs_device *device, u64 new_size); 271 struct btrfs_device *device, u64 new_size);