Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/backref.c | 2
-rw-r--r-- | fs/btrfs/ctree.c | 17
-rw-r--r-- | fs/btrfs/ctree.h | 8
-rw-r--r-- | fs/btrfs/disk-io.c | 147
-rw-r--r-- | fs/btrfs/extent-tree.c | 153
-rw-r--r-- | fs/btrfs/extent_io.c | 36
-rw-r--r-- | fs/btrfs/extent_io.h | 2
-rw-r--r-- | fs/btrfs/free-space-cache.c | 65
-rw-r--r-- | fs/btrfs/inode.c | 8
-rw-r--r-- | fs/btrfs/ioctl.c | 17
-rw-r--r-- | fs/btrfs/scrub.c | 7
-rw-r--r-- | fs/btrfs/super.c | 6
-rw-r--r-- | fs/btrfs/transaction.c | 8
-rw-r--r-- | fs/btrfs/volumes.h | 6
14 files changed, 348 insertions, 134 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8855aad3929c..22c64fff1bd5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -683,7 +683,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref, | |||
683 | return PTR_ERR(fspath); | 683 | return PTR_ERR(fspath); |
684 | 684 | ||
685 | if (fspath > fspath_min) { | 685 | if (fspath > fspath_min) { |
686 | ipath->fspath->val[i] = (u64)fspath; | 686 | ipath->fspath->val[i] = (u64)(unsigned long)fspath; |
687 | ++ipath->fspath->elem_cnt; | 687 | ++ipath->fspath->elem_cnt; |
688 | ipath->fspath->bytes_left = fspath - fspath_min; | 688 | ipath->fspath->bytes_left = fspath - fspath_min; |
689 | } else { | 689 | } else { |
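The (u64)(unsigned long) double cast above recurs throughout this series (see the ioctl.c and scrub.c hunks further down): fspath->val[] is a u64 array that carries pointer values, and casting a pointer straight to u64 trips the "cast from pointer to integer of different size" warning on 32-bit builds. A minimal sketch of the round trip, with hypothetical variable names:

	char *fspath = buf + some_offset;          /* pointer to a path string */
	u64 slot;

	slot = (u64)(unsigned long)fspath;         /* pointer -> same-width integer -> u64 */
	fspath = (char *)(unsigned long)slot;      /* and back, again via unsigned long */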
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0fe615e4ea38..dede441bdeee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -514,10 +514,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans, | |||
514 | struct btrfs_root *root, | 514 | struct btrfs_root *root, |
515 | struct extent_buffer *buf) | 515 | struct extent_buffer *buf) |
516 | { | 516 | { |
517 | /* ensure we can see the force_cow */ | ||
518 | smp_rmb(); | ||
519 | |||
520 | /* | ||
521 | * We do not need to cow a block if | ||
522 | * 1) this block is not created or changed in this transaction; | ||
523 | * 2) this block does not belong to TREE_RELOC tree; | ||
524 | * 3) the root is not forced COW. | ||
525 | * | ||
526 | * What is forced COW: | ||
527 | * when we create snapshot during commiting the transaction, | ||
528 | * after we've finished coping src root, we must COW the shared | ||
529 | * block to ensure the metadata consistency. | ||
530 | */ | ||
517 | if (btrfs_header_generation(buf) == trans->transid && | 531 | if (btrfs_header_generation(buf) == trans->transid && |
518 | !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && | 532 | !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && |
519 | !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && | 533 | !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && |
520 | btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) | 534 | btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && |
535 | !root->force_cow) | ||
521 | return 0; | 536 | return 0; |
522 | return 1; | 537 | return 1; |
523 | } | 538 | } |
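The smp_rmb() added above pairs with smp_wmb() calls in transaction.c (see the create_pending_snapshot and commit_fs_roots hunks below). A condensed sketch of the handshake, not the full functions:

	/* snapshot side, during transaction commit (transaction.c) */
	root->force_cow = 1;		/* source root now shares blocks with the snapshot */
	smp_wmb();
	/* ... later in the same commit, once the root has been committed ... */
	root->force_cow = 0;
	smp_wmb();

	/* reader side, should_cow_block() above */
	smp_rmb();			/* make sure we see the latest force_cow value */
	if (/* existing generation/flag checks */ !root->force_cow)
		return 0;		/* block may be modified in place */
	return 1;			/* must COW */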
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b9ba59ff9292..50634abef9b4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -848,7 +848,8 @@ struct btrfs_free_cluster { | |||
848 | enum btrfs_caching_type { | 848 | enum btrfs_caching_type { |
849 | BTRFS_CACHE_NO = 0, | 849 | BTRFS_CACHE_NO = 0, |
850 | BTRFS_CACHE_STARTED = 1, | 850 | BTRFS_CACHE_STARTED = 1, |
851 | BTRFS_CACHE_FINISHED = 2, | 851 | BTRFS_CACHE_FAST = 2, |
852 | BTRFS_CACHE_FINISHED = 3, | ||
852 | }; | 853 | }; |
853 | 854 | ||
854 | enum btrfs_disk_cache_state { | 855 | enum btrfs_disk_cache_state { |
@@ -1271,6 +1272,8 @@ struct btrfs_root { | |||
1271 | * for stat. It may be used for more later | 1272 | * for stat. It may be used for more later |
1272 | */ | 1273 | */ |
1273 | dev_t anon_dev; | 1274 | dev_t anon_dev; |
1275 | |||
1276 | int force_cow; | ||
1274 | }; | 1277 | }; |
1275 | 1278 | ||
1276 | struct btrfs_ioctl_defrag_range_args { | 1279 | struct btrfs_ioctl_defrag_range_args { |
@@ -2366,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root, | |||
2366 | int btrfs_block_rsv_refill(struct btrfs_root *root, | 2369 | int btrfs_block_rsv_refill(struct btrfs_root *root, |
2367 | struct btrfs_block_rsv *block_rsv, | 2370 | struct btrfs_block_rsv *block_rsv, |
2368 | u64 min_reserved); | 2371 | u64 min_reserved); |
2372 | int btrfs_block_rsv_refill_noflush(struct btrfs_root *root, | ||
2373 | struct btrfs_block_rsv *block_rsv, | ||
2374 | u64 min_reserved); | ||
2369 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, | 2375 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, |
2370 | struct btrfs_block_rsv *dst_rsv, | 2376 | struct btrfs_block_rsv *dst_rsv, |
2371 | u64 num_bytes); | 2377 | u64 num_bytes); |
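Taken together with the cache_block_group() rework in extent-tree.c below, the new BTRFS_CACHE_FAST value turns per-block-group caching into roughly the following state machine (a summary, not code from the patch):

	/*
	 * BTRFS_CACHE_NO        nothing cached yet
	 *     |
	 * BTRFS_CACHE_FAST      one thread is trying the on-disk free space cache;
	 *     |                 other allocators wait on caching_ctl->wait
	 *     |-- load succeeded                 -> BTRFS_CACHE_FINISHED
	 *     |-- load failed, load_cache_only   -> back to BTRFS_CACHE_NO
	 *     `-- load failed or cache disabled  -> BTRFS_CACHE_STARTED
	 *                                           (caching_thread scans the extent
	 *                                            tree, then sets CACHE_FINISHED)
	 */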
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 62afe5c5694e..632f8f3cc9db 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -620,7 +620,7 @@ out: | |||
620 | 620 | ||
621 | static int btree_io_failed_hook(struct bio *failed_bio, | 621 | static int btree_io_failed_hook(struct bio *failed_bio, |
622 | struct page *page, u64 start, u64 end, | 622 | struct page *page, u64 start, u64 end, |
623 | u64 mirror_num, struct extent_state *state) | 623 | int mirror_num, struct extent_state *state) |
624 | { | 624 | { |
625 | struct extent_io_tree *tree; | 625 | struct extent_io_tree *tree; |
626 | unsigned long len; | 626 | unsigned long len; |
@@ -2573,22 +2573,10 @@ static int write_dev_supers(struct btrfs_device *device, | |||
2573 | int errors = 0; | 2573 | int errors = 0; |
2574 | u32 crc; | 2574 | u32 crc; |
2575 | u64 bytenr; | 2575 | u64 bytenr; |
2576 | int last_barrier = 0; | ||
2577 | 2576 | ||
2578 | if (max_mirrors == 0) | 2577 | if (max_mirrors == 0) |
2579 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; | 2578 | max_mirrors = BTRFS_SUPER_MIRROR_MAX; |
2580 | 2579 | ||
2581 | /* make sure only the last submit_bh does a barrier */ | ||
2582 | if (do_barriers) { | ||
2583 | for (i = 0; i < max_mirrors; i++) { | ||
2584 | bytenr = btrfs_sb_offset(i); | ||
2585 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= | ||
2586 | device->total_bytes) | ||
2587 | break; | ||
2588 | last_barrier = i; | ||
2589 | } | ||
2590 | } | ||
2591 | |||
2592 | for (i = 0; i < max_mirrors; i++) { | 2580 | for (i = 0; i < max_mirrors; i++) { |
2593 | bytenr = btrfs_sb_offset(i); | 2581 | bytenr = btrfs_sb_offset(i); |
2594 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) | 2582 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) |
@@ -2634,17 +2622,136 @@ static int write_dev_supers(struct btrfs_device *device, | |||
2634 | bh->b_end_io = btrfs_end_buffer_write_sync; | 2622 | bh->b_end_io = btrfs_end_buffer_write_sync; |
2635 | } | 2623 | } |
2636 | 2624 | ||
2637 | if (i == last_barrier && do_barriers) | 2625 | /* |
2638 | ret = submit_bh(WRITE_FLUSH_FUA, bh); | 2626 | * we fua the first super. The others we allow |
2639 | else | 2627 | * to go down lazy. |
2640 | ret = submit_bh(WRITE_SYNC, bh); | 2628 | */ |
2641 | 2629 | ret = submit_bh(WRITE_FUA, bh); | |
2642 | if (ret) | 2630 | if (ret) |
2643 | errors++; | 2631 | errors++; |
2644 | } | 2632 | } |
2645 | return errors < i ? 0 : -1; | 2633 | return errors < i ? 0 : -1; |
2646 | } | 2634 | } |
2647 | 2635 | ||
2636 | /* | ||
2637 | * endio for the write_dev_flush, this will wake anyone waiting | ||
2638 | * for the barrier when it is done | ||
2639 | */ | ||
2640 | static void btrfs_end_empty_barrier(struct bio *bio, int err) | ||
2641 | { | ||
2642 | if (err) { | ||
2643 | if (err == -EOPNOTSUPP) | ||
2644 | set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); | ||
2645 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | ||
2646 | } | ||
2647 | if (bio->bi_private) | ||
2648 | complete(bio->bi_private); | ||
2649 | bio_put(bio); | ||
2650 | } | ||
2651 | |||
2652 | /* | ||
2653 | * trigger flushes for one the devices. If you pass wait == 0, the flushes are | ||
2654 | * sent down. With wait == 1, it waits for the previous flush. | ||
2655 | * | ||
2656 | * any device where the flush fails with eopnotsupp are flagged as not-barrier | ||
2657 | * capable | ||
2658 | */ | ||
2659 | static int write_dev_flush(struct btrfs_device *device, int wait) | ||
2660 | { | ||
2661 | struct bio *bio; | ||
2662 | int ret = 0; | ||
2663 | |||
2664 | if (device->nobarriers) | ||
2665 | return 0; | ||
2666 | |||
2667 | if (wait) { | ||
2668 | bio = device->flush_bio; | ||
2669 | if (!bio) | ||
2670 | return 0; | ||
2671 | |||
2672 | wait_for_completion(&device->flush_wait); | ||
2673 | |||
2674 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { | ||
2675 | printk("btrfs: disabling barriers on dev %s\n", | ||
2676 | device->name); | ||
2677 | device->nobarriers = 1; | ||
2678 | } | ||
2679 | if (!bio_flagged(bio, BIO_UPTODATE)) { | ||
2680 | ret = -EIO; | ||
2681 | } | ||
2682 | |||
2683 | /* drop the reference from the wait == 0 run */ | ||
2684 | bio_put(bio); | ||
2685 | device->flush_bio = NULL; | ||
2686 | |||
2687 | return ret; | ||
2688 | } | ||
2689 | |||
2690 | /* | ||
2691 | * one reference for us, and we leave it for the | ||
2692 | * caller | ||
2693 | */ | ||
2694 | device->flush_bio = NULL;; | ||
2695 | bio = bio_alloc(GFP_NOFS, 0); | ||
2696 | if (!bio) | ||
2697 | return -ENOMEM; | ||
2698 | |||
2699 | bio->bi_end_io = btrfs_end_empty_barrier; | ||
2700 | bio->bi_bdev = device->bdev; | ||
2701 | init_completion(&device->flush_wait); | ||
2702 | bio->bi_private = &device->flush_wait; | ||
2703 | device->flush_bio = bio; | ||
2704 | |||
2705 | bio_get(bio); | ||
2706 | submit_bio(WRITE_FLUSH, bio); | ||
2707 | |||
2708 | return 0; | ||
2709 | } | ||
2710 | |||
2711 | /* | ||
2712 | * send an empty flush down to each device in parallel, | ||
2713 | * then wait for them | ||
2714 | */ | ||
2715 | static int barrier_all_devices(struct btrfs_fs_info *info) | ||
2716 | { | ||
2717 | struct list_head *head; | ||
2718 | struct btrfs_device *dev; | ||
2719 | int errors = 0; | ||
2720 | int ret; | ||
2721 | |||
2722 | /* send down all the barriers */ | ||
2723 | head = &info->fs_devices->devices; | ||
2724 | list_for_each_entry_rcu(dev, head, dev_list) { | ||
2725 | if (!dev->bdev) { | ||
2726 | errors++; | ||
2727 | continue; | ||
2728 | } | ||
2729 | if (!dev->in_fs_metadata || !dev->writeable) | ||
2730 | continue; | ||
2731 | |||
2732 | ret = write_dev_flush(dev, 0); | ||
2733 | if (ret) | ||
2734 | errors++; | ||
2735 | } | ||
2736 | |||
2737 | /* wait for all the barriers */ | ||
2738 | list_for_each_entry_rcu(dev, head, dev_list) { | ||
2739 | if (!dev->bdev) { | ||
2740 | errors++; | ||
2741 | continue; | ||
2742 | } | ||
2743 | if (!dev->in_fs_metadata || !dev->writeable) | ||
2744 | continue; | ||
2745 | |||
2746 | ret = write_dev_flush(dev, 1); | ||
2747 | if (ret) | ||
2748 | errors++; | ||
2749 | } | ||
2750 | if (errors) | ||
2751 | return -EIO; | ||
2752 | return 0; | ||
2753 | } | ||
2754 | |||
2648 | int write_all_supers(struct btrfs_root *root, int max_mirrors) | 2755 | int write_all_supers(struct btrfs_root *root, int max_mirrors) |
2649 | { | 2756 | { |
2650 | struct list_head *head; | 2757 | struct list_head *head; |
@@ -2666,6 +2773,10 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) | |||
2666 | 2773 | ||
2667 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 2774 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); |
2668 | head = &root->fs_info->fs_devices->devices; | 2775 | head = &root->fs_info->fs_devices->devices; |
2776 | |||
2777 | if (do_barriers) | ||
2778 | barrier_all_devices(root->fs_info); | ||
2779 | |||
2669 | list_for_each_entry_rcu(dev, head, dev_list) { | 2780 | list_for_each_entry_rcu(dev, head, dev_list) { |
2670 | if (!dev->bdev) { | 2781 | if (!dev->bdev) { |
2671 | total_errors++; | 2782 | total_errors++; |
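write_dev_flush() above is an instance of the usual "empty flush bio plus completion" idiom: phase one (wait == 0) queues a zero-payload WRITE_FLUSH bio per device, phase two (wait == 1) sleeps on the completion. A stripped-down, standalone sketch of the same pattern (hypothetical names, no EOPNOTSUPP handling):

	/* completion callback */
	static void flush_done(struct bio *bio, int err)
	{
		complete(bio->bi_private);		/* wake whoever is waiting */
	}

	/* in the caller */
	struct completion done;
	struct bio *bio = bio_alloc(GFP_NOFS, 0);	/* zero data pages: flush only */

	init_completion(&done);
	bio->bi_bdev = bdev;
	bio->bi_end_io = flush_done;
	bio->bi_private = &done;

	submit_bio(WRITE_FLUSH, bio);			/* ask the device to flush its write cache */
	wait_for_completion(&done);			/* ... possibly after kicking other devices too */
	bio_put(bio);					/* drop the bio_alloc() reference */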
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b232150b5b6b..f0d5718d2587 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, | |||
467 | struct btrfs_root *root, | 467 | struct btrfs_root *root, |
468 | int load_cache_only) | 468 | int load_cache_only) |
469 | { | 469 | { |
470 | DEFINE_WAIT(wait); | ||
470 | struct btrfs_fs_info *fs_info = cache->fs_info; | 471 | struct btrfs_fs_info *fs_info = cache->fs_info; |
471 | struct btrfs_caching_control *caching_ctl; | 472 | struct btrfs_caching_control *caching_ctl; |
472 | int ret = 0; | 473 | int ret = 0; |
473 | 474 | ||
474 | smp_mb(); | 475 | caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); |
475 | if (cache->cached != BTRFS_CACHE_NO) | 476 | BUG_ON(!caching_ctl); |
477 | |||
478 | INIT_LIST_HEAD(&caching_ctl->list); | ||
479 | mutex_init(&caching_ctl->mutex); | ||
480 | init_waitqueue_head(&caching_ctl->wait); | ||
481 | caching_ctl->block_group = cache; | ||
482 | caching_ctl->progress = cache->key.objectid; | ||
483 | atomic_set(&caching_ctl->count, 1); | ||
484 | caching_ctl->work.func = caching_thread; | ||
485 | |||
486 | spin_lock(&cache->lock); | ||
487 | /* | ||
488 | * This should be a rare occasion, but this could happen I think in the | ||
489 | * case where one thread starts to load the space cache info, and then | ||
490 | * some other thread starts a transaction commit which tries to do an | ||
491 | * allocation while the other thread is still loading the space cache | ||
492 | * info. The previous loop should have kept us from choosing this block | ||
493 | * group, but if we've moved to the state where we will wait on caching | ||
494 | * block groups we need to first check if we're doing a fast load here, | ||
495 | * so we can wait for it to finish, otherwise we could end up allocating | ||
496 | * from a block group who's cache gets evicted for one reason or | ||
497 | * another. | ||
498 | */ | ||
499 | while (cache->cached == BTRFS_CACHE_FAST) { | ||
500 | struct btrfs_caching_control *ctl; | ||
501 | |||
502 | ctl = cache->caching_ctl; | ||
503 | atomic_inc(&ctl->count); | ||
504 | prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE); | ||
505 | spin_unlock(&cache->lock); | ||
506 | |||
507 | schedule(); | ||
508 | |||
509 | finish_wait(&ctl->wait, &wait); | ||
510 | put_caching_control(ctl); | ||
511 | spin_lock(&cache->lock); | ||
512 | } | ||
513 | |||
514 | if (cache->cached != BTRFS_CACHE_NO) { | ||
515 | spin_unlock(&cache->lock); | ||
516 | kfree(caching_ctl); | ||
476 | return 0; | 517 | return 0; |
518 | } | ||
519 | WARN_ON(cache->caching_ctl); | ||
520 | cache->caching_ctl = caching_ctl; | ||
521 | cache->cached = BTRFS_CACHE_FAST; | ||
522 | spin_unlock(&cache->lock); | ||
477 | 523 | ||
478 | /* | 524 | /* |
479 | * We can't do the read from on-disk cache during a commit since we need | 525 | * We can't do the read from on-disk cache during a commit since we need |
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, | |||
484 | if (trans && (!trans->transaction->in_commit) && | 530 | if (trans && (!trans->transaction->in_commit) && |
485 | (root && root != root->fs_info->tree_root) && | 531 | (root && root != root->fs_info->tree_root) && |
486 | btrfs_test_opt(root, SPACE_CACHE)) { | 532 | btrfs_test_opt(root, SPACE_CACHE)) { |
487 | spin_lock(&cache->lock); | ||
488 | if (cache->cached != BTRFS_CACHE_NO) { | ||
489 | spin_unlock(&cache->lock); | ||
490 | return 0; | ||
491 | } | ||
492 | cache->cached = BTRFS_CACHE_STARTED; | ||
493 | spin_unlock(&cache->lock); | ||
494 | |||
495 | ret = load_free_space_cache(fs_info, cache); | 533 | ret = load_free_space_cache(fs_info, cache); |
496 | 534 | ||
497 | spin_lock(&cache->lock); | 535 | spin_lock(&cache->lock); |
498 | if (ret == 1) { | 536 | if (ret == 1) { |
537 | cache->caching_ctl = NULL; | ||
499 | cache->cached = BTRFS_CACHE_FINISHED; | 538 | cache->cached = BTRFS_CACHE_FINISHED; |
500 | cache->last_byte_to_unpin = (u64)-1; | 539 | cache->last_byte_to_unpin = (u64)-1; |
501 | } else { | 540 | } else { |
502 | cache->cached = BTRFS_CACHE_NO; | 541 | if (load_cache_only) { |
542 | cache->caching_ctl = NULL; | ||
543 | cache->cached = BTRFS_CACHE_NO; | ||
544 | } else { | ||
545 | cache->cached = BTRFS_CACHE_STARTED; | ||
546 | } | ||
503 | } | 547 | } |
504 | spin_unlock(&cache->lock); | 548 | spin_unlock(&cache->lock); |
549 | wake_up(&caching_ctl->wait); | ||
505 | if (ret == 1) { | 550 | if (ret == 1) { |
551 | put_caching_control(caching_ctl); | ||
506 | free_excluded_extents(fs_info->extent_root, cache); | 552 | free_excluded_extents(fs_info->extent_root, cache); |
507 | return 0; | 553 | return 0; |
508 | } | 554 | } |
555 | } else { | ||
556 | /* | ||
557 | * We are not going to do the fast caching, set cached to the | ||
558 | * appropriate value and wakeup any waiters. | ||
559 | */ | ||
560 | spin_lock(&cache->lock); | ||
561 | if (load_cache_only) { | ||
562 | cache->caching_ctl = NULL; | ||
563 | cache->cached = BTRFS_CACHE_NO; | ||
564 | } else { | ||
565 | cache->cached = BTRFS_CACHE_STARTED; | ||
566 | } | ||
567 | spin_unlock(&cache->lock); | ||
568 | wake_up(&caching_ctl->wait); | ||
509 | } | 569 | } |
510 | 570 | ||
511 | if (load_cache_only) | 571 | if (load_cache_only) { |
512 | return 0; | 572 | put_caching_control(caching_ctl); |
513 | |||
514 | caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); | ||
515 | BUG_ON(!caching_ctl); | ||
516 | |||
517 | INIT_LIST_HEAD(&caching_ctl->list); | ||
518 | mutex_init(&caching_ctl->mutex); | ||
519 | init_waitqueue_head(&caching_ctl->wait); | ||
520 | caching_ctl->block_group = cache; | ||
521 | caching_ctl->progress = cache->key.objectid; | ||
522 | /* one for caching kthread, one for caching block group list */ | ||
523 | atomic_set(&caching_ctl->count, 2); | ||
524 | caching_ctl->work.func = caching_thread; | ||
525 | |||
526 | spin_lock(&cache->lock); | ||
527 | if (cache->cached != BTRFS_CACHE_NO) { | ||
528 | spin_unlock(&cache->lock); | ||
529 | kfree(caching_ctl); | ||
530 | return 0; | 573 | return 0; |
531 | } | 574 | } |
532 | cache->caching_ctl = caching_ctl; | ||
533 | cache->cached = BTRFS_CACHE_STARTED; | ||
534 | spin_unlock(&cache->lock); | ||
535 | 575 | ||
536 | down_write(&fs_info->extent_commit_sem); | 576 | down_write(&fs_info->extent_commit_sem); |
577 | atomic_inc(&caching_ctl->count); | ||
537 | list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); | 578 | list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); |
538 | up_write(&fs_info->extent_commit_sem); | 579 | up_write(&fs_info->extent_commit_sem); |
539 | 580 | ||
@@ -3847,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root, | |||
3847 | return ret; | 3888 | return ret; |
3848 | } | 3889 | } |
3849 | 3890 | ||
3850 | int btrfs_block_rsv_refill(struct btrfs_root *root, | 3891 | static inline int __btrfs_block_rsv_refill(struct btrfs_root *root, |
3851 | struct btrfs_block_rsv *block_rsv, | 3892 | struct btrfs_block_rsv *block_rsv, |
3852 | u64 min_reserved) | 3893 | u64 min_reserved, int flush) |
3853 | { | 3894 | { |
3854 | u64 num_bytes = 0; | 3895 | u64 num_bytes = 0; |
3855 | int ret = -ENOSPC; | 3896 | int ret = -ENOSPC; |
@@ -3868,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root, | |||
3868 | if (!ret) | 3909 | if (!ret) |
3869 | return 0; | 3910 | return 0; |
3870 | 3911 | ||
3871 | ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); | 3912 | ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); |
3872 | if (!ret) { | 3913 | if (!ret) { |
3873 | block_rsv_add_bytes(block_rsv, num_bytes, 0); | 3914 | block_rsv_add_bytes(block_rsv, num_bytes, 0); |
3874 | return 0; | 3915 | return 0; |
@@ -3877,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root, | |||
3877 | return ret; | 3918 | return ret; |
3878 | } | 3919 | } |
3879 | 3920 | ||
3921 | int btrfs_block_rsv_refill(struct btrfs_root *root, | ||
3922 | struct btrfs_block_rsv *block_rsv, | ||
3923 | u64 min_reserved) | ||
3924 | { | ||
3925 | return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1); | ||
3926 | } | ||
3927 | |||
3928 | int btrfs_block_rsv_refill_noflush(struct btrfs_root *root, | ||
3929 | struct btrfs_block_rsv *block_rsv, | ||
3930 | u64 min_reserved) | ||
3931 | { | ||
3932 | return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0); | ||
3933 | } | ||
3934 | |||
3880 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, | 3935 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, |
3881 | struct btrfs_block_rsv *dst_rsv, | 3936 | struct btrfs_block_rsv *dst_rsv, |
3882 | u64 num_bytes) | 3937 | u64 num_bytes) |
@@ -5178,13 +5233,15 @@ search: | |||
5178 | } | 5233 | } |
5179 | 5234 | ||
5180 | have_block_group: | 5235 | have_block_group: |
5181 | if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { | 5236 | cached = block_group_cache_done(block_group); |
5237 | if (unlikely(!cached)) { | ||
5182 | u64 free_percent; | 5238 | u64 free_percent; |
5183 | 5239 | ||
5240 | found_uncached_bg = true; | ||
5184 | ret = cache_block_group(block_group, trans, | 5241 | ret = cache_block_group(block_group, trans, |
5185 | orig_root, 1); | 5242 | orig_root, 1); |
5186 | if (block_group->cached == BTRFS_CACHE_FINISHED) | 5243 | if (block_group->cached == BTRFS_CACHE_FINISHED) |
5187 | goto have_block_group; | 5244 | goto alloc; |
5188 | 5245 | ||
5189 | free_percent = btrfs_block_group_used(&block_group->item); | 5246 | free_percent = btrfs_block_group_used(&block_group->item); |
5190 | free_percent *= 100; | 5247 | free_percent *= 100; |
@@ -5206,7 +5263,6 @@ have_block_group: | |||
5206 | orig_root, 0); | 5263 | orig_root, 0); |
5207 | BUG_ON(ret); | 5264 | BUG_ON(ret); |
5208 | } | 5265 | } |
5209 | found_uncached_bg = true; | ||
5210 | 5266 | ||
5211 | /* | 5267 | /* |
5212 | * If loop is set for cached only, try the next block | 5268 | * If loop is set for cached only, try the next block |
@@ -5216,17 +5272,14 @@ have_block_group: | |||
5216 | goto loop; | 5272 | goto loop; |
5217 | } | 5273 | } |
5218 | 5274 | ||
5219 | cached = block_group_cache_done(block_group); | 5275 | alloc: |
5220 | if (unlikely(!cached)) | ||
5221 | found_uncached_bg = true; | ||
5222 | |||
5223 | if (unlikely(block_group->ro)) | 5276 | if (unlikely(block_group->ro)) |
5224 | goto loop; | 5277 | goto loop; |
5225 | 5278 | ||
5226 | spin_lock(&block_group->free_space_ctl->tree_lock); | 5279 | spin_lock(&block_group->free_space_ctl->tree_lock); |
5227 | if (cached && | 5280 | if (cached && |
5228 | block_group->free_space_ctl->free_space < | 5281 | block_group->free_space_ctl->free_space < |
5229 | num_bytes + empty_size) { | 5282 | num_bytes + empty_cluster + empty_size) { |
5230 | spin_unlock(&block_group->free_space_ctl->tree_lock); | 5283 | spin_unlock(&block_group->free_space_ctl->tree_lock); |
5231 | goto loop; | 5284 | goto loop; |
5232 | } | 5285 | } |
@@ -5247,12 +5300,10 @@ have_block_group: | |||
5247 | * people trying to start a new cluster | 5300 | * people trying to start a new cluster |
5248 | */ | 5301 | */ |
5249 | spin_lock(&last_ptr->refill_lock); | 5302 | spin_lock(&last_ptr->refill_lock); |
5250 | if (last_ptr->block_group && | 5303 | if (!last_ptr->block_group || |
5251 | (last_ptr->block_group->ro || | 5304 | last_ptr->block_group->ro || |
5252 | !block_group_bits(last_ptr->block_group, data))) { | 5305 | !block_group_bits(last_ptr->block_group, data)) |
5253 | offset = 0; | ||
5254 | goto refill_cluster; | 5306 | goto refill_cluster; |
5255 | } | ||
5256 | 5307 | ||
5257 | offset = btrfs_alloc_from_cluster(block_group, last_ptr, | 5308 | offset = btrfs_alloc_from_cluster(block_group, last_ptr, |
5258 | num_bytes, search_start); | 5309 | num_bytes, search_start); |
@@ -5303,7 +5354,7 @@ refill_cluster: | |||
5303 | /* allocate a cluster in this block group */ | 5354 | /* allocate a cluster in this block group */ |
5304 | ret = btrfs_find_space_cluster(trans, root, | 5355 | ret = btrfs_find_space_cluster(trans, root, |
5305 | block_group, last_ptr, | 5356 | block_group, last_ptr, |
5306 | offset, num_bytes, | 5357 | search_start, num_bytes, |
5307 | empty_cluster + empty_size); | 5358 | empty_cluster + empty_size); |
5308 | if (ret == 0) { | 5359 | if (ret == 0) { |
5309 | /* | 5360 | /* |
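The _noflush variant added here matters for callers that cannot afford to kick off delalloc flushing while refilling a reservation; inode eviction is the first user (see the inode.c hunk below). Roughly, the call site becomes:

	/* btrfs_evict_inode(): don't start flushing from the eviction path,
	 * just take whatever space is already reclaimable */
	ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
	if (ret) {
		/* not enough without flushing: fall back to stealing from the
		 * global reserve, as the surrounding eviction code already does */
	}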
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1f87c4d0e7a0..be1bf627a14b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2285,16 +2285,22 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
2285 | clean_io_failure(start, page); | 2285 | clean_io_failure(start, page); |
2286 | } | 2286 | } |
2287 | if (!uptodate) { | 2287 | if (!uptodate) { |
2288 | u64 failed_mirror; | 2288 | int failed_mirror; |
2289 | failed_mirror = (u64)bio->bi_bdev; | 2289 | failed_mirror = (int)(unsigned long)bio->bi_bdev; |
2290 | if (tree->ops && tree->ops->readpage_io_failed_hook) | 2290 | /* |
2291 | ret = tree->ops->readpage_io_failed_hook( | 2291 | * The generic bio_readpage_error handles errors the |
2292 | bio, page, start, end, | 2292 | * following way: If possible, new read requests are |
2293 | failed_mirror, state); | 2293 | * created and submitted and will end up in |
2294 | else | 2294 | * end_bio_extent_readpage as well (if we're lucky, not |
2295 | ret = bio_readpage_error(bio, page, start, end, | 2295 | * in the !uptodate case). In that case it returns 0 and |
2296 | failed_mirror, NULL); | 2296 | * we just go on with the next page in our bio. If it |
2297 | * can't handle the error it will return -EIO and we | ||
2298 | * remain responsible for that page. | ||
2299 | */ | ||
2300 | ret = bio_readpage_error(bio, page, start, end, | ||
2301 | failed_mirror, NULL); | ||
2297 | if (ret == 0) { | 2302 | if (ret == 0) { |
2303 | error_handled: | ||
2298 | uptodate = | 2304 | uptodate = |
2299 | test_bit(BIO_UPTODATE, &bio->bi_flags); | 2305 | test_bit(BIO_UPTODATE, &bio->bi_flags); |
2300 | if (err) | 2306 | if (err) |
@@ -2302,6 +2308,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
2302 | uncache_state(&cached); | 2308 | uncache_state(&cached); |
2303 | continue; | 2309 | continue; |
2304 | } | 2310 | } |
2311 | if (tree->ops && tree->ops->readpage_io_failed_hook) { | ||
2312 | ret = tree->ops->readpage_io_failed_hook( | ||
2313 | bio, page, start, end, | ||
2314 | failed_mirror, state); | ||
2315 | if (ret == 0) | ||
2316 | goto error_handled; | ||
2317 | } | ||
2305 | } | 2318 | } |
2306 | 2319 | ||
2307 | if (uptodate) { | 2320 | if (uptodate) { |
@@ -3366,6 +3379,9 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3366 | return -ENOMEM; | 3379 | return -ENOMEM; |
3367 | path->leave_spinning = 1; | 3380 | path->leave_spinning = 1; |
3368 | 3381 | ||
3382 | start = ALIGN(start, BTRFS_I(inode)->root->sectorsize); | ||
3383 | len = ALIGN(len, BTRFS_I(inode)->root->sectorsize); | ||
3384 | |||
3369 | /* | 3385 | /* |
3370 | * lookup the last file extent. We're not using i_size here | 3386 | * lookup the last file extent. We're not using i_size here |
3371 | * because there might be preallocation past i_size | 3387 | * because there might be preallocation past i_size |
@@ -3413,7 +3429,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3413 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, | 3429 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, |
3414 | &cached_state, GFP_NOFS); | 3430 | &cached_state, GFP_NOFS); |
3415 | 3431 | ||
3416 | em = get_extent_skip_holes(inode, off, last_for_get_extent, | 3432 | em = get_extent_skip_holes(inode, start, last_for_get_extent, |
3417 | get_extent); | 3433 | get_extent); |
3418 | if (!em) | 3434 | if (!em) |
3419 | goto out; | 3435 | goto out; |
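The failed_mirror assignment above only reads sensibly if you know that btrfs's read path stashes the mirror number in the bio's otherwise-unused bi_bdev field before completion; the storing side is not part of this diff, but the pairing is roughly:

	/* store side (in the btrfs submission/completion path, not shown in this hunk) */
	bio->bi_bdev = (struct block_device *)(unsigned long)mirror_num;

	/* retrieve side, end_bio_extent_readpage() above */
	int failed_mirror = (int)(unsigned long)bio->bi_bdev;

The type change from u64 to int simply matches how the value is produced and consumed.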
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index feb9be0e23bc..7604c3001322 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -70,7 +70,7 @@ struct extent_io_ops { | |||
70 | unsigned long bio_flags); | 70 | unsigned long bio_flags); |
71 | int (*readpage_io_hook)(struct page *page, u64 start, u64 end); | 71 | int (*readpage_io_hook)(struct page *page, u64 start, u64 end); |
72 | int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, | 72 | int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, |
73 | u64 start, u64 end, u64 failed_mirror, | 73 | u64 start, u64 end, int failed_mirror, |
74 | struct extent_state *state); | 74 | struct extent_state *state); |
75 | int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, | 75 | int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, |
76 | u64 start, u64 end, | 76 | u64 start, u64 end, |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 181760f9d2ab..ec23d43d0c35 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode, | |||
351 | } | 351 | } |
352 | } | 352 | } |
353 | 353 | ||
354 | for (i = 0; i < io_ctl->num_pages; i++) { | ||
355 | clear_page_dirty_for_io(io_ctl->pages[i]); | ||
356 | set_page_extent_mapped(io_ctl->pages[i]); | ||
357 | } | ||
358 | |||
354 | return 0; | 359 | return 0; |
355 | } | 360 | } |
356 | 361 | ||
@@ -1465,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, | |||
1465 | { | 1470 | { |
1466 | info->offset = offset_to_bitmap(ctl, offset); | 1471 | info->offset = offset_to_bitmap(ctl, offset); |
1467 | info->bytes = 0; | 1472 | info->bytes = 0; |
1473 | INIT_LIST_HEAD(&info->list); | ||
1468 | link_free_space(ctl, info); | 1474 | link_free_space(ctl, info); |
1469 | ctl->total_bitmaps++; | 1475 | ctl->total_bitmaps++; |
1470 | 1476 | ||
@@ -1844,7 +1850,13 @@ again: | |||
1844 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), | 1850 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
1845 | 1, 0); | 1851 | 1, 0); |
1846 | if (!info) { | 1852 | if (!info) { |
1847 | WARN_ON(1); | 1853 | /* the tree logging code might be calling us before we |
1854 | * have fully loaded the free space rbtree for this | ||
1855 | * block group. So it is possible the entry won't | ||
1856 | * be in the rbtree yet at all. The caching code | ||
1857 | * will make sure not to put it in the rbtree if | ||
1858 | * the logging code has pinned it. | ||
1859 | */ | ||
1848 | goto out_lock; | 1860 | goto out_lock; |
1849 | } | 1861 | } |
1850 | } | 1862 | } |
@@ -2308,6 +2320,7 @@ again: | |||
2308 | 2320 | ||
2309 | if (!found) { | 2321 | if (!found) { |
2310 | start = i; | 2322 | start = i; |
2323 | cluster->max_size = 0; | ||
2311 | found = true; | 2324 | found = true; |
2312 | } | 2325 | } |
2313 | 2326 | ||
@@ -2451,16 +2464,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
2451 | { | 2464 | { |
2452 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | 2465 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
2453 | struct btrfs_free_space *entry; | 2466 | struct btrfs_free_space *entry; |
2454 | struct rb_node *node; | ||
2455 | int ret = -ENOSPC; | 2467 | int ret = -ENOSPC; |
2468 | u64 bitmap_offset = offset_to_bitmap(ctl, offset); | ||
2456 | 2469 | ||
2457 | if (ctl->total_bitmaps == 0) | 2470 | if (ctl->total_bitmaps == 0) |
2458 | return -ENOSPC; | 2471 | return -ENOSPC; |
2459 | 2472 | ||
2460 | /* | 2473 | /* |
2461 | * First check our cached list of bitmaps and see if there is an entry | 2474 | * The bitmap that covers offset won't be in the list unless offset |
2462 | * here that will work. | 2475 | * is just its start offset. |
2463 | */ | 2476 | */ |
2477 | entry = list_first_entry(bitmaps, struct btrfs_free_space, list); | ||
2478 | if (entry->offset != bitmap_offset) { | ||
2479 | entry = tree_search_offset(ctl, bitmap_offset, 1, 0); | ||
2480 | if (entry && list_empty(&entry->list)) | ||
2481 | list_add(&entry->list, bitmaps); | ||
2482 | } | ||
2483 | |||
2464 | list_for_each_entry(entry, bitmaps, list) { | 2484 | list_for_each_entry(entry, bitmaps, list) { |
2465 | if (entry->bytes < min_bytes) | 2485 | if (entry->bytes < min_bytes) |
2466 | continue; | 2486 | continue; |
@@ -2471,38 +2491,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
2471 | } | 2491 | } |
2472 | 2492 | ||
2473 | /* | 2493 | /* |
2474 | * If we do have entries on our list and we are here then we didn't find | 2494 | * The bitmaps list has all the bitmaps that record free space |
2475 | * anything, so go ahead and get the next entry after the last entry in | 2495 | * starting after offset, so no more search is required. |
2476 | * this list and start the search from there. | ||
2477 | */ | 2496 | */ |
2478 | if (!list_empty(bitmaps)) { | 2497 | return -ENOSPC; |
2479 | entry = list_entry(bitmaps->prev, struct btrfs_free_space, | ||
2480 | list); | ||
2481 | node = rb_next(&entry->offset_index); | ||
2482 | if (!node) | ||
2483 | return -ENOSPC; | ||
2484 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
2485 | goto search; | ||
2486 | } | ||
2487 | |||
2488 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); | ||
2489 | if (!entry) | ||
2490 | return -ENOSPC; | ||
2491 | |||
2492 | search: | ||
2493 | node = &entry->offset_index; | ||
2494 | do { | ||
2495 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
2496 | node = rb_next(&entry->offset_index); | ||
2497 | if (!entry->bitmap) | ||
2498 | continue; | ||
2499 | if (entry->bytes < min_bytes) | ||
2500 | continue; | ||
2501 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, | ||
2502 | bytes, min_bytes); | ||
2503 | } while (ret && node); | ||
2504 | |||
2505 | return ret; | ||
2506 | } | 2498 | } |
2507 | 2499 | ||
2508 | /* | 2500 | /* |
@@ -2520,8 +2512,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2520 | u64 offset, u64 bytes, u64 empty_size) | 2512 | u64 offset, u64 bytes, u64 empty_size) |
2521 | { | 2513 | { |
2522 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | 2514 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
2523 | struct list_head bitmaps; | ||
2524 | struct btrfs_free_space *entry, *tmp; | 2515 | struct btrfs_free_space *entry, *tmp; |
2516 | LIST_HEAD(bitmaps); | ||
2525 | u64 min_bytes; | 2517 | u64 min_bytes; |
2526 | int ret; | 2518 | int ret; |
2527 | 2519 | ||
@@ -2560,7 +2552,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2560 | goto out; | 2552 | goto out; |
2561 | } | 2553 | } |
2562 | 2554 | ||
2563 | INIT_LIST_HEAD(&bitmaps); | ||
2564 | ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, | 2555 | ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, |
2565 | bytes, min_bytes); | 2556 | bytes, min_bytes); |
2566 | if (ret) | 2557 | if (ret) |
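The INIT_LIST_HEAD(&info->list) added to add_new_bitmap() is what makes the new list_empty() test in setup_cluster_bitmap() safe: list_empty() on a never-initialized list_head is undefined. The two added lines work together, roughly:

	/* add_new_bitmap(): every bitmap entry starts with a valid, empty list head */
	INIT_LIST_HEAD(&info->list);

	/* setup_cluster_bitmap(): only then is this test meaningful */
	if (entry && list_empty(&entry->list))
		list_add(&entry->list, bitmaps);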
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 116ab67a06df..2c984f7d4c2a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3490,7 +3490,7 @@ void btrfs_evict_inode(struct inode *inode) | |||
3490 | * doing the truncate. | 3490 | * doing the truncate. |
3491 | */ | 3491 | */ |
3492 | while (1) { | 3492 | while (1) { |
3493 | ret = btrfs_block_rsv_refill(root, rsv, min_size); | 3493 | ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size); |
3494 | 3494 | ||
3495 | /* | 3495 | /* |
3496 | * Try and steal from the global reserve since we will | 3496 | * Try and steal from the global reserve since we will |
@@ -6794,11 +6794,13 @@ static int btrfs_getattr(struct vfsmount *mnt, | |||
6794 | struct dentry *dentry, struct kstat *stat) | 6794 | struct dentry *dentry, struct kstat *stat) |
6795 | { | 6795 | { |
6796 | struct inode *inode = dentry->d_inode; | 6796 | struct inode *inode = dentry->d_inode; |
6797 | u32 blocksize = inode->i_sb->s_blocksize; | ||
6798 | |||
6797 | generic_fillattr(inode, stat); | 6799 | generic_fillattr(inode, stat); |
6798 | stat->dev = BTRFS_I(inode)->root->anon_dev; | 6800 | stat->dev = BTRFS_I(inode)->root->anon_dev; |
6799 | stat->blksize = PAGE_CACHE_SIZE; | 6801 | stat->blksize = PAGE_CACHE_SIZE; |
6800 | stat->blocks = (inode_get_bytes(inode) + | 6802 | stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + |
6801 | BTRFS_I(inode)->delalloc_bytes) >> 9; | 6803 | ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9; |
6802 | return 0; | 6804 | return 0; |
6803 | } | 6805 | } |
6804 | 6806 | ||
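The getattr change rounds both byte counts up to the filesystem block size before converting to 512-byte sectors. A quick worked example with a 4 KiB block size and made-up byte counts:

	/*
	 * inode_get_bytes() = 5000, delalloc_bytes = 100, blocksize = 4096:
	 *
	 *   ALIGN(5000, 4096) = 8192
	 *   ALIGN( 100, 4096) = 4096
	 *   stat->blocks = (8192 + 4096) >> 9 = 24    512-byte sectors
	 *
	 * the old code reported (5000 + 100) >> 9 = 9, under-reporting the
	 * space the delalloc data will occupy once it is written out.
	 */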
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4a34c472f126..72d461656f60 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1216,12 +1216,12 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1216 | *devstr = '\0'; | 1216 | *devstr = '\0'; |
1217 | devstr = vol_args->name; | 1217 | devstr = vol_args->name; |
1218 | devid = simple_strtoull(devstr, &end, 10); | 1218 | devid = simple_strtoull(devstr, &end, 10); |
1219 | printk(KERN_INFO "resizing devid %llu\n", | 1219 | printk(KERN_INFO "btrfs: resizing devid %llu\n", |
1220 | (unsigned long long)devid); | 1220 | (unsigned long long)devid); |
1221 | } | 1221 | } |
1222 | device = btrfs_find_device(root, devid, NULL, NULL); | 1222 | device = btrfs_find_device(root, devid, NULL, NULL); |
1223 | if (!device) { | 1223 | if (!device) { |
1224 | printk(KERN_INFO "resizer unable to find device %llu\n", | 1224 | printk(KERN_INFO "btrfs: resizer unable to find device %llu\n", |
1225 | (unsigned long long)devid); | 1225 | (unsigned long long)devid); |
1226 | ret = -EINVAL; | 1226 | ret = -EINVAL; |
1227 | goto out_unlock; | 1227 | goto out_unlock; |
@@ -1267,7 +1267,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1267 | do_div(new_size, root->sectorsize); | 1267 | do_div(new_size, root->sectorsize); |
1268 | new_size *= root->sectorsize; | 1268 | new_size *= root->sectorsize; |
1269 | 1269 | ||
1270 | printk(KERN_INFO "new size for %s is %llu\n", | 1270 | printk(KERN_INFO "btrfs: new size for %s is %llu\n", |
1271 | device->name, (unsigned long long)new_size); | 1271 | device->name, (unsigned long long)new_size); |
1272 | 1272 | ||
1273 | if (new_size > old_size) { | 1273 | if (new_size > old_size) { |
@@ -1278,7 +1278,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1278 | } | 1278 | } |
1279 | ret = btrfs_grow_device(trans, device, new_size); | 1279 | ret = btrfs_grow_device(trans, device, new_size); |
1280 | btrfs_commit_transaction(trans, root); | 1280 | btrfs_commit_transaction(trans, root); |
1281 | } else { | 1281 | } else if (new_size < old_size) { |
1282 | ret = btrfs_shrink_device(device, new_size); | 1282 | ret = btrfs_shrink_device(device, new_size); |
1283 | } | 1283 | } |
1284 | 1284 | ||
@@ -2930,11 +2930,13 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) | |||
2930 | goto out; | 2930 | goto out; |
2931 | 2931 | ||
2932 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) { | 2932 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) { |
2933 | rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val; | 2933 | rel_ptr = ipath->fspath->val[i] - |
2934 | (u64)(unsigned long)ipath->fspath->val; | ||
2934 | ipath->fspath->val[i] = rel_ptr; | 2935 | ipath->fspath->val[i] = rel_ptr; |
2935 | } | 2936 | } |
2936 | 2937 | ||
2937 | ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size); | 2938 | ret = copy_to_user((void *)(unsigned long)ipa->fspath, |
2939 | (void *)(unsigned long)ipath->fspath, size); | ||
2938 | if (ret) { | 2940 | if (ret) { |
2939 | ret = -EFAULT; | 2941 | ret = -EFAULT; |
2940 | goto out; | 2942 | goto out; |
@@ -3017,7 +3019,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, | |||
3017 | if (ret < 0) | 3019 | if (ret < 0) |
3018 | goto out; | 3020 | goto out; |
3019 | 3021 | ||
3020 | ret = copy_to_user((void *)loi->inodes, (void *)inodes, size); | 3022 | ret = copy_to_user((void *)(unsigned long)loi->inodes, |
3023 | (void *)(unsigned long)inodes, size); | ||
3021 | if (ret) | 3024 | if (ret) |
3022 | ret = -EFAULT; | 3025 | ret = -EFAULT; |
3023 | 3026 | ||
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f4190f22edfb..c27bcb67f330 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
256 | btrfs_release_path(swarn->path); | 256 | btrfs_release_path(swarn->path); |
257 | 257 | ||
258 | ipath = init_ipath(4096, local_root, swarn->path); | 258 | ipath = init_ipath(4096, local_root, swarn->path); |
259 | if (IS_ERR(ipath)) { | ||
260 | ret = PTR_ERR(ipath); | ||
261 | ipath = NULL; | ||
262 | goto err; | ||
263 | } | ||
259 | ret = paths_from_inode(inum, ipath); | 264 | ret = paths_from_inode(inum, ipath); |
260 | 265 | ||
261 | if (ret < 0) | 266 | if (ret < 0) |
@@ -272,7 +277,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
272 | swarn->logical, swarn->dev->name, | 277 | swarn->logical, swarn->dev->name, |
273 | (unsigned long long)swarn->sector, root, inum, offset, | 278 | (unsigned long long)swarn->sector, root, inum, offset, |
274 | min(isize - offset, (u64)PAGE_SIZE), nlink, | 279 | min(isize - offset, (u64)PAGE_SIZE), nlink, |
275 | (char *)ipath->fspath->val[i]); | 280 | (char *)(unsigned long)ipath->fspath->val[i]); |
276 | 281 | ||
277 | free_ipath(ipath); | 282 | free_ipath(ipath); |
278 | return 0; | 283 | return 0; |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 17ee7fc5e64e..e28ad4baf483 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1057,7 +1057,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) | |||
1057 | int i = 0, nr_devices; | 1057 | int i = 0, nr_devices; |
1058 | int ret; | 1058 | int ret; |
1059 | 1059 | ||
1060 | nr_devices = fs_info->fs_devices->rw_devices; | 1060 | nr_devices = fs_info->fs_devices->open_devices; |
1061 | BUG_ON(!nr_devices); | 1061 | BUG_ON(!nr_devices); |
1062 | 1062 | ||
1063 | devices_info = kmalloc(sizeof(*devices_info) * nr_devices, | 1063 | devices_info = kmalloc(sizeof(*devices_info) * nr_devices, |
@@ -1079,8 +1079,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) | |||
1079 | else | 1079 | else |
1080 | min_stripe_size = BTRFS_STRIPE_LEN; | 1080 | min_stripe_size = BTRFS_STRIPE_LEN; |
1081 | 1081 | ||
1082 | list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { | 1082 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
1083 | if (!device->in_fs_metadata) | 1083 | if (!device->in_fs_metadata || !device->bdev) |
1084 | continue; | 1084 | continue; |
1085 | 1085 | ||
1086 | avail_space = device->total_bytes - device->bytes_used; | 1086 | avail_space = device->total_bytes - device->bytes_used; |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 6a0574e923bc..81376d94cd3c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -785,6 +785,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, | |||
785 | 785 | ||
786 | btrfs_save_ino_cache(root, trans); | 786 | btrfs_save_ino_cache(root, trans); |
787 | 787 | ||
788 | /* see comments in should_cow_block() */ | ||
789 | root->force_cow = 0; | ||
790 | smp_wmb(); | ||
791 | |||
788 | if (root->commit_root != root->node) { | 792 | if (root->commit_root != root->node) { |
789 | mutex_lock(&root->fs_commit_mutex); | 793 | mutex_lock(&root->fs_commit_mutex); |
790 | switch_commit_root(root); | 794 | switch_commit_root(root); |
@@ -947,6 +951,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
947 | btrfs_tree_unlock(old); | 951 | btrfs_tree_unlock(old); |
948 | free_extent_buffer(old); | 952 | free_extent_buffer(old); |
949 | 953 | ||
954 | /* see comments in should_cow_block() */ | ||
955 | root->force_cow = 1; | ||
956 | smp_wmb(); | ||
957 | |||
950 | btrfs_set_root_node(new_root_item, tmp); | 958 | btrfs_set_root_node(new_root_item, tmp); |
951 | /* record when the snapshot was created in key.offset */ | 959 | /* record when the snapshot was created in key.offset */ |
952 | key.offset = trans->transid; | 960 | key.offset = trans->transid; |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ab5b1c49f352..78f2d4d4f37f 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -100,6 +100,12 @@ struct btrfs_device { | |||
100 | struct reada_zone *reada_curr_zone; | 100 | struct reada_zone *reada_curr_zone; |
101 | struct radix_tree_root reada_zones; | 101 | struct radix_tree_root reada_zones; |
102 | struct radix_tree_root reada_extents; | 102 | struct radix_tree_root reada_extents; |
103 | |||
104 | /* for sending down flush barriers */ | ||
105 | struct bio *flush_bio; | ||
106 | struct completion flush_wait; | ||
107 | int nobarriers; | ||
108 | |||
103 | }; | 109 | }; |
104 | 110 | ||
105 | struct btrfs_fs_devices { | 111 | struct btrfs_fs_devices { |