about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorNikolay Borisov <nborisov@suse.com>2018-05-02 08:19:33 -0400
committerDavid Sterba <dsterba@suse.com>2018-05-30 13:01:44 -0400
commitc5794e51784a0a96dd82e8f955570a7eccf27e5d (patch)
tree4415984aa73ae253d024fe7a52eb4cbc85e33448
parent1c8d0175df47364aa55c568b65ed7a3aee5b9a6d (diff)
btrfs: Factor out write portion of btrfs_get_blocks_direct
Now that the read side is extracted into its own function, do the same for the write side. This leaves btrfs_get_blocks_direct with the sole purpose of handling the common locking required. Also flip the condition in btrfs_get_blocks_direct so that the write case comes first and we check for if (create) rather than if (!create). This is purely subjective but I believe makes reading a bit more "linear". No functional changes. Signed-off-by: Nikolay Borisov <nborisov@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
-rw-r--r--fs/btrfs/inode.c207
1 files changed, 108 insertions, 99 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4f8fb1130cf3..880431ae5e59 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7561,6 +7561,104 @@ static int btrfs_get_blocks_direct_read(struct extent_map *em,
7561 return 0; 7561 return 0;
7562} 7562}
7563 7563
7564static int btrfs_get_blocks_direct_write(struct extent_map **map,
7565 struct buffer_head *bh_result,
7566 struct inode *inode,
7567 struct btrfs_dio_data *dio_data,
7568 u64 start, u64 len)
7569{
7570 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7571 struct extent_map *em = *map;
7572 int ret = 0;
7573
7574 /*
7575 * We don't allocate a new extent in the following cases
7576 *
7577 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7578 * existing extent.
7579 * 2) The extent is marked as PREALLOC. We're good to go here and can
7580 * just use the extent.
7581 *
7582 */
7583 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7584 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7585 em->block_start != EXTENT_MAP_HOLE)) {
7586 int type;
7587 u64 block_start, orig_start, orig_block_len, ram_bytes;
7588
7589 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7590 type = BTRFS_ORDERED_PREALLOC;
7591 else
7592 type = BTRFS_ORDERED_NOCOW;
7593 len = min(len, em->len - (start - em->start));
7594 block_start = em->block_start + (start - em->start);
7595
7596 if (can_nocow_extent(inode, start, &len, &orig_start,
7597 &orig_block_len, &ram_bytes) == 1 &&
7598 btrfs_inc_nocow_writers(fs_info, block_start)) {
7599 struct extent_map *em2;
7600
7601 em2 = btrfs_create_dio_extent(inode, start, len,
7602 orig_start, block_start,
7603 len, orig_block_len,
7604 ram_bytes, type);
7605 btrfs_dec_nocow_writers(fs_info, block_start);
7606 if (type == BTRFS_ORDERED_PREALLOC) {
7607 free_extent_map(em);
7608 *map = em = em2;
7609 }
7610
7611 if (em2 && IS_ERR(em2)) {
7612 ret = PTR_ERR(em2);
7613 goto out;
7614 }
7615 /*
7616 * For inode marked NODATACOW or extent marked PREALLOC,
7617 * use the existing or preallocated extent, so does not
7618 * need to adjust btrfs_space_info's bytes_may_use.
7619 */
7620 btrfs_free_reserved_data_space_noquota(inode, start,
7621 len);
7622 goto skip_cow;
7623 }
7624 }
7625
7626 /* this will cow the extent */
7627 len = bh_result->b_size;
7628 free_extent_map(em);
7629 *map = em = btrfs_new_extent_direct(inode, start, len);
7630 if (IS_ERR(em)) {
7631 ret = PTR_ERR(em);
7632 goto out;
7633 }
7634
7635 len = min(len, em->len - (start - em->start));
7636
7637skip_cow:
7638 bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7639 inode->i_blkbits;
7640 bh_result->b_size = len;
7641 bh_result->b_bdev = em->bdev;
7642 set_buffer_mapped(bh_result);
7643
7644 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7645 set_buffer_new(bh_result);
7646
7647 /*
7648 * Need to update the i_size under the extent lock so buffered
7649 * readers will get the updated i_size when we unlock.
7650 */
7651 if (!dio_data->overwrite && start + len > i_size_read(inode))
7652 i_size_write(inode, start + len);
7653
7654 WARN_ON(dio_data->reserve < len);
7655 dio_data->reserve -= len;
7656 dio_data->unsubmitted_oe_range_end = start + len;
7657 current->journal_info = dio_data;
7658out:
7659 return ret;
7660}
7661
7564static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 7662static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7565 struct buffer_head *bh_result, int create) 7663 struct buffer_head *bh_result, int create)
7566{ 7664{
@@ -7629,7 +7727,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7629 goto unlock_err; 7727 goto unlock_err;
7630 } 7728 }
7631 7729
7632 if (!create) { 7730 if (create) {
7731 ret = btrfs_get_blocks_direct_write(&em, bh_result, inode,
7732 dio_data, start, len);
7733 if (ret < 0)
7734 goto unlock_err;
7735
7736 /* clear and unlock the entire range */
7737 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7738 unlock_bits, 1, 0, &cached_state);
7739 } else {
7633 ret = btrfs_get_blocks_direct_read(em, bh_result, inode, 7740 ret = btrfs_get_blocks_direct_read(em, bh_result, inode,
7634 start, len); 7741 start, len);
7635 /* Can be negative only if we read from a hole */ 7742 /* Can be negative only if we read from a hole */
@@ -7650,106 +7757,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7650 } else { 7757 } else {
7651 free_extent_state(cached_state); 7758 free_extent_state(cached_state);
7652 } 7759 }
7653 free_extent_map(em);
7654 return 0;
7655 } 7760 }
7656 7761
7657 /*
7658 * We don't allocate a new extent in the following cases
7659 *
7660 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7661 * existing extent.
7662 * 2) The extent is marked as PREALLOC. We're good to go here and can
7663 * just use the extent.
7664 *
7665 */
7666 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7667 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7668 em->block_start != EXTENT_MAP_HOLE)) {
7669 int type;
7670 u64 block_start, orig_start, orig_block_len, ram_bytes;
7671
7672 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7673 type = BTRFS_ORDERED_PREALLOC;
7674 else
7675 type = BTRFS_ORDERED_NOCOW;
7676 len = min(len, em->len - (start - em->start));
7677 block_start = em->block_start + (start - em->start);
7678
7679 if (can_nocow_extent(inode, start, &len, &orig_start,
7680 &orig_block_len, &ram_bytes) == 1 &&
7681 btrfs_inc_nocow_writers(fs_info, block_start)) {
7682 struct extent_map *em2;
7683
7684 em2 = btrfs_create_dio_extent(inode, start, len,
7685 orig_start, block_start,
7686 len, orig_block_len,
7687 ram_bytes, type);
7688 btrfs_dec_nocow_writers(fs_info, block_start);
7689 if (type == BTRFS_ORDERED_PREALLOC) {
7690 free_extent_map(em);
7691 em = em2;
7692 }
7693 if (em2 && IS_ERR(em2)) {
7694 ret = PTR_ERR(em2);
7695 goto unlock_err;
7696 }
7697 /*
7698 * For inode marked NODATACOW or extent marked PREALLOC,
7699 * use the existing or preallocated extent, so does not
7700 * need to adjust btrfs_space_info's bytes_may_use.
7701 */
7702 btrfs_free_reserved_data_space_noquota(inode,
7703 start, len);
7704 goto unlock;
7705 }
7706 }
7707
7708 /*
7709 * this will cow the extent, reset the len in case we changed
7710 * it above
7711 */
7712 len = bh_result->b_size;
7713 free_extent_map(em);
7714 em = btrfs_new_extent_direct(inode, start, len);
7715 if (IS_ERR(em)) {
7716 ret = PTR_ERR(em);
7717 goto unlock_err;
7718 }
7719 len = min(len, em->len - (start - em->start));
7720unlock:
7721 bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7722 inode->i_blkbits;
7723 bh_result->b_size = len;
7724 bh_result->b_bdev = em->bdev;
7725 set_buffer_mapped(bh_result);
7726 if (create) {
7727 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7728 set_buffer_new(bh_result);
7729
7730 /*
7731 * Need to update the i_size under the extent lock so buffered
7732 * readers will get the updated i_size when we unlock.
7733 */
7734 if (!dio_data->overwrite && start + len > i_size_read(inode))
7735 i_size_write(inode, start + len);
7736
7737 WARN_ON(dio_data->reserve < len);
7738 dio_data->reserve -= len;
7739 dio_data->unsubmitted_oe_range_end = start + len;
7740 current->journal_info = dio_data;
7741 }
7742
7743 /*
7744 * In the case of write we need to clear and unlock the entire range,
7745 * in the case of read we need to unlock only the end area that we
7746 * aren't using if there is any left over space.
7747 */
7748 if (lockstart < lockend) {
7749 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7750 lockend, unlock_bits, 1, 0,
7751 &cached_state);
7752 }
7753 free_extent_map(em); 7762 free_extent_map(em);
7754 7763
7755 return 0; 7764 return 0;