author    Linus Torvalds <torvalds@linux-foundation.org>  2019-07-16 18:12:56 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-07-16 18:12:56 -0400
commit    a18f8775419d3df282dd83efdb51c5a64d092f31 (patch)
tree      1e0abc5c1d30e8bc58dc23099017eca496992fd2 /fs/btrfs/ordered-data.c
parent    3eb514866f20c5eb74637279774b6d73b855480a (diff)
parent    e02d48eaaed77f6c36916a7aa65c451e1f9d9aab (diff)
Merge tag 'for-5.3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
 "Highlights:

   - chunks that have been trimmed and unchanged since the last mount
     are tracked and skipped on repeated trims

   - use hw-assisted crc32c on more arches, with speedups if native
     instructions or an optimized implementation is available

   - the RAID56 incompat bit is automatically removed when the last
     block group of that type is removed

  Fixes:

   - fsync fix for reflink on NODATACOW files that could lead to ENOSPC

   - fix data loss after an inode is evicted, renamed and fsynced

   - fix fsync not persisting dentry deletions due to inode evictions

   - update ctime/mtime/iversion after hole punching

   - fix compression type validation (reported by KASAN)

   - send won't be allowed to start when relocation is in progress, as
     this can cause spurious errors or produce an incorrect send stream

  Core:

   - new tracepoints for space updates

   - tree-checker: better check for the end of extents for some tree
     items

   - preparatory work for more checksum algorithms

   - run delayed iputs at unlink time and don't push the work to the
     cleaner thread, where it's not properly throttled

   - wrap block mapping in structures and helpers, a base for further
     refactoring

   - split large files, part 1:
       - space info handling
       - block group reservations
       - delayed refs
       - delayed allocation

   - other cleanups and refactoring"

* tag 'for-5.3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (103 commits)
  btrfs: fix memory leak of path on error return path
  btrfs: move the subvolume reservation stuff out of extent-tree.c
  btrfs: migrate the delalloc space stuff to it's own home
  btrfs: migrate btrfs_trans_release_chunk_metadata
  btrfs: migrate the delayed refs rsv code
  btrfs: Evaluate io_tree in find_lock_delalloc_range()
  btrfs: migrate the global_block_rsv helpers to block-rsv.c
  btrfs: migrate the block-rsv code to block-rsv.c
  btrfs: stop using block_rsv_release_bytes everywhere
  btrfs: cleanup the target logic in __btrfs_block_rsv_release
  btrfs: export __btrfs_block_rsv_release
  btrfs: export btrfs_block_rsv_add_bytes
  btrfs: move btrfs_block_rsv definitions into it's own header
  btrfs: Simplify update of space_info in __reserve_metadata_bytes()
  btrfs: unexport can_overcommit
  btrfs: move reserve_metadata_bytes and supporting code to space-info.c
  btrfs: move dump_space_info to space-info.c
  btrfs: export block_rsv_use_bytes
  btrfs: move btrfs_space_info_add_*_bytes to space-info.c
  btrfs: move the space info update macro to space-info.h
  ...
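The change to this file belongs to the "preparatory work for more checksum
algorithms" noted above: the checksum buffer stops being typed as an array of
4-byte crc32c values (u32 *) and becomes a plain byte array (u8 *) that is
indexed in strides of the superblock's checksum size. Below is a minimal
userspace sketch of that indexing pattern; copy_sums() and its parameters are
invented for illustration and are not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: copy nr_sectors checksums starting at first_sector. */
static void copy_sums(uint8_t *dst, const uint8_t *sums,
                      unsigned long first_sector, unsigned long nr_sectors,
                      uint16_t csum_size)
{
        /*
         * Compute a byte offset rather than a u32 array index: once the
         * checksum size can vary, every sector index must be scaled by
         * csum_size, mirroring the memcpy change in btrfs_find_ordered_sum()
         * in the diff below.
         */
        memcpy(dst, sums + first_sector * csum_size, nr_sectors * csum_size);
}

int main(void)
{
        uint8_t sums[8 * 4];    /* checksums for 8 sectors, 4 bytes each */
        uint8_t out[2 * 4];

        memset(sums, 0xab, sizeof(sums));
        copy_sums(out, sums, 3, 2, 4);  /* grab the sums for sectors 3-4 */
        printf("first copied byte: 0x%02x\n", out[0]);
        return 0;
}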
Diffstat (limited to 'fs/btrfs/ordered-data.c')
-rw-r--r--  fs/btrfs/ordered-data.c | 56
1 file changed, 52 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 52889da69113..1744ba8b2754 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -13,6 +13,7 @@
 #include "extent_io.h"
 #include "disk-io.h"
 #include "compression.h"
+#include "delalloc-space.h"
 
 static struct kmem_cache *btrfs_ordered_extent_cache;
 
@@ -924,14 +925,16 @@ out:
  * be reclaimed before their checksum is actually put into the btree
  */
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
-			   u32 *sum, int len)
+			   u8 *sum, int len)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ordered_sum *ordered_sum;
 	struct btrfs_ordered_extent *ordered;
 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
 	unsigned long num_sectors;
 	unsigned long i;
 	u32 sectorsize = btrfs_inode_sectorsize(inode);
+	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 	int index = 0;
 
 	ordered = btrfs_lookup_ordered_extent(inode, offset);
@@ -947,10 +950,10 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 			num_sectors = ordered_sum->len >>
 				inode->i_sb->s_blocksize_bits;
 			num_sectors = min_t(int, len - index, num_sectors - i);
-			memcpy(sum + index, ordered_sum->sums + i,
-			       num_sectors);
+			memcpy(sum + index, ordered_sum->sums + i * csum_size,
+			       num_sectors * csum_size);
 
-			index += (int)num_sectors;
+			index += (int)num_sectors * csum_size;
 			if (index == len)
 				goto out;
 			disk_bytenr += num_sectors * sectorsize;
@@ -962,6 +965,51 @@ out:
 	return index;
 }
 
+/*
+ * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
+ * pending ordered extents in it are run to completion.
+ *
+ * @tree:         IO tree used for locking out other users of the range
+ * @inode:        Inode whose ordered tree is to be searched
+ * @start:        Beginning of range to flush
+ * @end:          Last byte of range to lock
+ * @cached_state: If passed, will return the extent state responsible for the
+ * locked range. It's the caller's responsibility to free the cached state.
+ *
+ * This function always returns with the given range locked, ensuring that
+ * after it is called no ordered extent can be pending in the range.
+ */
+void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
+					struct btrfs_inode *inode, u64 start,
+					u64 end,
+					struct extent_state **cached_state)
+{
+	struct btrfs_ordered_extent *ordered;
+	struct extent_state *cachedp = NULL;
+
+	if (cached_state)
+		cachedp = *cached_state;
+
+	while (1) {
+		lock_extent_bits(tree, start, end, &cachedp);
+		ordered = btrfs_lookup_ordered_range(inode, start,
+						     end - start + 1);
+		if (!ordered) {
+			/*
+			 * If no external cached_state has been passed then
+			 * decrement the extra ref taken for cachedp since we
+			 * aren't exposing it outside of this function.
+			 */
+			if (!cached_state)
+				refcount_dec(&cachedp->refs);
+			break;
+		}
+		unlock_extent_cached(tree, start, end, &cachedp);
+		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
+		btrfs_put_ordered_extent(ordered);
+	}
+}
+
 int __init ordered_data_init(void)
 {
 	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
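For context on how the new helper is meant to be consumed: it returns with the
range locked and no ordered extents pending, and the caller remains
responsible for unlocking the range and releasing any returned cached state.
The sketch below is a hypothetical caller written against the 5.3-era APIs
visible in this diff, not code from this commit; example_modify_range() is
invented for illustration.

/* Hypothetical caller: flush and lock a range before modifying it. */
static void example_modify_range(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct extent_state *cached_state = NULL;

	/* Returns with [start, end] locked and no ordered extents pending. */
	btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, start, end,
					   &cached_state);

	/* ... modify the range here ... */

	/* The caller owns the lock and the cached state; release both. */
	unlock_extent_cached(&inode->io_tree, start, end, &cached_state);
}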