author		Jason Gunthorpe <jgg@mellanox.com>	2019-08-21 13:12:29 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-08-21 19:58:18 -0400
commit		daa138a58c802e7b4c2fb73f9b85bb082616ef43 (patch)
tree		be913e8e3745bb367d2ba371598f447649102cfc /fs/btrfs/inode.c
parent		6869b7b206595ae0e326f59719090351eb8f4f5d (diff)
parent		fba0e448a2c5b297a4ddc1ec4e48f4aa6600a1c9 (diff)
Merge branch 'odp_fixes' into hmm.git

From rdma.git Jason Gunthorpe says:

====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================

The branch is based on v5.3-rc5 due to dependencies, and is being taken
into hmm.git due to dependencies in the next patches.

* odp_fixes:
  RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
  RDMA/mlx5: Use ib_umem_start instead of umem.address
  RDMA/core: Make invalidate_range a device operation
  RDMA/odp: Use kvcalloc for the dma_list and page_list
  RDMA/odp: Check for overflow when computing the umem_odp end
  RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
  RDMA/odp: Split creating a umem_odp from ib_umem_get
  RDMA/odp: Make the three ways to create a umem_odp clear
  RMDA/odp: Consolidate umem_odp initialization
  RDMA/odp: Make it clearer when a umem is an implicit ODP umem
  RDMA/odp: Iterate over the whole rbtree directly
  RDMA/odp: Use the common interval tree library instead of generic
  RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--	fs/btrfs/inode.c	24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1af069a9a0c7..ee582a36653d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -395,10 +395,31 @@ static noinline int add_async_extent(struct async_chunk *cow,
 	return 0;
 }
 
+/*
+ * Check if the inode has flags compatible with compression
+ */
+static inline bool inode_can_compress(struct inode *inode)
+{
+	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
+	    BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
+		return false;
+	return true;
+}
+
+/*
+ * Check if the inode needs to be submitted to compression, based on mount
+ * options, defragmentation, properties or heuristics.
+ */
 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 
+	if (!inode_can_compress(inode)) {
+		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
+			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
+			btrfs_ino(BTRFS_I(inode)));
+		return 0;
+	}
 	/* force compress */
 	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
 		return 1;
@@ -1631,7 +1652,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
 	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
 					 page_started, 0, nr_written);
-	} else if (!inode_need_compress(inode, start, end)) {
+	} else if (!inode_can_compress(inode) ||
+		   !inode_need_compress(inode, start, end)) {
 		ret = cow_file_range(inode, locked_page, start, end, end,
 				     page_started, nr_written, 1, NULL);
 	} else {
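
For quick reference, below is a minimal standalone sketch of the check this diff
introduces: inodes flagged NODATACOW or NODATASUM are treated as incompatible
with compression, so both inode_need_compress() and btrfs_run_delalloc_range()
bail out before attempting it. The struct and flag bits here are simplified
stand-ins so the sketch compiles on its own; they are not the btrfs definitions.

/* Standalone sketch of the inode_can_compress() logic added above.
 * demo_inode and the DEMO_* flag bits are illustrative stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_INODE_NODATASUM	(1U << 0)	/* stand-in for BTRFS_INODE_NODATASUM */
#define DEMO_INODE_NODATACOW	(1U << 1)	/* stand-in for BTRFS_INODE_NODATACOW */

struct demo_inode {
	unsigned int flags;
};

/* Mirrors the new helper: an inode carrying either flag is never a
 * candidate for compression. */
static bool demo_inode_can_compress(const struct demo_inode *inode)
{
	if (inode->flags & (DEMO_INODE_NODATACOW | DEMO_INODE_NODATASUM))
		return false;
	return true;
}

int main(void)
{
	struct demo_inode plain = { .flags = 0 };
	struct demo_inode nocow = { .flags = DEMO_INODE_NODATACOW };

	printf("plain inode can compress:     %d\n", demo_inode_can_compress(&plain));
	printf("nodatacow inode can compress: %d\n", demo_inode_can_compress(&nocow));
	return 0;
}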