author      Jason Gunthorpe <jgg@mellanox.com>    2019-08-21 13:12:29 -0400
committer   Jason Gunthorpe <jgg@mellanox.com>    2019-08-21 19:58:18 -0400
commit      daa138a58c802e7b4c2fb73f9b85bb082616ef43 (patch)
tree        be913e8e3745bb367d2ba371598f447649102cfc /fs/dax.c
parent      6869b7b206595ae0e326f59719090351eb8f4f5d (diff)
parent      fba0e448a2c5b297a4ddc1ec4e48f4aa6600a1c9 (diff)
Merge branch 'odp_fixes' into hmm.git
From rdma.git
Jason Gunthorpe says:
====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================
The branch is based on v5.3-rc5 because of its dependencies, and is being
taken into hmm.git because the next patches there depend on it.
* odp_fixes:
RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
RDMA/mlx5: Use ib_umem_start instead of umem.address
RDMA/core: Make invalidate_range a device operation
RDMA/odp: Use kvcalloc for the dma_list and page_list
RDMA/odp: Check for overflow when computing the umem_odp end
RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
RDMA/odp: Split creating a umem_odp from ib_umem_get
RDMA/odp: Make the three ways to create a umem_odp clear
RDMA/odp: Consolidate umem_odp initialization
RDMA/odp: Make it clearer when a umem is an implicit ODP umem
RDMA/odp: Iterate over the whole rbtree directly
RDMA/odp: Use the common interval tree library instead of generic
RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
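
Several of the patches above concern the interval tree that ODP uses to track
which address ranges need invalidation. For readers unfamiliar with the common
kernel library the series switches to, here is a minimal sketch of how it is
typically used; the demo_range structure and demo() function are hypothetical
illustrations, not the actual ODP code.

#include <linux/interval_tree.h>
#include <linux/slab.h>

/* Hypothetical example structure: a driver embeds an
 * interval_tree_node to index a range of addresses.
 */
struct demo_range {
	struct interval_tree_node node;	/* .start and .last are inclusive */
};

/* Insert one range, then walk everything overlapping [0x1800, 0x27ff]. */
static void demo(struct rb_root_cached *root)
{
	struct demo_range *r = kzalloc(sizeof(*r), GFP_KERNEL);
	struct interval_tree_node *it;

	if (!r)
		return;
	r->node.start = 0x1000;
	r->node.last  = 0x1fff;
	interval_tree_insert(&r->node, root);

	for (it = interval_tree_iter_first(root, 0x1800, 0x27ff); it;
	     it = interval_tree_iter_next(it, 0x1800, 0x27ff)) {
		/* e.g. invalidate the umem backing this range */
	}
}

Compared with instantiating a private tree through the
interval_tree_generic.h template, the common library keeps the
rb_root_cached handling and augmented-subtree maintenance out of driver
code, which is what makes cleanups like "Iterate over the whole rbtree
directly" straightforward.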
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
@@ -266,7 +266,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
 	/* If we were the only waiter woken, wake the next one */
-	if (entry && dax_is_conflict(entry))
+	if (entry && !dax_is_conflict(entry))
 		dax_wake_entry(xas, entry, false);
 }
 
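For context on this first hunk: get_unlocked_entry() can return a special
"conflict" sentinel when the entry found does not match the requested order
(for example, a PTE entry where a PMD entry was expected). That sentinel is
not a real, lockable entry, so it must never be used to wake the next waiter;
conversely, with the inverted test on the left-hand side, an ordinary entry
never triggered the wakeup at all, which can leave waiters sleeping
indefinitely. Assuming the v5.3-era definition in fs/dax.c, the sentinel check
itself is just a pointer comparison:

/* Sketch, assuming mainline's definition: the Xarray retry entry is
 * reused as an out-of-band "conflict" marker, since a lookup can never
 * hand it back as a valid entry.
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}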
@@ -600,7 +600,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	 * guaranteed to either see new references or prevent new
 	 * references from being established.
 	 */
-	unmap_mapping_range(mapping, 0, 0, 1);
+	unmap_mapping_range(mapping, 0, 0, 0);
 
 	xas_lock_irq(&xas);
 	xas_for_each(&xas, entry, ULONG_MAX) {
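
In this second hunk, the arguments to unmap_mapping_range() are
(mapping, holebegin, holelen, even_cows): a holelen of 0 means "unmap from
holebegin to the end of the file", and even_cows selects whether private
copy-on-write pages are torn down as well. The change therefore still unmaps
every page of the file, but leaves private COW copies in place. A minimal
sketch of the call with the parameters spelled out (the wrapper name is
hypothetical):

#include <linux/mm.h>

/* Unmap all mappings of the file, but do not touch private COW copies
 * (hypothetical helper for illustration).
 */
static void demo_unmap_whole_file(struct address_space *mapping)
{
	/* holebegin = 0, holelen = 0 ("to EOF"), even_cows = 0 */
	unmap_mapping_range(mapping, 0, 0, 0);
}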