author     Jason Gunthorpe <jgg@mellanox.com>  2019-08-21 13:12:29 -0400
committer  Jason Gunthorpe <jgg@mellanox.com>  2019-08-21 19:58:18 -0400
commit     daa138a58c802e7b4c2fb73f9b85bb082616ef43 (patch)
tree       be913e8e3745bb367d2ba371598f447649102cfc /net/dsa/slave.c
parent     6869b7b206595ae0e326f59719090351eb8f4f5d (diff)
parent     fba0e448a2c5b297a4ddc1ec4e48f4aa6600a1c9 (diff)
Merge branch 'odp_fixes' into hmm.git
From rdma.git
Jason Gunthorpe says:
====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================
The branch is based on v5.3-rc5 due to dependencies, and is being taken
into hmm.git because the next patches build on it.
* odp_fixes:
RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
RDMA/mlx5: Use ib_umem_start instead of umem.address
RDMA/core: Make invalidate_range a device operation
RDMA/odp: Use kvcalloc for the dma_list and page_list
RDMA/odp: Check for overflow when computing the umem_odp end
RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
RDMA/odp: Split creating a umem_odp from ib_umem_get
RDMA/odp: Make the three ways to create a umem_odp clear
RDMA/odp: Consolidate umem_odp initialization
RDMA/odp: Make it clearer when a umem is an implicit ODP umem
RDMA/odp: Iterate over the whole rbtree directly
RDMA/odp: Use the common interval tree library instead of generic
RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
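
As a rough illustration of what the odp_fixes series above leaves behind, the
sketch below walks the three umem_odp creation paths named in the list, each
undone by ib_umem_odp_release(). This is a non-buildable sketch: the function
names come from the series titles, but the exact signatures are my assumption
from the post-series (~v5.4) tree, not verified kernel API documentation.

/* Sketch only: the three ways to create a umem_odp after this series,
 * per "RDMA/odp: Make the three ways to create a umem_odp clear".
 * Signatures are assumptions based on the post-series (~v5.4) code.
 */
#include <rdma/ib_umem_odp.h>

static int odp_creation_paths_demo(struct ib_udata *udata,
				   unsigned long addr, size_t size,
				   int access)
{
	struct ib_umem_odp *implicit, *child, *odp;

	/* 1. Implicit ODP umem: covers the whole address space, no pages. */
	implicit = ib_umem_odp_alloc_implicit(udata, access);
	if (IS_ERR(implicit))
		return PTR_ERR(implicit);

	/* 2. Child umem under an implicit parent, created at fault time. */
	child = ib_umem_odp_alloc_child(implicit, addr, size);
	if (!IS_ERR(child))
		ib_umem_odp_release(child);

	/* 3. Explicit ODP umem over a user VA range. */
	odp = ib_umem_odp_get(udata, addr, size, access);
	if (!IS_ERR(odp))
		ib_umem_odp_release(odp);

	/* ib_umem_odp_release() undoes each of the creation paths. */
	ib_umem_odp_release(implicit);
	return 0;
}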
Diffstat (limited to 'net/dsa/slave.c')
-rw-r--r--  net/dsa/slave.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 614c38ece104..33f41178afcc 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -951,7 +951,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
 				    struct flow_block_offload *f)
 {
 	struct flow_block_cb *block_cb;
-	tc_setup_cb_t *cb;
+	flow_setup_cb_t *cb;
 
 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 		cb = dsa_slave_setup_tc_block_cb_ig;
@@ -967,7 +967,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
 		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
 			return -EBUSY;
 
-		block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL);
+		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
 		if (IS_ERR(block_cb))
 			return PTR_ERR(block_cb);
 
@@ -975,7 +975,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
 		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
 		return 0;
 	case FLOW_BLOCK_UNBIND:
-		block_cb = flow_block_cb_lookup(f, cb, dev);
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
 		if (!block_cb)
 			return -ENOENT;
 
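
For context on the three hunks above: they appear to track the flow_block
rework that comes in via the v5.3-rc5 base, where tc_setup_cb_t became
flow_setup_cb_t, flow_block_cb_alloc() dropped its struct net * argument, and
flow_block_cb_lookup() is now keyed by f->block rather than by the
flow_block_offload itself. Below is a minimal sketch of the resulting
bind/unbind pattern, mirroring dsa_slave_setup_tc_block(); the my_drv_* names
are hypothetical, while the flow_offload calls are the v5.3 interfaces.

/* Minimal sketch of the post-rework flow_block bind/unbind pattern.
 * my_drv_* names are hypothetical placeholders.
 */
#include <linux/list.h>
#include <net/flow_offload.h>

static LIST_HEAD(my_drv_block_cb_list);

static int my_drv_setup_block(struct net_device *dev,
			      struct flow_block_offload *f,
			      flow_setup_cb_t *cb)
{
	struct flow_block_cb *block_cb;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		/* Refuse to bind the same cb/ident pair twice. */
		if (flow_block_cb_is_busy(cb, dev, &my_drv_block_cb_list))
			return -EBUSY;

		/* No struct net * argument anymore; dev serves as both
		 * cb_ident and cb_priv, and there is no release callback.
		 */
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &my_drv_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		/* Lookup is keyed by f->block now, not by f itself. */
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}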