author     Matan Barak <matanb@mellanox.com>       2014-09-11 06:18:37 -0400
committer  Roland Dreier <roland@purestorage.com>  2014-09-22 11:47:47 -0400
commit     4ff0acca7344c93fd9ed778b4c3ce16d95c594e4
tree       10db33af6a400a7863ee1868058ed05d55823cd8
parent     50e2ec9105fe1045298924a6c7b9ce870a0b6c13
mlx4: Correct error flows in rereg_mr
This patch addresses feedback from Sagi Grimberg on the mlx4 rereg_mr
implementation. The following issues are fixed:
1. Set the correct pd_flags.
2. Change the iova and size MR fields only after the MTTs have been
   successfully allocated and written.
3. Make the error checking more robust.
Fixes: e630664c8383 ("mlx4_core: Add helper functions to support MR re-registration")
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
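The core of fix (2) is an ordering rule: never update the driver's cached view of an MR before the hardware operation that makes that view true has succeeded, or a failed re-registration leaves the cache describing a mapping that was never installed. Below is a minimal standalone sketch of that pattern, using placeholder names (hw_write, mr_cache) rather than the driver's real API:

#include <stdio.h>

struct mr_cache { unsigned long iova, size; };

/* Placeholder for a hardware write that can fail. */
static int hw_write(unsigned long iova, unsigned long size)
{
	(void)iova; (void)size;
	return 0;		/* or a negative errno on failure */
}

/* Fixed ordering: commit to the cache only after the write succeeds. */
static int rereg(struct mr_cache *mr, unsigned long iova, unsigned long size)
{
	int err = hw_write(iova, size);
	if (err)
		return err;	/* cache still describes the old mapping */
	mr->iova = iova;
	mr->size = size;
	return 0;
}

int main(void)
{
	struct mr_cache mr = { 0x1000, 4096 };
	printf("rereg: %d, iova now 0x%lx\n", rereg(&mr, 0x2000, 8192), mr.iova);
	return 0;
}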
 drivers/infiniband/hw/mlx4/mr.c         |  7
 drivers/net/ethernet/mellanox/mlx4/mr.c | 33
 2 files changed, 27 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 9b0e80e59b08..8f9325cfc85d 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 						       0);
 		if (IS_ERR(mmr->umem)) {
 			err = PTR_ERR(mmr->umem);
+			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
 			mmr->umem = NULL;
 			goto release_mpt_entry;
 		}
 		n = ib_umem_page_count(mmr->umem);
 		shift = ilog2(mmr->umem->page_size);
 
-		mmr->mmr.iova       = virt_addr;
-		mmr->mmr.size       = length;
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
 					      virt_addr, length, n, shift,
 					      *pmpt_entry);
@@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 			ib_umem_release(mmr->umem);
 			goto release_mpt_entry;
 		}
+		mmr->mmr.iova       = virt_addr;
+		mmr->mmr.size       = length;
 
 		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
 		if (err) {
@@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 	 * return a failure. But dereg_mr will free the resources.
 	 */
 	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+	if (!err && flags & IB_MR_REREG_ACCESS)
+		mmr->mmr.access = mr_access_flags;
 
 release_mpt_entry:
 	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
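The one-line mmr->umem = NULL addition is the standard guard around the kernel's pointer-encoded errors: ib_umem_get() returns an ERR_PTR() value on failure, and mlx4_ib_dereg_mr() would otherwise try to release that non-pointer later. Here is a compilable sketch of the idiom, with simplified stand-ins for the kernel's err.h macros and the umem calls (umem_get, umem_release, and the struct layouts are illustrative only):

#include <stdio.h>

/* Simplified versions of the kernel's IS_ERR()/PTR_ERR(): small negative
 * errnos are encoded at the top of the address space. */
#define MAX_ERRNO	4095UL
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(p))

struct umem { int pages; };
struct mr { struct umem *umem; };

/* Stand-in for ib_umem_get(); here it always fails with -ENOMEM (-12). */
static struct umem *umem_get(void)
{
	return (struct umem *)-12L;
}

static void umem_release(struct umem *u)	/* must only see real pointers */
{
	(void)u;
}

static int remap(struct mr *mr)
{
	mr->umem = umem_get();
	if (IS_ERR(mr->umem)) {
		long err = PTR_ERR(mr->umem);
		/* Without this, a later cleanup doing
		 * "if (mr->umem) umem_release(mr->umem);"
		 * would pass the error cookie to umem_release(). */
		mr->umem = NULL;
		return (int)err;
	}
	return 0;
}

int main(void)
{
	struct mr mr = { 0 };
	printf("remap: %d, umem=%p\n", remap(&mr), (void *)mr.umem);
	if (mr.umem)
		umem_release(mr.umem);
	return 0;
}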
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 7d717eccb7b0..193a6adb5d04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
+/* Must protect against concurrent access */
 int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		       struct mlx4_mpt_entry ***mpt_entry)
 {
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
 	struct mlx4_cmd_mailbox *mailbox = NULL;
 
-	/* Make sure that at this point we have single-threaded access only */
-
 	if (mmr->enabled != MLX4_MPT_EN_HW)
 		return -EINVAL;
 
 	err = mlx4_HW2SW_MPT(dev, NULL, key);
-
 	if (err) {
 		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
 		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 			   0, MLX4_CMD_QUERY_MPT,
 			   MLX4_CMD_TIME_CLASS_B,
 			   MLX4_CMD_WRAPPED);
-
 	if (err)
 		goto free_mailbox;
 
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		err = mlx4_SW2HW_MPT(dev, mailbox, key);
 	}
 
-	mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
-	if (!err)
+	if (!err) {
+		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
 		mmr->enabled = MLX4_MPT_EN_HW;
+	}
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
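The mlx4_mr_hw_write_mpt() hunk applies the same commit-on-success rule to the software mirror of the MPT: before the fix, mmr->pd was overwritten even when the SW2HW_MPT command failed. Restated as a hedged sketch (sw2hw_mpt, MPT_PD_MASK, and the struct are placeholders, not the driver's definitions, and ntohl() stands in for be32_to_cpu()):

#include <stdint.h>
#include <arpa/inet.h>

#define MPT_PD_MASK	0x00FFFFFFu	/* hypothetical width of the PD field */

struct mr_sw { uint32_t pd; int enabled; };

static int sw2hw_mpt(void)		/* stub for the firmware command */
{
	return 0;
}

static int write_mpt(struct mr_sw *mr, uint32_t pd_flags_be)
{
	int err = sw2hw_mpt();

	if (!err) {
		/* Mirror hardware state only once the command succeeded. */
		mr->pd = ntohl(pd_flags_be) & MPT_PD_MASK;
		mr->enabled = 1;
	}
	return err;
}

int main(void)
{
	struct mr_sw mr = { 0, 0 };
	return write_mpt(&mr, htonl(0x01000042u));
}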
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
 int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
 			 u32 pdn)
 {
-	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
 	/* The wrapper function will put the slave's id here */
 	if (mlx4_is_mfunc(dev))
 		pd_flags &= ~MLX4_MPT_PD_VF_MASK;
-	mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) |
+
+	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
 					  (pdn & MLX4_MPT_PD_MASK)
 					  | MLX4_MPT_PD_FLAG_EN_INV);
 	return 0;
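The mlx4_mr_hw_change_pd() change is a read-modify-write fix: the old PD bits must be cleared before the new pdn is OR-ed in, so that no bits of the previous PD leak into the new value while the non-PD flag bits survive the update. A small self-contained demonstration with made-up bit positions (the real MLX4_MPT_* masks differ):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: low 24 bits = PD number, bit 24 = EN_INV flag. */
#define PD_MASK		0x00FFFFFFu
#define PD_FLAG_EN_INV	0x01000000u

static uint32_t change_pd(uint32_t pd_flags, uint32_t new_pdn)
{
	/* Clear the old PD field first; keep the remaining flag bits. */
	uint32_t flags = pd_flags & ~PD_MASK;

	return flags | (new_pdn & PD_MASK) | PD_FLAG_EN_INV;
}

int main(void)
{
	uint32_t before = PD_FLAG_EN_INV | 0xABCDEFu;	/* old pdn */
	uint32_t after  = change_pd(before, 0x000042u);

	/* Without the ~PD_MASK clear, the old and new PD bits
	 * would simply be OR-ed together. */
	printf("before=0x%08x after=0x%08x\n", before, after);
	return 0;
}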
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
 	int err;
 
-	mpt_entry->start       = cpu_to_be64(mr->iova);
-	mpt_entry->length      = cpu_to_be64(mr->size);
-	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+	mpt_entry->start       = cpu_to_be64(iova);
+	mpt_entry->length      = cpu_to_be64(size);
+	mpt_entry->entity_size = cpu_to_be32(page_shift);
 
 	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
 	if (err)
 		return err;
 
+	mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
+					   MLX4_MPT_PD_FLAG_EN_INV);
+	mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
+					   MLX4_MPT_FLAG_SW_OWNS);
 	if (mr->mtt.order < 0) {
 		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
 		mpt_entry->mtt_addr = 0;
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 		if (mr->mtt.page_shift == 0)
 			mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
 	}
+	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+		/* fast register MR in free state */
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+						   MLX4_MPT_PD_FLAG_RAE);
+	} else {
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+	}
 	mr->enabled = MLX4_MPT_EN_SW;
 
 	return 0;
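Finally, the new tail of mlx4_mr_rereg_mem_write() distinguishes fast-register MRs (an allocated MTT, order >= 0, but no page shift yet) from regular MRs: the former must be handed back to hardware in the free state with the fast-register and remote-access-enable bits set, the latter as software-owned. The selection logic can be restated as a standalone helper; the flag values below are simplified stand-ins, not the real MLX4_MPT_* constants:

#include <stdint.h>

/* Simplified stand-ins for the MLX4_MPT_* flag bits. */
#define FLAG_FREE	0x1u
#define FLAG_SW_OWNS	0x2u
#define PD_FAST_REG	0x4u
#define PD_RAE		0x8u

struct mtt { int order; int page_shift; };

static void select_mpt_flags(const struct mtt *mtt,
			     uint32_t *flags, uint32_t *pd_flags)
{
	if (mtt->order >= 0 && mtt->page_shift == 0) {
		/* Allocated MTT but no page shift yet: a fast-register MR,
		 * which must go back to hardware in the free state. */
		*flags |= FLAG_FREE;
		*pd_flags |= PD_FAST_REG | PD_RAE;
	} else {
		*flags |= FLAG_SW_OWNS;
	}
}

int main(void)
{
	struct mtt fast = { .order = 2, .page_shift = 0 };
	uint32_t flags = 0, pd_flags = 0;

	select_mpt_flags(&fast, &flags, &pd_flags);
	return !(flags & FLAG_FREE);	/* exit 0 when the fast path was taken */
}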