author     Matan Barak <matanb@mellanox.com>       2014-07-31 04:01:29 -0400
committer  Roland Dreier <roland@purestorage.com>  2014-08-01 18:11:13 -0400
commit     e630664c8383f300c4146d7613d61e5a8eb1f8e3 (patch)
tree       e3ec77b2c8b34bc022d0014622e4d392a33aed43
parent     7e6edb9b2e0bcfb2a588db390c44d120213c57ae (diff)
mlx4_core: Add helper functions to support MR re-registration
Add a few helper functions to support a mechanism for getting an MPT,
modifying it, and updating the HCA with the modified object.
The code takes two paths: one that changes the MPT directly (and
sometimes its related MTTs), and another that queries the MPT and
updates the HCA via the firmware command SW2HW_MPT. The first path is
used in native mode; the second is slower and is used only in SRIOV.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h              |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c                | 160
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  |  26
-rw-r--r--  include/linux/mlx4/device.h                            |  16
4 files changed, 202 insertions, 2 deletions
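
For context, here is a minimal usage sketch of the helpers this patch adds. It is not part of the commit: the caller name `example_change_mr` and its error handling are illustrative assumptions; only the `mlx4_mr_hw_*` calls and their signatures come from the API added in mr.c and declared in device.h below.

```c
/*
 * Illustrative sketch only -- not part of this patch. It shows the
 * intended call sequence: move the MPT to SW ownership and get a
 * pointer to it, edit the SW copy, write it back to HW, then release
 * any QUERY_MPT mailbox that was allocated along the way.
 */
static int example_change_mr(struct mlx4_dev *dev, struct mlx4_mr *mmr,
			     u32 new_pdn, u32 new_access)
{
	struct mlx4_mpt_entry *mpt_entry;
	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
	int err;

	/* Native mode: pointer straight into the dMPT table.
	 * SRIOV: pointer into a QUERY_MPT command mailbox copy.
	 */
	err = mlx4_mr_hw_get_mpt(dev, mmr, &pmpt_entry);
	if (err)
		return err;

	/* Modify the software copy of the MPT entry. */
	err = mlx4_mr_hw_change_pd(dev, *pmpt_entry, new_pdn);
	if (!err)
		err = mlx4_mr_hw_change_access(dev, *pmpt_entry, new_access);

	/* Native mode: flip the MPT status byte and SYNC_TPT.
	 * SRIOV: push the mailbox back to the HCA with SW2HW_MPT.
	 */
	if (!err)
		err = mlx4_mr_hw_write_mpt(dev, mmr, pmpt_entry);

	/* Frees the mailbox in the SRIOV case, no-op otherwise. */
	mlx4_mr_hw_put_mpt(dev, pmpt_entry);
	return err;
}
```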
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 1d8af7336807..b40d587974fa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -279,6 +279,8 @@ struct mlx4_icm_table {
 #define MLX4_MPT_FLAG_PHYSICAL      (1 << 9)
 #define MLX4_MPT_FLAG_REGION        (1 << 8)
 
+#define MLX4_MPT_PD_MASK            (0x1FFFFUL)
+#define MLX4_MPT_PD_VF_MASK         (0xFE0000UL)
 #define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
 #define MLX4_MPT_PD_FLAG_RAE        (1 << 28)
 #define MLX4_MPT_PD_FLAG_EN_INV     (3 << 24)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 2839abb878a6..7d717eccb7b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -298,6 +298,131 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
                             MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
+int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
+                       struct mlx4_mpt_entry ***mpt_entry)
+{
+        int err;
+        int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
+        struct mlx4_cmd_mailbox *mailbox = NULL;
+
+        /* Make sure that at this point we have single-threaded access only */
+
+        if (mmr->enabled != MLX4_MPT_EN_HW)
+                return -EINVAL;
+
+        err = mlx4_HW2SW_MPT(dev, NULL, key);
+
+        if (err) {
+                mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
+                mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
+                return err;
+        }
+
+        mmr->enabled = MLX4_MPT_EN_SW;
+
+        if (!mlx4_is_mfunc(dev)) {
+                **mpt_entry = mlx4_table_find(
+                                &mlx4_priv(dev)->mr_table.dmpt_table,
+                                key, NULL);
+        } else {
+                mailbox = mlx4_alloc_cmd_mailbox(dev);
+                if (IS_ERR_OR_NULL(mailbox))
+                        return PTR_ERR(mailbox);
+
+                err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
+                                   0, MLX4_CMD_QUERY_MPT,
+                                   MLX4_CMD_TIME_CLASS_B,
+                                   MLX4_CMD_WRAPPED);
+
+                if (err)
+                        goto free_mailbox;
+
+                *mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
+        }
+
+        if (!(*mpt_entry) || !(**mpt_entry)) {
+                err = -ENOMEM;
+                goto free_mailbox;
+        }
+
+        return 0;
+
+free_mailbox:
+        mlx4_free_cmd_mailbox(dev, mailbox);
+        return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);
+
+int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
+                         struct mlx4_mpt_entry **mpt_entry)
+{
+        int err;
+
+        if (!mlx4_is_mfunc(dev)) {
+                /* Make sure any changes to this entry are flushed */
+                wmb();
+
+                *(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;
+
+                /* Make sure the new status is written */
+                wmb();
+
+                err = mlx4_SYNC_TPT(dev);
+        } else {
+                int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
+
+                struct mlx4_cmd_mailbox *mailbox =
+                        container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
+                                     buf);
+
+                err = mlx4_SW2HW_MPT(dev, mailbox, key);
+        }
+
+        mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
+        if (!err)
+                mmr->enabled = MLX4_MPT_EN_HW;
+        return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
+
+void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
+                        struct mlx4_mpt_entry **mpt_entry)
+{
+        if (mlx4_is_mfunc(dev)) {
+                struct mlx4_cmd_mailbox *mailbox =
+                        container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
+                                     buf);
+                mlx4_free_cmd_mailbox(dev, mailbox);
+        }
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
+
+int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
+                         u32 pdn)
+{
+        u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+        /* The wrapper function will put the slave's id here */
+        if (mlx4_is_mfunc(dev))
+                pd_flags &= ~MLX4_MPT_PD_VF_MASK;
+        mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) |
+                                          (pdn & MLX4_MPT_PD_MASK)
+                                          | MLX4_MPT_PD_FLAG_EN_INV);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);
+
+int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
+                             struct mlx4_mpt_entry *mpt_entry,
+                             u32 access)
+{
+        u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
+                    (access & MLX4_PERM_MASK);
+
+        mpt_entry->flags = cpu_to_be32(flags);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);
+
 static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
                                   u64 iova, u64 size, u32 access, int npages,
                                   int page_shift, struct mlx4_mr *mr)
@@ -463,6 +588,41 @@ int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_free);
 
+void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+        mlx4_mtt_cleanup(dev, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
+
+int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
+                            u64 iova, u64 size, int npages,
+                            int page_shift, struct mlx4_mpt_entry *mpt_entry)
+{
+        int err;
+
+        mpt_entry->start = cpu_to_be64(mr->iova);
+        mpt_entry->length = cpu_to_be64(mr->size);
+        mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
+        err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+        if (err)
+                return err;
+
+        if (mr->mtt.order < 0) {
+                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
+                mpt_entry->mtt_addr = 0;
+        } else {
+                mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+                                                  &mr->mtt));
+                if (mr->mtt.page_shift == 0)
+                        mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
+        }
+        mr->enabled = MLX4_MPT_EN_SW;
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);
+
 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
         struct mlx4_cmd_mailbox *mailbox;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 0efc1368e5a8..1089367fed22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2613,12 +2613,34 @@ int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
         if (err)
                 return err;
 
-        if (mpt->com.from_state != RES_MPT_HW) {
+        if (mpt->com.from_state == RES_MPT_MAPPED) {
+                /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
+                 * that, the VF must read the MPT. But since the MPT entry memory is not
+                 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
+                 * entry contents. To guarantee that the MPT cannot be changed, the driver
+                 * must perform HW2SW_MPT before this query and return the MPT entry to HW
+                 * ownership following the change. The change here allows the VF to
+                 * perform QUERY_MPT also when the entry is in SW ownership.
+                 */
+                struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
+                                        &mlx4_priv(dev)->mr_table.dmpt_table,
+                                        mpt->key, NULL);
+
+                if (NULL == mpt_entry || NULL == outbox->buf) {
+                        err = -EINVAL;
+                        goto out;
+                }
+
+                memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
+
+                err = 0;
+        } else if (mpt->com.from_state == RES_MPT_HW) {
+                err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+        } else {
                 err = -EBUSY;
                 goto out;
         }
 
-        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 
 out:
         put_res(dev, slave, id, RES_MPT);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 35b51e7af886..bac002167ace 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -262,6 +262,7 @@ enum {
         MLX4_PERM_REMOTE_WRITE  = 1 << 13,
         MLX4_PERM_ATOMIC        = 1 << 14,
         MLX4_PERM_BIND_MW       = 1 << 15,
+        MLX4_PERM_MASK          = 0xFC00
 };
 
 enum {
@@ -1243,4 +1244,19 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
                                  int enable);
+int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
+                       struct mlx4_mpt_entry ***mpt_entry);
+int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
+                         struct mlx4_mpt_entry **mpt_entry);
+int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
+                         u32 pdn);
+int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
+                             struct mlx4_mpt_entry *mpt_entry,
+                             u32 access);
+void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
+                        struct mlx4_mpt_entry **mpt_entry);
+void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr);
+int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
+                            u64 iova, u64 size, int npages,
+                            int page_shift, struct mlx4_mpt_entry *mpt_entry);
 #endif /* MLX4_DEVICE_H */
