author     Eli Cohen <eli@dev.mellanox.co.il>        2014-01-30 06:49:48 -0500
committer  Roland Dreier <roland@purestorage.com>    2014-02-07 02:00:48 -0500
commit     78c0f98cc9dd46824fa66f35f14ea24ba733d145 (patch)
tree       16954ba1b3c48438748dcb954c01695eb3902005 /drivers/infiniband/hw/mlx5/main.c
parent     9e65dc371b5c8d7476c81353137efc13cc1bdabd (diff)
IB/mlx5: Fix binary compatibility with libmlx5
Commit c1be5232d21d ("Fix micro UAR allocator") broke binary compatibility
between libmlx5 and mlx5_ib since it defines a different value for the number
of micro UARs per page, leading to wrong calculations in libmlx5. This patch
defines struct mlx5_ib_alloc_ucontext_req_v2 as an extension to struct
mlx5_ib_alloc_ucontext_req. The extended size is determined in
mlx5_ib_alloc_ucontext(), and in the case of an old library we use uuarn 0,
which works fine -- this is achieved because create_user_qp() falls back from
high to medium and then to low class, where the low class returns 0. For new
libraries we use the more sophisticated allocation algorithm.
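
For reference, because the version is inferred purely from the request size,
the v2 structure has to start with the v1 layout and only append fields. Below
is a minimal sketch under that assumption, using only the field names the patch
itself touches (total_num_uuars, num_low_latency_uuars, flags, reserved); the
__u32 types are an assumption here, and the authoritative definitions live in
the mlx5 ABI header:

/* v1 request: what an old libmlx5 still sends. */
struct mlx5_ib_alloc_ucontext_req {
	__u32	total_num_uuars;
	__u32	num_low_latency_uuars;
};

/* v2 request: same leading fields with new members appended, so the
 * kernel can tell the versions apart by size; flags and reserved must
 * be zero for now.
 */
struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32	total_num_uuars;
	__u32	num_low_latency_uuars;
	__u32	flags;
	__u32	reserved;
};

Since the kernel keys the version off udata->inlen, an old libmlx5 that sends
only the v1-sized request is treated as ver 0 and ends up on uuarn 0, while a
library sending the v2-sized request gets the full allocation algorithm.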
Signed-off-by: Eli Cohen <eli@mellanox.com>
Reviewed-by: Yann Droneaud <ydroneaud@opteya.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/main.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9660d093f8cf..f4ef4a24d410 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -536,24 +536,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 					  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_alloc_ucontext_req req;
+	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
 	int gross_uuars;
 	int num_uars;
+	int ver;
 	int uuarn;
 	int err;
 	int i;
+	int reqlen;
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
-	err = ib_copy_from_udata(&req, udata, sizeof(req));
+	memset(&req, 0, sizeof(req));
+	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+		ver = 0;
+	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+		ver = 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	err = ib_copy_from_udata(&req, udata, reqlen);
 	if (err)
 		return ERR_PTR(err);
 
+	if (req.flags || req.reserved)
+		return ERR_PTR(-EINVAL);
+
 	if (req.total_num_uuars > MLX5_MAX_UUARS)
 		return ERR_PTR(-ENOMEM);
 
@@ -626,6 +640,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err)
 		goto out_uars;
 
+	uuari->ver = ver;
 	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
 	uuari->uars = uars;
 	uuari->num_uars = num_uars;