Diffstat (limited to 'drivers/infiniband/hw/mlx5/main.c')
 drivers/infiniband/hw/mlx5/main.c | 39 ++++++++++++++++++++++++++++-----------
 1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 306534109627..bf900579ac08 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -46,8 +46,8 @@
 #include "mlx5_ib.h"
 
 #define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT |
 		IB_DEVICE_SYS_IMAGE_GUID |
-		IB_DEVICE_RC_RNR_NAK_GEN |
-		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+		IB_DEVICE_RC_RNR_NAK_GEN;
 	flags = dev->mdev.caps.flags;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
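
Dropping IB_DEVICE_BLOCK_MULTICAST_LOOPBACK from the advertised mask means applications that probe for the capability now see it unset. As a hedged illustration of how a userspace consumer typically tests such a bit (ibv_query_device() and the IBV_DEVICE_BLOCK_MULTICAST_LOOPBACK flag are standard libibverbs API; the surrounding program is illustrative only):

#include <stdio.h>
#include <infiniband/verbs.h>

/* Illustrative capability probe; real code would report errors properly. */
int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_device_attr attr;
	struct ibv_context *ctx;

	if (!list || !list[0])
		return 1;
	ctx = ibv_open_device(list[0]);
	if (!ctx || ibv_query_device(ctx, &attr))
		return 1;
	printf("block multicast loopback: %s\n",
	       (attr.device_cap_flags & IBV_DEVICE_BLOCK_MULTICAST_LOOPBACK) ?
	       "supported" : "not supported");
	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}
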
@@ -536,34 +535,51 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 					  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_alloc_ucontext_req req;
+	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
+	int gross_uuars;
 	int num_uars;
+	int ver;
 	int uuarn;
 	int err;
 	int i;
+	int reqlen;
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
-	err = ib_copy_from_udata(&req, udata, sizeof(req));
+	memset(&req, 0, sizeof(req));
+	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+		ver = 0;
+	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+		ver = 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	err = ib_copy_from_udata(&req, udata, reqlen);
 	if (err)
 		return ERR_PTR(err);
 
+	if (req.flags || req.reserved)
+		return ERR_PTR(-EINVAL);
+
 	if (req.total_num_uuars > MLX5_MAX_UUARS)
 		return ERR_PTR(-ENOMEM);
 
 	if (req.total_num_uuars == 0)
 		return ERR_PTR(-EINVAL);
 
-	req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE);
+	req.total_num_uuars = ALIGN(req.total_num_uuars,
+				    MLX5_NON_FP_BF_REGS_PER_PAGE);
 	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
 		return ERR_PTR(-EINVAL);
 
-	num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE;
+	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
+	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
 	resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp;
 	resp.bf_reg_size = dev->mdev.caps.bf_reg_size;
 	resp.cache_line_size = L1_CACHE_BYTES;
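
The new version check above keys purely off the size of the incoming request: udata->inlen minus the uverbs command header must match one of two known layouts. A sketch of the two layouts implied by the fields the code touches (the real definitions live in the driver's userspace ABI header; the exact ordering here is an assumption):

struct mlx5_ib_alloc_ucontext_req {
	__u32	total_num_uuars;
	__u32	num_low_latency_uuars;
};

struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32	total_num_uuars;
	__u32	num_low_latency_uuars;
	__u32	flags;		/* must be zero today; rejected otherwise */
	__u32	reserved;	/* must be zero today; rejected otherwise */
};

Because req is zeroed with memset() before the copy, a short v0 request simply leaves flags and reserved at zero and passes the (req.flags || req.reserved) check.
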
@@ -585,7 +601,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 		goto out_ctx;
 	}
 
-	uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars),
+	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
 				sizeof(*uuari->bitmap),
 				GFP_KERNEL);
 	if (!uuari->bitmap) {
@@ -595,13 +611,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	/*
 	 * clear all fast path uuars
 	 */
-	for (i = 0; i < req.total_num_uuars; i++) {
+	for (i = 0; i < gross_uuars; i++) {
 		uuarn = i & 3;
 		if (uuarn == 2 || uuarn == 3)
 			set_bit(i, uuari->bitmap);
 	}
 
-	uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL);
+	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
 	if (!uuari->count) {
 		err = -ENOMEM;
 		goto out_bitmap;
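
The gross/net distinction driving these hunks: each UAR page holds MLX5_BF_REGS_PER_PAGE blue-flame register slots, but only MLX5_NON_FP_BF_REGS_PER_PAGE of them are general purpose; the rest are fast-path slots that the (i & 3) loop pre-marks as taken so normal allocation skips them. A standalone sketch of the arithmetic, assuming the usual values of 4 and 2 for those constants:

#include <stdio.h>

/* Assumed constant values; in the kernel they come from the mlx5 device headers. */
#define MLX5_BF_REGS_PER_PAGE		4
#define MLX5_NON_FP_BF_REGS_PER_PAGE	2

int main(void)
{
	int total_num_uuars = 8; /* already ALIGNed to non-FP regs per page */
	int num_uars = total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; /* 4 pages  */
	int gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;            /* 16 slots */
	int i;

	printf("num_uars=%d gross_uuars=%d\n", num_uars, gross_uuars);
	for (i = 0; i < gross_uuars; i++)
		if ((i & 3) == 2 || (i & 3) == 3)
			printf("slot %d: fast path, pre-set in bitmap\n", i);
	return 0;
}

With these numbers a request for 8 uuars yields 4 UAR pages and 16 bitmap slots, of which slots 2, 3, 6, 7, 10, 11, 14 and 15 are reserved for fast path; sizing the bitmap and count arrays by gross_uuars instead of req.total_num_uuars is what the two kcalloc() changes above fix.
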
@@ -623,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err)
 		goto out_uars;
 
+	uuari->ver = ver;
 	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
 	uuari->uars = uars;
 	uuari->num_uars = num_uars;
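
Taken together, the change preserves binary compatibility with older userspace: a library built against the old ABI keeps sending the small struct and is treated as ver 0, while an updated library opts into v2 by sending the larger struct with flags and reserved zeroed. A hypothetical caller-side fragment (not libmlx5's actual code):

/* Hypothetical userspace side of the size-based negotiation. */
struct mlx5_ib_alloc_ucontext_req_v2 req = {
	.total_num_uuars       = 16, /* example values only */
	.num_low_latency_uuars = 4,
	.flags                 = 0,  /* kernel returns -EINVAL if nonzero */
	.reserved              = 0,
};
/*
 * The struct travels to the kernel as the driver-private payload of the
 * uverbs context-allocation command; mlx5_ib_alloc_ucontext() derives
 * ver = 2 from udata->inlen alone, with no explicit version field.
 */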