```
author     Eli Cohen <eli@dev.mellanox.co.il>      2014-01-14 10:45:18 -0500
committer  Roland Dreier <roland@purestorage.com>  2014-01-23 02:23:50 -0500
commit     bde51583f49bd87e452e9504d489926638046b11
tree       4b2f685b9c06304c03711d78110e98807820660d
parent     3bdb31f688276505ede23280885948e934304674
```
IB/mlx5: Add support for resize CQ
Implement resize CQ, which is a mandatory verb in mlx5.
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
```
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c               | 282
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h          |   3
-rw-r--r--  drivers/infiniband/hw/mlx5/user.h             |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c  |   4
-rw-r--r--  include/linux/mlx5/cq.h                       |  12
-rw-r--r--  include/linux/mlx5/device.h                   |   2

6 files changed, 284 insertions(+), 22 deletions(-)
```
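For orientation before the diff: in-kernel consumers reach this new code through the generic verbs entry point, which dispatches to `mlx5_ib_resize_cq()` below. A minimal sketch of such a caller, assuming an already-created CQ handle and an arbitrary target depth (neither is part of this patch):

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: grow an existing CQ to hold at least new_cqes entries.
 * ib_resize_cq() is the standard kverbs API; mlx5 rounds the requested
 * depth up to a power of two internally, so the CQ may end up larger.
 */
static int grow_cq(struct ib_cq *cq, int new_cqes)
{
        int err;

        err = ib_resize_cq(cq, new_cqes);
        if (err)
                pr_err("resize CQ to %d entries failed: %d\n", new_cqes, err);

        return err;
}
```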
```diff
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index b4c122eab484..50b03a8067e5 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -73,14 +73,24 @@ static void *get_cqe(struct mlx5_ib_cq *cq, int n)
         return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
 }
 
+static u8 sw_ownership_bit(int n, int nent)
+{
+        return (n & nent) ? 1 : 0;
+}
+
 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
 {
         void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
         struct mlx5_cqe64 *cqe64;
 
         cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
-        return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^
-                !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
+
+        if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+            !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
+                return cqe;
+        } else {
+                return NULL;
+        }
 }
 
 static void *next_cqe_sw(struct mlx5_ib_cq *cq)
```
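The rewritten `get_sw_cqe()` above now rejects a CQE on either of two grounds: its opcode is `MLX5_CQE_INVALID` (the value `init_cq_buf()` stamps into fresh buffers further down), or its owner bit disagrees with the wrap-around parity of the consumer index. The new `sw_ownership_bit()` helper computes that parity; a standalone userspace demo with hypothetical values:

```c
#include <stdio.h>

/* Mirror of the helper added above: with nent a power of two, the owner
 * bit expected at index n flips on every pass around the ring. */
static unsigned sw_ownership_bit(int n, int nent)
{
        return (n & nent) ? 1 : 0;
}

int main(void)
{
        int nent = 8;   /* hypothetical ring size */

        /* index 3 on the first pass -> 0; index 11 (same slot, next pass) -> 1 */
        printf("%u %u\n", sw_ownership_bit(3, nent), sw_ownership_bit(11, nent));
        return 0;
}
```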
```diff
@@ -351,6 +361,11 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
         qp->sq.last_poll = tail;
 }
 
+static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
+{
+        mlx5_buf_free(&dev->mdev, &buf->buf);
+}
+
 static int mlx5_poll_one(struct mlx5_ib_cq *cq,
                          struct mlx5_ib_qp **cur_qp,
                          struct ib_wc *wc)
@@ -366,6 +381,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
         void *cqe;
         int idx;
 
+repoll:
         cqe = next_cqe_sw(cq);
         if (!cqe)
                 return -EAGAIN;
@@ -379,7 +395,18 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
          */
         rmb();
 
-        /* TBD: resize CQ */
+        opcode = cqe64->op_own >> 4;
+        if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
+                if (likely(cq->resize_buf)) {
+                        free_cq_buf(dev, &cq->buf);
+                        cq->buf = *cq->resize_buf;
+                        kfree(cq->resize_buf);
+                        cq->resize_buf = NULL;
+                        goto repoll;
+                } else {
+                        mlx5_ib_warn(dev, "unexpected resize cqe\n");
+                }
+        }
 
         qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
         if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
@@ -398,7 +425,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
         }
 
         wc->qp = &(*cur_qp)->ibqp;
-        opcode = cqe64->op_own >> 4;
         switch (opcode) {
         case MLX5_CQE_REQ:
                 wq = &(*cur_qp)->sq;
```
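Taken together, the hunks above teach the poll path about resize: when the consumer reaches a CQE whose opcode is `MLX5_CQE_RESIZE_CQ`, every completion left in the old buffer has been delivered, so the driver frees that buffer, promotes `resize_buf`, and jumps back to `repoll` to read the same consumer index from the new buffer. A toy userspace simulation of just that control flow (buffer contents and opcode values are made up):

```c
#include <stdio.h>

#define OP_EMPTY        0       /* no valid CQE at this slot yet */
#define OP_COMPLETION   1       /* a normal work completion */
#define OP_RESIZE       2       /* stands in for MLX5_CQE_RESIZE_CQ */

static int old_ring[] = { OP_COMPLETION, OP_RESIZE };
/* slot 0 of new_ring stands for a position consumed before the switch */
static int new_ring[] = { OP_COMPLETION, OP_COMPLETION, OP_EMPTY };

int main(void)
{
        int *buf = old_ring;
        int *resize_buf = new_ring;     /* staged replacement buffer */
        int i = 0;                      /* consumer index */

        for (;;) {
                int op = buf[i];

                if (op == OP_EMPTY)
                        break;                  /* ring drained */
                if (op == OP_RESIZE && resize_buf) {
                        /* swap buffers and re-poll the same index */
                        buf = resize_buf;
                        resize_buf = NULL;
                        continue;
                }
                printf("completion at index %d\n", i);
                i++;
        }
        return 0;
}
```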
```diff
@@ -503,15 +529,11 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
                 return err;
 
         buf->cqe_size = cqe_size;
+        buf->nent = nent;
 
         return 0;
 }
 
-static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
-{
-        mlx5_buf_free(&dev->mdev, &buf->buf);
-}
-
 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                           struct ib_ucontext *context, struct mlx5_ib_cq *cq,
                           int entries, struct mlx5_create_cq_mbox_in **cqb,
@@ -576,16 +598,16 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
         ib_umem_release(cq->buf.umem);
 }
 
-static void init_cq_buf(struct mlx5_ib_cq *cq, int nent)
+static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
 {
         int i;
         void *cqe;
         struct mlx5_cqe64 *cqe64;
 
-        for (i = 0; i < nent; i++) {
-                cqe = get_cqe(cq, i);
-                cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64;
-                cqe64->op_own = 0xf1;
+        for (i = 0; i < buf->nent; i++) {
+                cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
+                cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+                cqe64->op_own = MLX5_CQE_INVALID << 4;
         }
 }
 
@@ -610,7 +632,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
         if (err)
                 goto err_db;
 
-        init_cq_buf(cq, entries);
+        init_cq_buf(cq, &cq->buf);
 
         *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
         *cqb = mlx5_vzalloc(*inlen);
@@ -836,7 +858,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
         in->ctx.cq_period = cpu_to_be16(cq_period);
         in->ctx.cq_max_count = cpu_to_be16(cq_count);
         in->field_select = cpu_to_be32(fsel);
-        err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in);
+        err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
         kfree(in);
 
         if (err)
@@ -845,9 +867,235 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
         return err;
 }
 
+static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+                       int entries, struct ib_udata *udata, int *npas,
+                       int *page_shift, int *cqe_size)
+{
+        struct mlx5_ib_resize_cq ucmd;
+        struct ib_umem *umem;
+        int err;
+        int npages;
+        struct ib_ucontext *context = cq->buf.umem->context;
+
+        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+                return -EFAULT;
+
+        umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+                           IB_ACCESS_LOCAL_WRITE, 1);
+        if (IS_ERR(umem)) {
+                err = PTR_ERR(umem);
+                return err;
+        }
+
+        mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+                           npas, NULL);
+
+        cq->resize_umem = umem;
+        *cqe_size = ucmd.cqe_size;
+
+        return 0;
+}
+
+static void un_resize_user(struct mlx5_ib_cq *cq)
+{
+        ib_umem_release(cq->resize_umem);
+}
+
+static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+                         int entries, int cqe_size)
+{
+        int err;
+
+        cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
+        if (!cq->resize_buf)
+                return -ENOMEM;
+
+        err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
+        if (err)
+                goto ex;
+
+        init_cq_buf(cq, cq->resize_buf);
+
+        return 0;
+
+ex:
+        kfree(cq->resize_buf);
+        return err;
+}
+
+static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
+{
+        free_cq_buf(dev, cq->resize_buf);
+        cq->resize_buf = NULL;
+}
+
+static int copy_resize_cqes(struct mlx5_ib_cq *cq)
+{
+        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+        struct mlx5_cqe64 *scqe64;
+        struct mlx5_cqe64 *dcqe64;
+        void *start_cqe;
+        void *scqe;
+        void *dcqe;
+        int ssize;
+        int dsize;
+        int i;
+        u8 sw_own;
+
+        ssize = cq->buf.cqe_size;
+        dsize = cq->resize_buf->cqe_size;
+        if (ssize != dsize) {
+                mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
+                return -EINVAL;
+        }
+
+        i = cq->mcq.cons_index;
+        scqe = get_sw_cqe(cq, i);
+        scqe64 = ssize == 64 ? scqe : scqe + 64;
+        start_cqe = scqe;
+        if (!scqe) {
+                mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+                return -EINVAL;
+        }
+
+        while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+                dcqe = get_cqe_from_buf(cq->resize_buf,
+                                        (i + 1) & (cq->resize_buf->nent),
+                                        dsize);
+                dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
+                sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
+                memcpy(dcqe, scqe, dsize);
+                dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
+
+                ++i;
+                scqe = get_sw_cqe(cq, i);
+                scqe64 = ssize == 64 ? scqe : scqe + 64;
+                if (!scqe) {
+                        mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+                        return -EINVAL;
+                }
+
+                if (scqe == start_cqe) {
+                        pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
+                                cq->mcq.cqn);
+                        return -ENOMEM;
+                }
+        }
+        ++cq->mcq.cons_index;
+        return 0;
+}
+
 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
-        return -ENOSYS;
+        struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
+        struct mlx5_ib_cq *cq = to_mcq(ibcq);
+        struct mlx5_modify_cq_mbox_in *in;
+        int err;
+        int npas;
+        int page_shift;
+        int inlen;
+        int uninitialized_var(cqe_size);
+        unsigned long flags;
+
+        if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+                pr_info("Firmware does not support resize CQ\n");
+                return -ENOSYS;
+        }
+
+        if (entries < 1)
+                return -EINVAL;
+
+        entries = roundup_pow_of_two(entries + 1);
+        if (entries > dev->mdev.caps.max_cqes + 1)
+                return -EINVAL;
+
+        if (entries == ibcq->cqe + 1)
+                return 0;
+
+        mutex_lock(&cq->resize_mutex);
+        if (udata) {
+                err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
+                                  &cqe_size);
+        } else {
+                cqe_size = 64;
+                err = resize_kernel(dev, cq, entries, cqe_size);
+                if (!err) {
+                        npas = cq->resize_buf->buf.npages;
+                        page_shift = cq->resize_buf->buf.page_shift;
+                }
+        }
+
+        if (err)
+                goto ex;
+
+        inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+        in = mlx5_vzalloc(inlen);
+        if (!in) {
+                err = -ENOMEM;
+                goto ex_resize;
+        }
+
+        if (udata)
+                mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
+                                     in->pas, 0);
+        else
+                mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
+
+        in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+                                       MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+                                       MLX5_MODIFY_CQ_MASK_PG_SIZE);
+        in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+        in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
+        in->ctx.page_offset = 0;
+        in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
+        in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
+        in->cqn = cpu_to_be32(cq->mcq.cqn);
+
+        err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+        if (err)
+                goto ex_alloc;
+
+        if (udata) {
+                cq->ibcq.cqe = entries - 1;
+                ib_umem_release(cq->buf.umem);
+                cq->buf.umem = cq->resize_umem;
+                cq->resize_umem = NULL;
+        } else {
+                struct mlx5_ib_cq_buf tbuf;
+                int resized = 0;
+
+                spin_lock_irqsave(&cq->lock, flags);
+                if (cq->resize_buf) {
+                        err = copy_resize_cqes(cq);
+                        if (!err) {
+                                tbuf = cq->buf;
+                                cq->buf = *cq->resize_buf;
+                                kfree(cq->resize_buf);
+                                cq->resize_buf = NULL;
+                                resized = 1;
+                        }
+                }
+                cq->ibcq.cqe = entries - 1;
+                spin_unlock_irqrestore(&cq->lock, flags);
+                if (resized)
+                        free_cq_buf(dev, &tbuf);
+        }
+        mutex_unlock(&cq->resize_mutex);
+
+        mlx5_vfree(in);
+        return 0;
+
+ex_alloc:
+        mlx5_vfree(in);
+
+ex_resize:
+        if (udata)
+                un_resize_user(cq);
+        else
+                un_resize_kernel(dev, cq);
+ex:
+        mutex_unlock(&cq->resize_mutex);
+        return err;
 }
 
 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
```
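Two details of `mlx5_ib_resize_cq()` above are worth spelling out. First, the locking split: `resize_mutex` serializes whole resize operations, while the short `spin_lock_irqsave` section only covers swapping the buffer pointers against a concurrent poller. Second, the depth arithmetic: one CQE slot is reserved, so the requested depth is bumped by one and rounded up to a power of two, and `ibcq->cqe` reports one less than the allocation. A standalone illustration of the rounding (sample input only):

```c
#include <stdio.h>

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        int requested = 100;    /* hypothetical depth passed to ib_resize_cq() */
        unsigned long entries = roundup_pow_of_two(requested + 1);

        /* allocate 128 CQEs, report 127 usable back through ibcq->cqe */
        printf("requested %d -> allocate %lu, report %lu\n",
               requested, entries, entries - 1);
        return 0;
}
```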
```diff
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 5acef30a4998..389e31965773 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -195,6 +195,7 @@ struct mlx5_ib_cq_buf {
         struct mlx5_buf         buf;
         struct ib_umem          *umem;
         int                     cqe_size;
+        int                     nent;
 };
 
 enum mlx5_ib_qp_flags {
@@ -220,7 +221,7 @@ struct mlx5_ib_cq {
         /* protect resize cq
          */
         struct mutex            resize_mutex;
-        struct mlx5_ib_cq_resize *resize_buf;
+        struct mlx5_ib_cq_buf   *resize_buf;
         struct ib_umem          *resize_umem;
         int                     cqe_size;
 };
```
```diff
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index a886de3e593c..32a2a5dfc523 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -93,6 +93,9 @@ struct mlx5_ib_create_cq_resp {
 
 struct mlx5_ib_resize_cq {
         __u64   buf_addr;
+        __u16   cqe_size;
+        __u16   reserved0;
+        __u32   reserved1;
 };
 
 struct mlx5_ib_create_srq {
```
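The two reserved fields are presumably there to pad the user ABI command to 16 bytes, so `buf_addr` stays naturally aligned and 32-bit and 64-bit userspace agree on the layout. A compile-time check of that assumption, using a userspace mirror of the struct (not the kernel header itself):

```c
#include <stdint.h>

/* Mirror of struct mlx5_ib_resize_cq after this patch. */
struct resize_cq_cmd {
        uint64_t buf_addr;
        uint16_t cqe_size;
        uint16_t reserved0;
        uint32_t reserved1;
};

/* 8 + 2 + 2 + 4 = 16 bytes, with no implicit padding on common ABIs. */
_Static_assert(sizeof(struct resize_cq_cmd) == 16,
               "user ABI struct must have the same size everywhere");

int main(void)
{
        return 0;
}
```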
```diff
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index e6fedcf94182..43c5f4809526 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -201,14 +201,14 @@ EXPORT_SYMBOL(mlx5_core_query_cq);
 
 
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-                        struct mlx5_modify_cq_mbox_in *in)
+                        struct mlx5_modify_cq_mbox_in *in, int in_sz)
 {
         struct mlx5_modify_cq_mbox_out out;
         int err;
 
         memset(&out, 0, sizeof(out));
         in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
-        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+        err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
         if (err)
                 return err;
 
```
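The extra `in_sz` parameter exists because the modify-CQ mailbox is no longer fixed-size: for a resize, `mlx5_ib_resize_cq()` appends a variable-length page-address array after the fixed header and must pass the true length through to `mlx5_cmd_exec()`. A minimal sketch of that length computation, with made-up types standing in for the mailbox structs:

```c
#include <stdio.h>

struct hdr {
        char bytes[16];                 /* hypothetical fixed mailbox header */
};

struct modify_cq_in {
        struct hdr hdr;
        unsigned long long pas[];       /* flexible array of page addresses */
};

int main(void)
{
        int npas = 4;   /* hypothetical page count for the new buffer */
        size_t inlen = sizeof(struct modify_cq_in) +
                       npas * sizeof(unsigned long long);

        /* this is the value that now travels down as in_sz */
        printf("mailbox length: %zu bytes\n", inlen);
        return 0;
}
```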
```diff
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index c3cf5a46abce..2202c7f72b75 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -79,9 +79,10 @@ enum {
         MLX5_CQE_RESP_SEND      = 2,
         MLX5_CQE_RESP_SEND_IMM  = 3,
         MLX5_CQE_RESP_SEND_INV  = 4,
-        MLX5_CQE_RESIZE_CQ      = 0xff, /* TBD */
+        MLX5_CQE_RESIZE_CQ      = 5,
         MLX5_CQE_REQ_ERR        = 13,
         MLX5_CQE_RESP_ERR       = 14,
+        MLX5_CQE_INVALID        = 15,
 };
 
 enum {
@@ -90,6 +91,13 @@ enum {
         MLX5_CQ_MODIFY_OVERRUN = 1 << 2,
 };
 
+enum {
+        MLX5_CQ_OPMOD_RESIZE            = 1,
+        MLX5_MODIFY_CQ_MASK_LOG_SIZE    = 1 << 0,
+        MLX5_MODIFY_CQ_MASK_PG_OFFSET   = 1 << 1,
+        MLX5_MODIFY_CQ_MASK_PG_SIZE     = 1 << 2,
+};
+
 struct mlx5_cq_modify_params {
         int     type;
         union {
@@ -158,7 +166,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        struct mlx5_query_cq_mbox_out *out);
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-                        struct mlx5_modify_cq_mbox_in *in);
+                        struct mlx5_modify_cq_mbox_in *in, int in_sz);
 int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 
```
```diff
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index dbb03caa8aed..87e23717df70 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -178,6 +178,7 @@ enum {
         MLX5_DEV_CAP_FLAG_ATOMIC        = 1LL << 18,
         MLX5_DEV_CAP_FLAG_ON_DMND_PG    = 1LL << 24,
         MLX5_DEV_CAP_FLAG_CQ_MODER      = 1LL << 29,
+        MLX5_DEV_CAP_FLAG_RESIZE_CQ     = 1LL << 30,
         MLX5_DEV_CAP_FLAG_RESIZE_SRQ    = 1LL << 32,
         MLX5_DEV_CAP_FLAG_REMOTE_FENCE  = 1LL << 38,
         MLX5_DEV_CAP_FLAG_TLP_HINTS     = 1LL << 39,
@@ -710,6 +711,7 @@ struct mlx5_modify_cq_mbox_in {
 
 struct mlx5_modify_cq_mbox_out {
         struct mlx5_outbox_hdr  hdr;
+        u8                      rsvd[8];
 };
 
 struct mlx5_enable_hca_mbox_in {
```