 drivers/infiniband/core/uverbs_cmd.c         | 12
 drivers/infiniband/hw/mthca/mthca_cmd.c      |  2
 drivers/infiniband/hw/mthca/mthca_dev.h      |  1
 drivers/infiniband/hw/mthca/mthca_main.c     |  1
 drivers/infiniband/hw/mthca/mthca_provider.c |  2
 drivers/infiniband/hw/mthca/mthca_provider.h |  1
 drivers/infiniband/hw/mthca/mthca_qp.c       | 86
 include/rdma/ib_user_verbs.h                 |  9
 8 files changed, 98 insertions(+), 16 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 63a74151c60b..ed45da892b1c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -708,7 +708,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
 		resp->wc[i].opcode     = wc[i].opcode;
 		resp->wc[i].vendor_err = wc[i].vendor_err;
 		resp->wc[i].byte_len   = wc[i].byte_len;
-		resp->wc[i].imm_data   = wc[i].imm_data;
+		resp->wc[i].imm_data   = (__u32 __force) wc[i].imm_data;
 		resp->wc[i].qp_num     = wc[i].qp_num;
 		resp->wc[i].src_qp     = wc[i].src_qp;
 		resp->wc[i].wc_flags   = wc[i].wc_flags;
@@ -908,7 +908,12 @@ retry:
 	if (ret)
 		goto err_destroy;
 
 	resp.qp_handle       = uobj->uobject.id;
+	resp.max_recv_sge    = attr.cap.max_recv_sge;
+	resp.max_send_sge    = attr.cap.max_send_sge;
+	resp.max_recv_wr     = attr.cap.max_recv_wr;
+	resp.max_send_wr     = attr.cap.max_send_wr;
+	resp.max_inline_data = attr.cap.max_inline_data;
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp)) {
@@ -1135,7 +1140,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 		next->num_sge    = user_wr->num_sge;
 		next->opcode     = user_wr->opcode;
 		next->send_flags = user_wr->send_flags;
-		next->imm_data   = user_wr->imm_data;
+		next->imm_data   = (__be32 __force) user_wr->imm_data;
 
 		if (qp->qp_type == IB_QPT_UD) {
 			next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr,
@@ -1701,7 +1706,6 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
 	}
 
 	attr.max_wr    = cmd.max_wr;
-	attr.max_sge   = cmd.max_sge;
 	attr.srq_limit = cmd.srq_limit;
 
 	ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
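For context, the new create-QP response fields are what let a userspace verbs library report the queue sizes the kernel and HCA driver actually granted, rather than echoing back whatever the application requested. The following sketch is illustrative only and not part of the patch: it mirrors the patched struct ib_uverbs_create_qp_resp layout, and the qp_caps struct and fill_qp_caps() helper are hypothetical names used here to show a consumer copying the returned values into an application-visible capabilities structure.

/* Illustrative only -- mirrors the ABI v4 response layout added above.
 * The qp_caps struct and fill_qp_caps() helper are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct uverbs_create_qp_resp {          /* matches struct ib_uverbs_create_qp_resp */
	uint32_t qp_handle;
	uint32_t qpn;
	uint32_t max_send_wr;
	uint32_t max_recv_wr;
	uint32_t max_send_sge;
	uint32_t max_recv_sge;
	uint32_t max_inline_data;
};

struct qp_caps {                        /* application-visible capabilities */
	uint32_t max_send_wr, max_recv_wr;
	uint32_t max_send_sge, max_recv_sge;
	uint32_t max_inline_data;
};

/* Copy the kernel's answer back to the caller instead of assuming the
 * requested values were granted unchanged. */
static void fill_qp_caps(const struct uverbs_create_qp_resp *resp,
                         struct qp_caps *cap)
{
	cap->max_send_wr     = resp->max_send_wr;
	cap->max_recv_wr     = resp->max_recv_wr;
	cap->max_send_sge    = resp->max_send_sge;
	cap->max_recv_sge    = resp->max_recv_sge;
	cap->max_inline_data = resp->max_inline_data;
}

int main(void)
{
	/* Example response values, chosen only for demonstration. */
	struct uverbs_create_qp_resp resp = {
		.qp_handle = 1, .qpn = 0x41,
		.max_send_wr = 128, .max_recv_wr = 128,
		.max_send_sge = 4,  .max_recv_sge = 4,
		.max_inline_data = 60,
	};
	struct qp_caps cap;

	fill_qp_caps(&resp, &cap);
	printf("granted: %u send WRs, %u inline bytes\n",
	       cap.max_send_wr, cap.max_inline_data);
	return 0;
}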
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 49f211d55df7..9ed34587fc5c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1060,6 +1060,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 		dev_lim->hca.arbel.resize_srq = field & 1;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
 		dev_lim->max_sg = min_t(int, field, dev_lim->max_sg);
+		MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
+		dev_lim->max_desc_sz = min_t(int, size, dev_lim->max_desc_sz);
 		MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
 		dev_lim->mpt_entry_sz = size;
 		MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
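The hunk above reads one more QUERY_DEV_LIM field from the firmware mailbox: the maximum WQE descriptor size, which the driver then caps against its own default. As a rough user-space stand-in for what a MTHCA_GET-style accessor does, the sketch below extracts a big-endian field at a fixed byte offset from a mailbox buffer; the offset value and mailbox contents are invented for illustration, and the real macro lives in mthca_cmd.c.

/* Rough stand-in for a MTHCA_GET-style accessor: pull a big-endian
 * field out of a firmware mailbox ("outbox") at a fixed byte offset.
 * The offset and contents here are made up for illustration. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohs/htons for big-endian conversion */

#define EXAMPLE_MAX_DESC_SZ_OFFSET 0x2a   /* illustrative offset, not the real one */

static uint16_t get_be16(const uint8_t *outbox, size_t offset)
{
	uint16_t v;

	memcpy(&v, outbox + offset, sizeof v);
	return ntohs(v);
}

int main(void)
{
	uint8_t outbox[0x100] = { 0 };
	uint16_t be = htons(1008);   /* pretend firmware reported 1008 bytes */

	memcpy(outbox + EXAMPLE_MAX_DESC_SZ_OFFSET, &be, sizeof be);

	printf("max_desc_sz = %u\n",
	       get_be16(outbox, EXAMPLE_MAX_DESC_SZ_OFFSET));
	return 0;
}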
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 808037f25c78..497ff794ef6a 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -131,6 +131,7 @@ struct mthca_limits {
 	int      max_sg;
 	int      num_qps;
 	int      max_wqes;
+	int      max_desc_sz;
 	int      max_qp_init_rdma;
 	int      reserved_qps;
 	int      num_srqs;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 16594d1342df..147f248a8073 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -168,6 +168,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
 	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
 	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
 	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
+	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
 	/*
 	 * Subtract 1 from the limit because we need to allocate a
 	 * spare CQE so the HCA HW can tell the difference between an
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index e78259b2664b..4cc7e2846df1 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -616,11 +616,11 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 		return ERR_PTR(err);
 	}
 
-	init_attr->cap.max_inline_data = 0;
 	init_attr->cap.max_send_wr     = qp->sq.max;
 	init_attr->cap.max_recv_wr     = qp->rq.max;
 	init_attr->cap.max_send_sge    = qp->sq.max_gs;
 	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
+	init_attr->cap.max_inline_data = qp->max_inline_data;
 
 	return &qp->ibqp;
 }
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index bcd4b01a339c..1e73947b4702 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -251,6 +251,7 @@ struct mthca_qp {
 	struct mthca_wq        sq;
 	enum ib_sig_type       sq_policy;
 	int                    send_wqe_offset;
+	int                    max_inline_data;
 
 	u64                   *wrid;
 	union mthca_buf        queue;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 8852ea477c21..7f39af44b274 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -885,6 +885,48 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	return err;
 }
 
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+				 struct mthca_pd *pd,
+				 struct mthca_qp *qp)
+{
+	int max_data_size;
+
+	/*
+	 * Calculate the maximum size of WQE s/g segments, excluding
+	 * the next segment and other non-data segments.
+	 */
+	max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
+		sizeof (struct mthca_next_seg);
+
+	switch (qp->transport) {
+	case MLX:
+		max_data_size -= 2 * sizeof (struct mthca_data_seg);
+		break;
+
+	case UD:
+		if (mthca_is_memfree(dev))
+			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
+		else
+			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
+		break;
+
+	default:
+		max_data_size -= sizeof (struct mthca_raddr_seg);
+		break;
+	}
+
+	/* We don't support inline data for kernel QPs (yet). */
+	if (!pd->ibpd.uobject)
+		qp->max_inline_data = 0;
+	else
+		qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
+
+	qp->sq.max_gs = max_data_size / sizeof (struct mthca_data_seg);
+	qp->rq.max_gs = (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
+			 sizeof (struct mthca_next_seg)) /
+			sizeof (struct mthca_data_seg);
+}
+
 /*
  * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
  * rq.max_gs and sq.max_gs must all be assigned.
@@ -902,27 +944,53 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 	size = sizeof (struct mthca_next_seg) +
 		qp->rq.max_gs * sizeof (struct mthca_data_seg);
 
+	if (size > dev->limits.max_desc_sz)
+		return -EINVAL;
+
 	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
 	     qp->rq.wqe_shift++)
 		; /* nothing */
 
-	size = sizeof (struct mthca_next_seg) +
-		qp->sq.max_gs * sizeof (struct mthca_data_seg);
+	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
 	switch (qp->transport) {
 	case MLX:
 		size += 2 * sizeof (struct mthca_data_seg);
 		break;
+
 	case UD:
-		if (mthca_is_memfree(dev))
-			size += sizeof (struct mthca_arbel_ud_seg);
-		else
-			size += sizeof (struct mthca_tavor_ud_seg);
+		size += mthca_is_memfree(dev) ?
+			sizeof (struct mthca_arbel_ud_seg) :
+			sizeof (struct mthca_tavor_ud_seg);
 		break;
+
+	case UC:
+		size += sizeof (struct mthca_raddr_seg);
+		break;
+
+	case RC:
+		size += sizeof (struct mthca_raddr_seg);
+		/*
+		 * An atomic op will require an atomic segment, a
+		 * remote address segment and one scatter entry.
+		 */
+		size = max_t(int, size,
+			     sizeof (struct mthca_atomic_seg) +
+			     sizeof (struct mthca_raddr_seg) +
+			     sizeof (struct mthca_data_seg));
+		break;
+
 	default:
-		/* bind seg is as big as atomic + raddr segs */
-		size += sizeof (struct mthca_bind_seg);
+		break;
 	}
 
+	/* Make sure that we have enough space for a bind request */
+	size = max_t(int, size, sizeof (struct mthca_bind_seg));
+
+	size += sizeof (struct mthca_next_seg);
+
+	if (size > dev->limits.max_desc_sz)
+		return -EINVAL;
+
 	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
 	     qp->sq.wqe_shift++)
 		; /* nothing */
@@ -1066,6 +1134,8 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 		return ret;
 	}
 
+	mthca_adjust_qp_caps(dev, pd, qp);
+
 	/*
 	 * If this is a userspace QP, we're done now.  The doorbells
 	 * will be allocated and buffers will be initialized in
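The mthca changes above do two things: mthca_alloc_wqe_buf() rounds each WQE descriptor up to a power of two (the wqe_shift loops) and rejects requests that exceed the device's max_desc_sz, and mthca_adjust_qp_caps() then works backwards from the rounded-up descriptor to report how many s/g entries and inline bytes actually fit. The standalone sketch below illustrates that sizing logic; the segment sizes and limits are placeholder constants, not the real mthca values.

/* Standalone illustration of the WQE sizing logic above: round the
 * descriptor up to a power of two, then derive max_gs and
 * max_inline_data from the space that rounding leaves over.
 * Segment sizes and limits are placeholders, not real mthca values. */
#include <stdio.h>

#define NEXT_SEG_SZ      16   /* placeholder for sizeof(struct mthca_next_seg) */
#define DATA_SEG_SZ      16   /* placeholder for sizeof(struct mthca_data_seg) */
#define INLINE_HDR_SZ     4   /* placeholder for MTHCA_INLINE_HEADER_SIZE */
#define MAX_DESC_SZ    1024   /* placeholder for dev->limits.max_desc_sz */

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int max_send_sge = 5;     /* what the application asked for */
	int size, wqe_shift, max_data_size;

	/* Same loop shape as mthca_alloc_wqe_buf(): smallest power of
	 * two >= size, never below 64 bytes (shift 6). */
	size = NEXT_SEG_SZ + max_send_sge * DATA_SEG_SZ;
	for (wqe_shift = 6; (1 << wqe_shift) < size; wqe_shift++)
		; /* nothing */

	/* Same idea as mthca_adjust_qp_caps(): the rounded-up
	 * descriptor usually holds more s/g entries than requested. */
	max_data_size = min_int(MAX_DESC_SZ, 1 << wqe_shift) - NEXT_SEG_SZ;

	printf("requested %d SGEs -> %d-byte WQE (shift %d)\n",
	       max_send_sge, 1 << wqe_shift, wqe_shift);
	printf("adjusted max_gs = %d, max_inline_data = %d\n",
	       max_data_size / DATA_SEG_SZ,
	       max_data_size - INLINE_HDR_SZ);
	return 0;
}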
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 072f3a2edace..5ff1490c08db 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -43,7 +43,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define IB_USER_VERBS_ABI_VERSION	3
+#define IB_USER_VERBS_ABI_VERSION	4
 
 enum {
 	IB_USER_VERBS_CMD_GET_CONTEXT,
@@ -333,6 +333,11 @@ struct ib_uverbs_create_qp {
 struct ib_uverbs_create_qp_resp {
 	__u32 qp_handle;
 	__u32 qpn;
+	__u32 max_send_wr;
+	__u32 max_recv_wr;
+	__u32 max_send_sge;
+	__u32 max_recv_sge;
+	__u32 max_inline_data;
 };
 
 /*
@@ -552,9 +557,7 @@ struct ib_uverbs_modify_srq {
 	__u32 srq_handle;
 	__u32 attr_mask;
 	__u32 max_wr;
-	__u32 max_sge;
 	__u32 srq_limit;
-	__u32 reserved;
 	__u64 driver_data[0];
 };
 
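The ABI bump at the top of this file is what protects old binaries from the layout changes below it: struct ib_uverbs_create_qp_resp grows by five fields and struct ib_uverbs_modify_srq loses two, so a library built against ABI 3 would misparse responses from an ABI 4 kernel and vice versa. A minimal sketch of the check a userspace library would want to perform is shown below, assuming the uverbs module exports its ABI version at the sysfs path used here; the path and the rest of the program are illustrative.

/* Sketch of a userspace ABI check after this bump.  The sysfs path is
 * assumed; adjust if the uverbs module exports it elsewhere. */
#include <stdio.h>

#define EXPECTED_UVERBS_ABI 4   /* must match IB_USER_VERBS_ABI_VERSION */

int main(void)
{
	const char *path = "/sys/class/infiniband_verbs/abi_version";
	FILE *f = fopen(path, "r");
	int abi = -1;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &abi) != 1) {
		fprintf(stderr, "couldn't parse %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);

	if (abi != EXPECTED_UVERBS_ABI) {
		fprintf(stderr, "kernel uverbs ABI %d != expected %d; "
			"response structs differ\n", abi, EXPECTED_UVERBS_ABI);
		return 1;
	}
	printf("uverbs ABI %d OK\n", abi);
	return 0;
}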