author     Jack Morgenstein <jackm@mellanox.co.il>    2005-11-09 14:26:07 -0500
committer  Roland Dreier <rolandd@cisco.com>          2005-11-10 13:22:50 -0500
commit     77369ed31daac51f4827c50d30f233c45480235a
tree       308dce81364b1cbb563942a1a57146c1808e8911 /drivers/infiniband
parent     ec914c52d6208d8752dfd85b48a9aff304911434
[IB] uverbs: have kernel return QP capabilities
Move the computation of QP capabilities (max scatter/gather entries,
max inline data, etc) into the kernel, and have the uverbs module
return the values as part of the create QP response. This keeps
precise knowledge of device limits in the low-level kernel driver.
This requires an ABI bump, so while we're making changes, get rid of
the max_sge parameter for the modify SRQ command -- it's not used and
shouldn't be there.
Signed-off-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
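The userspace-visible response struct that carries these new fields lives in the uverbs ABI header (include/rdma/ib_user_verbs.h), which is outside the diffstat shown below. As a rough sketch only, inferred from the resp.* assignments added in uverbs_cmd.c rather than copied from that header, the bumped create-QP response looks something like this:

/* Sketch, not the authoritative definition from include/rdma/ib_user_verbs.h. */
struct ib_uverbs_create_qp_resp {
        __u32 qp_handle;
        __u32 qpn;
        __u32 max_send_wr;
        __u32 max_recv_wr;
        __u32 max_send_sge;
        __u32 max_recv_sge;
        __u32 max_inline_data;
};

With these values in the response, a userspace verbs library can copy the kernel-adjusted capabilities straight into the caller's qp_init_attr.cap instead of re-deriving device limits on its own.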
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c           12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c         2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h         1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c        1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c    2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h    1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c         86
7 files changed, 92 insertions, 13 deletions
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 63a74151c60b..ed45da892b1c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -708,7 +708,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
                 resp->wc[i].opcode = wc[i].opcode;
                 resp->wc[i].vendor_err = wc[i].vendor_err;
                 resp->wc[i].byte_len = wc[i].byte_len;
-                resp->wc[i].imm_data = wc[i].imm_data;
+                resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data;
                 resp->wc[i].qp_num = wc[i].qp_num;
                 resp->wc[i].src_qp = wc[i].src_qp;
                 resp->wc[i].wc_flags = wc[i].wc_flags;
@@ -908,7 +908,12 @@ retry:
         if (ret)
                 goto err_destroy;
 
         resp.qp_handle = uobj->uobject.id;
+        resp.max_recv_sge = attr.cap.max_recv_sge;
+        resp.max_send_sge = attr.cap.max_send_sge;
+        resp.max_recv_wr = attr.cap.max_recv_wr;
+        resp.max_send_wr = attr.cap.max_send_wr;
+        resp.max_inline_data = attr.cap.max_inline_data;
 
         if (copy_to_user((void __user *) (unsigned long) cmd.response,
                          &resp, sizeof resp)) {
@@ -1135,7 +1140,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                 next->num_sge = user_wr->num_sge;
                 next->opcode = user_wr->opcode;
                 next->send_flags = user_wr->send_flags;
-                next->imm_data = user_wr->imm_data;
+                next->imm_data = (__be32 __force) user_wr->imm_data;
 
                 if (qp->qp_type == IB_QPT_UD) {
                         next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr,
@@ -1701,7 +1706,6 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
         }
 
         attr.max_wr = cmd.max_wr;
-        attr.max_sge = cmd.max_sge;
         attr.srq_limit = cmd.srq_limit;
 
         ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 49f211d55df7..9ed34587fc5c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1060,6 +1060,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
                 dev_lim->hca.arbel.resize_srq = field & 1;
                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
                 dev_lim->max_sg = min_t(int, field, dev_lim->max_sg);
+                MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
+                dev_lim->max_desc_sz = min_t(int, size, dev_lim->max_desc_sz);
                 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
                 dev_lim->mpt_entry_sz = size;
                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 808037f25c78..497ff794ef6a 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -131,6 +131,7 @@ struct mthca_limits {
         int max_sg;
         int num_qps;
         int max_wqes;
+        int max_desc_sz;
         int max_qp_init_rdma;
         int reserved_qps;
         int num_srqs;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 16594d1342df..147f248a8073 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -168,6 +168,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
         mdev->limits.max_srq_wqes = dev_lim->max_srq_sz;
         mdev->limits.reserved_srqs = dev_lim->reserved_srqs;
         mdev->limits.reserved_eecs = dev_lim->reserved_eecs;
+        mdev->limits.max_desc_sz = dev_lim->max_desc_sz;
         /*
          * Subtract 1 from the limit because we need to allocate a
          * spare CQE so the HCA HW can tell the difference between an
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index e78259b2664b..4cc7e2846df1 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -616,11 +616,11 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                 return ERR_PTR(err);
         }
 
-        init_attr->cap.max_inline_data = 0;
         init_attr->cap.max_send_wr = qp->sq.max;
         init_attr->cap.max_recv_wr = qp->rq.max;
         init_attr->cap.max_send_sge = qp->sq.max_gs;
         init_attr->cap.max_recv_sge = qp->rq.max_gs;
+        init_attr->cap.max_inline_data = qp->max_inline_data;
 
         return &qp->ibqp;
 }
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index bcd4b01a339c..1e73947b4702 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -251,6 +251,7 @@ struct mthca_qp {
         struct mthca_wq sq;
         enum ib_sig_type sq_policy;
         int send_wqe_offset;
+        int max_inline_data;
 
         u64 *wrid;
         union mthca_buf queue;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 8852ea477c21..7f39af44b274 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -885,6 +885,48 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
         return err;
 }
 
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+                                 struct mthca_pd *pd,
+                                 struct mthca_qp *qp)
+{
+        int max_data_size;
+
+        /*
+         * Calculate the maximum size of WQE s/g segments, excluding
+         * the next segment and other non-data segments.
+         */
+        max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
+                sizeof (struct mthca_next_seg);
+
+        switch (qp->transport) {
+        case MLX:
+                max_data_size -= 2 * sizeof (struct mthca_data_seg);
+                break;
+
+        case UD:
+                if (mthca_is_memfree(dev))
+                        max_data_size -= sizeof (struct mthca_arbel_ud_seg);
+                else
+                        max_data_size -= sizeof (struct mthca_tavor_ud_seg);
+                break;
+
+        default:
+                max_data_size -= sizeof (struct mthca_raddr_seg);
+                break;
+        }
+
+        /* We don't support inline data for kernel QPs (yet). */
+        if (!pd->ibpd.uobject)
+                qp->max_inline_data = 0;
+        else
+                qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
+
+        qp->sq.max_gs = max_data_size / sizeof (struct mthca_data_seg);
+        qp->rq.max_gs = (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
+                         sizeof (struct mthca_next_seg)) /
+                        sizeof (struct mthca_data_seg);
+}
+
 /*
  * Allocate and register buffer for WQEs. qp->rq.max, sq.max,
  * rq.max_gs and sq.max_gs must all be assigned.
@@ -902,27 +944,53 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
         size = sizeof (struct mthca_next_seg) +
                 qp->rq.max_gs * sizeof (struct mthca_data_seg);
 
+        if (size > dev->limits.max_desc_sz)
+                return -EINVAL;
+
         for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
              qp->rq.wqe_shift++)
                 ; /* nothing */
 
-        size = sizeof (struct mthca_next_seg) +
-                qp->sq.max_gs * sizeof (struct mthca_data_seg);
+        size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
         switch (qp->transport) {
         case MLX:
                 size += 2 * sizeof (struct mthca_data_seg);
                 break;
+
         case UD:
-                if (mthca_is_memfree(dev))
-                        size += sizeof (struct mthca_arbel_ud_seg);
-                else
-                        size += sizeof (struct mthca_tavor_ud_seg);
+                size += mthca_is_memfree(dev) ?
+                        sizeof (struct mthca_arbel_ud_seg) :
+                        sizeof (struct mthca_tavor_ud_seg);
                 break;
+
+        case UC:
+                size += sizeof (struct mthca_raddr_seg);
+                break;
+
+        case RC:
+                size += sizeof (struct mthca_raddr_seg);
+                /*
+                 * An atomic op will require an atomic segment, a
+                 * remote address segment and one scatter entry.
+                 */
+                size = max_t(int, size,
+                             sizeof (struct mthca_atomic_seg) +
+                             sizeof (struct mthca_raddr_seg) +
+                             sizeof (struct mthca_data_seg));
+                break;
+
         default:
-                /* bind seg is as big as atomic + raddr segs */
-                size += sizeof (struct mthca_bind_seg);
+                break;
         }
 
+        /* Make sure that we have enough space for a bind request */
+        size = max_t(int, size, sizeof (struct mthca_bind_seg));
+
+        size += sizeof (struct mthca_next_seg);
+
+        if (size > dev->limits.max_desc_sz)
+                return -EINVAL;
+
         for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
              qp->sq.wqe_shift++)
                 ; /* nothing */
@@ -1066,6 +1134,8 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
                 return ret;
         }
 
+        mthca_adjust_qp_caps(dev, pd, qp);
+
         /*
          * If this is a userspace QP, we're done now. The doorbells
          * will be allocated and buffers will be initialized in
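To see what mthca_adjust_qp_caps produces in practice, here is a worked example with made-up numbers (not taken from the patch): suppose dev->limits.max_desc_sz is 1024 and the send queue was sized with qp->sq.wqe_shift = 10, i.e. 1024-byte WQEs, and assume the usual 16-byte mthca_next_seg, mthca_raddr_seg and mthca_data_seg layouts. For an RC QP the usable data space is 1024 - 16 (next segment) - 16 (remote address segment) = 992 bytes, so sq.max_gs is adjusted to 992 / 16 = 62 scatter/gather entries, and a userspace QP would report max_inline_data = 992 - MTHCA_INLINE_HEADER_SIZE.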