path: root/drivers
author    Jack Morgenstein <jackm@mellanox.co.il>  2006-01-06 15:57:30 -0500
committer Roland Dreier <rolandd@cisco.com>  2006-01-06 15:57:30 -0500
commit    5b3bc7a68171138d52b1b62012c37ac888895460 (patch)
tree      63960e2e3f8db5bbe7ef5c9d64e1c5ea3a9124f5 /drivers
parent    466200562ccd80f728f7ef602d2b97b4fdedd566 (diff)
IB/mthca: max_inline_data handling tweaks
Fix a case where copying max_inline_data from a successful create_qp capabilities output back into create_qp input could cause an EINVAL error: mthca_set_qp_size must check max_inline_data directly against max_desc_sz; checking qp->sq.max_gs is wrong, since max_inline_data depends on the QP type and does not involve max_sg.

Signed-off-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
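For context, the failure mode the message describes can be sketched from userspace with the standard libibverbs API. This snippet is illustrative only and not part of the patch; the helper name and cap values are made up. ibv_create_qp() updates the caller's cap structure with the capabilities actually granted, and resubmitting those granted values, including max_inline_data, to a second ibv_create_qp() call is the round trip that could previously fail with EINVAL on mthca:

#include <infiniband/verbs.h>

/*
 * Illustrative only: create a QP, then create a second QP using the
 * capabilities the first call reported back.  The helper name and the
 * specific cap values are hypothetical, not taken from the patch.
 */
static struct ibv_qp *recreate_qp_from_granted_caps(struct ibv_pd *pd,
						    struct ibv_cq *cq)
{
	struct ibv_qp_init_attr attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.qp_type = IBV_QPT_RC,
		.cap     = {
			.max_send_wr     = 16,
			.max_recv_wr     = 16,
			.max_send_sge    = 1,
			.max_recv_sge    = 1,
			.max_inline_data = 64,
		},
	};

	/* On success, ibv_create_qp() writes the granted values into attr.cap. */
	struct ibv_qp *qp = ibv_create_qp(pd, &attr);
	if (!qp)
		return NULL;
	ibv_destroy_qp(qp);

	/*
	 * Re-submitting the granted capabilities verbatim (notably
	 * attr.cap.max_inline_data) should always succeed; before this fix
	 * the mthca driver could reject the request with EINVAL.
	 */
	return ibv_create_qp(pd, &attr);
}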
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  62
1 file changed, 36 insertions(+), 26 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index ea45fa400fab..fd60cf3a5ba3 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -890,18 +890,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	return err;
 }
 
-static void mthca_adjust_qp_caps(struct mthca_dev *dev,
-				 struct mthca_pd *pd,
-				 struct mthca_qp *qp)
+static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
 {
-	int max_data_size;
-
 	/*
 	 * Calculate the maximum size of WQE s/g segments, excluding
 	 * the next segment and other non-data segments.
 	 */
-	max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
-		sizeof (struct mthca_next_seg);
+	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);
 
 	switch (qp->transport) {
 	case MLX:
@@ -920,11 +915,24 @@ static void mthca_adjust_qp_caps(struct mthca_dev *dev,
 		break;
 	}
 
+	return max_data_size;
+}
+
+static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
+{
 	/* We don't support inline data for kernel QPs (yet). */
-	if (!pd->ibpd.uobject)
-		qp->max_inline_data = 0;
-	else
-		qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
+	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
+}
+
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+				 struct mthca_pd *pd,
+				 struct mthca_qp *qp)
+{
+	int max_data_size = mthca_max_data_size(dev, qp,
+						min(dev->limits.max_desc_sz,
+						    1 << qp->sq.wqe_shift));
+
+	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);
 
 	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
 			      max_data_size / sizeof (struct mthca_data_seg));
@@ -1191,13 +1199,23 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 }
 
 static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
-			     struct mthca_qp *qp)
+			     struct mthca_pd *pd, struct mthca_qp *qp)
 {
+	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
+
 	/* Sanity check QP size before proceeding */
 	if (cap->max_send_wr  > dev->limits.max_wqes ||
 	    cap->max_recv_wr  > dev->limits.max_wqes ||
 	    cap->max_send_sge > dev->limits.max_sg   ||
-	    cap->max_recv_sge > dev->limits.max_sg)
+	    cap->max_recv_sge > dev->limits.max_sg   ||
+	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
+		return -EINVAL;
+
+	/*
+	 * For MLX transport we need 2 extra S/G entries:
+	 * one for the header and one for the checksum at the end
+	 */
+	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
 		return -EINVAL;
 
 	if (mthca_is_memfree(dev)) {
@@ -1216,14 +1234,6 @@ static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
 				    MTHCA_INLINE_CHUNK_SIZE) /
 			    sizeof (struct mthca_data_seg));
 
-	/*
-	 * For MLX transport we need 2 extra S/G entries:
-	 * one for the header and one for the checksum at the end
-	 */
-	if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
-	    qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
-		return -EINVAL;
-
 	return 0;
 }
 
@@ -1238,7 +1248,7 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 {
 	int err;
 
-	err = mthca_set_qp_size(dev, cap, qp);
+	err = mthca_set_qp_size(dev, cap, pd, qp);
 	if (err)
 		return err;
 
@@ -1281,7 +1291,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
 	int err;
 
-	err = mthca_set_qp_size(dev, cap, &sqp->qp);
+	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
 	if (err)
 		return err;
 