Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c    | 236
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c   |   6
-rw-r--r--  drivers/infiniband/hw/mlx4/user.h  |   5
3 files changed, 174 insertions, 73 deletions
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5cd706908450..dc137dec2308 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -188,14 +188,32 @@ static int send_wqe_overhead(enum ib_qp_type type)
 	}
 }
 
-static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-		       enum ib_qp_type type, struct mlx4_ib_qp *qp)
+static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
+		       struct mlx4_ib_qp *qp)
 {
-	/* Sanity check QP size before proceeding */
+	/* Sanity check RQ size before proceeding */
+	if (cap->max_recv_wr  > dev->dev->caps.max_wqes  ||
+	    cap->max_recv_sge > dev->dev->caps.max_rq_sg)
+		return -EINVAL;
+
+	qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
+
+	qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
+						    sizeof (struct mlx4_wqe_data_seg)));
+	qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
+
+	cap->max_recv_wr  = qp->rq.max;
+	cap->max_recv_sge = qp->rq.max_gs;
+
+	return 0;
+}
+
+static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
+			      enum ib_qp_type type, struct mlx4_ib_qp *qp)
+{
+	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr  > dev->dev->caps.max_wqes ||
-	    cap->max_recv_wr  > dev->dev->caps.max_wqes ||
 	    cap->max_send_sge > dev->dev->caps.max_sq_sg ||
-	    cap->max_recv_sge > dev->dev->caps.max_rq_sg ||
 	    cap->max_inline_data + send_wqe_overhead(type) +
 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
 		return -EINVAL;
@@ -208,12 +226,7 @@ static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
 		return -EINVAL;
 
-	qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
-	qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 0;
-
-	qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
-						    sizeof (struct mlx4_wqe_data_seg)));
-	qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
+	qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 1;
 
 	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
 							sizeof (struct mlx4_wqe_data_seg),
@@ -233,23 +246,31 @@ static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 		qp->sq.offset = 0;
 	}
 
 	cap->max_send_wr  = qp->sq.max;
-	cap->max_recv_wr  = qp->rq.max;
 	cap->max_send_sge = qp->sq.max_gs;
-	cap->max_recv_sge = qp->rq.max_gs;
 	cap->max_inline_data = (1 << qp->sq.wqe_shift) - send_wqe_overhead(type) -
 		sizeof (struct mlx4_wqe_inline_seg);
 
 	return 0;
 }
 
+static int set_user_sq_size(struct mlx4_ib_qp *qp,
+			    struct mlx4_ib_create_qp *ucmd)
+{
+	qp->sq.max	 = 1 << ucmd->log_sq_bb_count;
+	qp->sq.wqe_shift = ucmd->log_sq_stride;
+
+	qp->buf_size = (qp->rq.max << qp->rq.wqe_shift) +
+		(qp->sq.max << qp->sq.wqe_shift);
+
+	return 0;
+}
+
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
-	struct mlx4_wqe_ctrl_seg *ctrl;
 	int err;
-	int i;
 
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
@@ -264,7 +285,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	qp->sq.head = 0;
 	qp->sq.tail = 0;
 
-	err = set_qp_size(dev, &init_attr->cap, init_attr->qp_type, qp);
+	err = set_rq_size(dev, &init_attr->cap, qp);
 	if (err)
 		goto err;
 
@@ -276,6 +297,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;
 		}
 
+		err = set_user_sq_size(qp, &ucmd);
+		if (err)
+			goto err;
+
 		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
 				       qp->buf_size, 0);
 		if (IS_ERR(qp->umem)) {
@@ -292,16 +317,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 
-		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
-					  ucmd.db_addr, &qp->db);
-		if (err)
-			goto err_mtt;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+						  ucmd.db_addr, &qp->db);
+			if (err)
+				goto err_mtt;
+		}
 	} else {
-		err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
 
-		*qp->db.db = 0;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+			if (err)
+				goto err;
+
+			*qp->db.db = 0;
+		}
 
 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
 			err = -ENOMEM;
@@ -317,11 +350,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (err)
 		goto err_mtt;
 
-	for (i = 0; i < qp->sq.max; ++i) {
-		ctrl = get_send_wqe(qp, i);
-		ctrl->owner_opcode = cpu_to_be32(1 << 31);
-	}
-
 	qp->sq.wrid = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL);
 	qp->rq.wrid = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL);
 
@@ -355,7 +383,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	return 0;
 
 err_wrid:
-	if (pd->uobject)
+	if (pd->uobject && !init_attr->srq)
 		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
 	else {
 		kfree(qp->sq.wrid);
@@ -372,7 +400,7 @@ err_buf:
 	mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 
 err_db:
-	if (!pd->uobject)
+	if (!pd->uobject && !init_attr->srq)
 		mlx4_ib_db_free(dev, &qp->db);
 
 err:
@@ -450,14 +478,16 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
-		mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
-				      &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
+					      &qp->db);
 		ib_umem_release(qp->umem);
 	} else {
 		kfree(qp->sq.wrid);
 		kfree(qp->rq.wrid);
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
-		mlx4_ib_db_free(dev, &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_free(dev, &qp->db);
 	}
 }
 
@@ -573,7 +603,7 @@ static int to_mlx4_st(enum ib_qp_type type)
 	}
 }
 
-static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, struct ib_qp_attr *attr,
+static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
 				   int attr_mask)
 {
 	u8 dest_rd_atomic;
@@ -603,7 +633,7 @@ static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, struct ib_qp_attr *att
 	return cpu_to_be32(hw_access_flags);
 }
 
-static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, struct ib_qp_attr *attr,
+static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
 			    int attr_mask)
 {
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -619,7 +649,7 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
 }
 
-static int mlx4_set_path(struct mlx4_ib_dev *dev, struct ib_ah_attr *ah,
+static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 			 struct mlx4_qp_path *path, u8 port)
 {
 	path->grh_mylmc = ah->src_path_bits & 0x7f;
@@ -655,14 +685,14 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, struct ib_ah_attr *ah,
 	return 0;
 }
 
-int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-		      int attr_mask, struct ib_udata *udata)
+static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+			       const struct ib_qp_attr *attr, int attr_mask,
+			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	struct mlx4_qp_context *context;
 	enum mlx4_qp_optpar optpar = 0;
-	enum ib_qp_state cur_state, new_state;
 	int sqd_event;
 	int err = -EINVAL;
 
@@ -670,34 +700,6 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (!context)
 		return -ENOMEM;
 
-	mutex_lock(&qp->mutex);
-
-	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
-	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
-		goto out;
-
-	if ((attr_mask & IB_QP_PKEY_INDEX) &&
-	     attr->pkey_index >= dev->dev->caps.pkey_table_len) {
-		goto out;
-	}
-
-	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > 1 << dev->dev->caps.max_qp_dest_rdma) {
-		goto out;
-	}
-
 	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
 				     (to_mlx4_st(ibqp->qp_type) << 16));
 	context->flags |= cpu_to_be32(1 << 8); /* DE? */
@@ -849,7 +851,7 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (ibqp->srq)
 		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
 
-	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+	if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
 	if (cur_state == IB_QPS_INIT &&
@@ -869,6 +871,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	else
 		sqd_event = 0;
 
+	/*
+	 * Before passing a kernel QP to the HW, make sure that the
+	 * ownership bits of the send queue are set so that the
+	 * hardware doesn't start processing stale work requests.
+	 */
+	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+		struct mlx4_wqe_ctrl_seg *ctrl;
+		int i;
+
+		for (i = 0; i < qp->sq.max; ++i) {
+			ctrl = get_send_wqe(qp, i);
+			ctrl->owner_opcode = cpu_to_be32(1 << 31);
+		}
+	}
+
 	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
 			     to_mlx4_state(new_state), context, optpar,
 			     sqd_event, &qp->mqp);
@@ -916,15 +933,89 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		qp->rq.tail = 0;
 		qp->sq.head = 0;
 		qp->sq.tail = 0;
-		*qp->db.db  = 0;
+		if (!ibqp->srq)
+			*qp->db.db  = 0;
 	}
 
 out:
-	mutex_unlock(&qp->mutex);
 	kfree(context);
 	return err;
 }
 
+static const struct ib_qp_attr mlx4_ib_qp_attr = { .port_num = 1 };
+static const int mlx4_ib_qp_attr_mask_table[IB_QPT_UD + 1] = {
+		[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
+				IB_QP_PORT		|
+				IB_QP_QKEY),
+		[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
+				IB_QP_PORT		|
+				IB_QP_ACCESS_FLAGS),
+		[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
+				IB_QP_PORT		|
+				IB_QP_ACCESS_FLAGS),
+		[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
+				IB_QP_QKEY),
+		[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
+				IB_QP_QKEY),
+};
+
+int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		      int attr_mask, struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
+	struct mlx4_ib_qp *qp = to_mqp(ibqp);
+	enum ib_qp_state cur_state, new_state;
+	int err = -EINVAL;
+
+	mutex_lock(&qp->mutex);
+
+	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
+	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+		goto out;
+
+	if ((attr_mask & IB_QP_PKEY_INDEX) &&
+	     attr->pkey_index >= dev->dev->caps.pkey_table_len) {
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PORT) &&
+	    (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
+		goto out;
+	}
+
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		err = 0;
+		goto out;
+	}
+
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
+		err = __mlx4_ib_modify_qp(ibqp, &mlx4_ib_qp_attr,
+					  mlx4_ib_qp_attr_mask_table[ibqp->qp_type],
+					  IB_QPS_RESET, IB_QPS_INIT);
+		if (err)
+			goto out;
+		cur_state = IB_QPS_INIT;
+	}
+
+	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+
+out:
+	mutex_unlock(&qp->mutex);
+	return err;
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			    void *wqe)
 {
@@ -952,6 +1043,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			(be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff;
 		sqp->ud_header.grh.flow_label =
 			ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+		sqp->ud_header.grh.hop_limit = ah->av.hop_limit;
 		ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24,
 				  ah->av.gid_index, &sqp->ud_header.grh.source_gid);
 		memcpy(sqp->ud_header.grh.destination_gid.raw,
@@ -1192,7 +1284,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		 */
 		wmb();
 
-		if (wr->opcode < 0 || wr->opcode > ARRAY_SIZE(mlx4_ib_opcode)) {
+		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
 			err = -EINVAL;
 			goto out;
 		}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 42ab4a801d6a..12fac1c8989d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -297,6 +297,12 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			break;
 		}
 
+		if (unlikely(srq->head == srq->tail)) {
+			err = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+
 		srq->wrid[srq->head] = wr->wr_id;
 
 		next = get_wqe(srq, srq->head);
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
index 5b8eddc9fa83..88c72d56368b 100644
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -39,7 +39,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define MLX4_IB_UVERBS_ABI_VERSION	1
+#define MLX4_IB_UVERBS_ABI_VERSION	2
 
 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -87,6 +87,9 @@ struct mlx4_ib_create_srq_resp {
 struct mlx4_ib_create_qp {
 	__u64	buf_addr;
 	__u64	db_addr;
+	__u8	log_sq_bb_count;
+	__u8	log_sq_stride;
+	__u8	reserved[6];
 };
 
 #endif /* MLX4_IB_USER_H */
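
Illustrative userspace sketch (not part of the patch above): with MLX4_IB_UVERBS_ABI_VERSION 2, userspace passes the log2 send-queue WQE count and stride to the kernel through the extended struct mlx4_ib_create_qp, which set_user_sq_size() consumes. Only the struct layout below comes from the patched user.h; the helper, its name, and its parameters are hypothetical.

/* Hypothetical userspace helper -- a minimal sketch, assuming the caller has
 * already allocated the QP buffer and doorbell and sized the SQ as powers of two. */
#include <stdint.h>
#include <string.h>

struct mlx4_ib_create_qp {              /* mirrors the patched kernel ABI struct */
	uint64_t buf_addr;
	uint64_t db_addr;
	uint8_t  log_sq_bb_count;       /* log2 of number of SQ WQEs */
	uint8_t  log_sq_stride;         /* log2 of SQ WQE stride in bytes */
	uint8_t  reserved[6];
};

static void fill_create_qp_cmd(struct mlx4_ib_create_qp *cmd,
			       uint64_t buf_addr, uint64_t db_addr,
			       unsigned int sq_wqe_cnt, unsigned int sq_stride)
{
	memset(cmd, 0, sizeof *cmd);
	cmd->buf_addr = buf_addr;
	cmd->db_addr  = db_addr;
	/* Both values must be powers of two; the kernel only sees the exponents,
	 * which set_user_sq_size() turns back into qp->sq.max and qp->sq.wqe_shift. */
	cmd->log_sq_bb_count = __builtin_ctz(sq_wqe_cnt);
	cmd->log_sq_stride   = __builtin_ctz(sq_stride);
}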