Diffstat (limited to 'drivers/infiniband/hw/mlx4/qp.c'):

 drivers/infiniband/hw/mlx4/qp.c | 131 ++++++++++++++++++++++----------
 1 file changed, 89 insertions(+), 42 deletions(-)
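This patch adds XRC (eXtended Reliable Connected) QP support to the mlx4 driver: it introduces the qp_has_rq(), get_pd() and get_cqs() helpers, takes an XRC target QP's PD and CQ from its XRC domain, programs the XRCD number into the hardware QP context, and removes the VLAN insertion logic from the send path.

For orientation, a rough and hypothetical consumer-side sketch of the verbs interface these hunks serve; xrcd and pd are assumed to exist in the caller, and the exact core-verbs plumbing of this era may differ:

	/* Hypothetical usage sketch -- not part of the patch itself. */
	struct ib_qp_init_attr attr = {};
	struct ib_qp *qp;

	attr.qp_type = IB_QPT_XRC_TGT;	/* or IB_QPT_XRC_INI on the send side */
	attr.xrcd    = xrcd;		/* assumed XRC domain; the hunk at -739
					 * takes the PD and CQ from here */

	qp = ib_create_qp(pd, &attr);	/* pd is overridden for XRC_TGT */
	if (IS_ERR(qp))
		return PTR_ERR(qp);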
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 3a91d9d8dc51..a16f0c8e6f3f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -302,15 +302,14 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
 }
 
 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-		       int is_user, int has_srq, struct mlx4_ib_qp *qp)
+		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
 {
 	/* Sanity check RQ size before proceeding */
 	if (cap->max_recv_wr > dev->dev->caps.max_wqes ||
 	    cap->max_recv_sge > dev->dev->caps.max_rq_sg)
 		return -EINVAL;
 
-	if (has_srq) {
-		/* QPs attached to an SRQ should have no RQ */
+	if (!has_rq) {
 		if (cap->max_recv_wr)
 			return -EINVAL;
 
@@ -463,6 +462,14 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
 	return 0;
 }
 
+static int qp_has_rq(struct ib_qp_init_attr *attr)
+{
+	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
+		return 0;
+
+	return !attr->srq;
+}
+
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
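The new qp_has_rq() helper states the receive-queue rule directly: XRC QPs never own an RQ (an XRC initiator is send-only, and an XRC target receives through an SRQ), and neither does any QP attached to an SRQ. The has_srq parameter of set_rq_size() is inverted to has_rq to match, as seen in the hunk above.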
@@ -479,7 +486,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
 		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
 
-	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
+	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
 	if (err)
 		goto err;
 
@@ -513,7 +520,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 
-		if (!init_attr->srq) {
+		if (qp_has_rq(init_attr)) {
 			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
 						  ucmd.db_addr, &qp->db);
 			if (err)
@@ -532,7 +539,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err;
 
-		if (!init_attr->srq) {
+		if (qp_has_rq(init_attr)) {
 			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
 			if (err)
 				goto err;
@@ -575,6 +582,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (err)
 		goto err_qpn;
 
+	if (init_attr->qp_type == IB_QPT_XRC_TGT)
+		qp->mqp.qpn |= (1 << 23);
+
 	/*
 	 * Hardware wants QPN written in big-endian order (after
 	 * shifting) for send doorbell. Precompute this value to save
@@ -592,9 +602,8 @@ err_qpn:
 
 err_wrid:
 	if (pd->uobject) {
-		if (!init_attr->srq)
-			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
-					      &qp->db);
+		if (qp_has_rq(init_attr))
+			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
 	} else {
 		kfree(qp->sq.wrid);
 		kfree(qp->rq.wrid);
@@ -610,7 +619,7 @@ err_buf:
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 
 err_db:
-	if (!pd->uobject && !init_attr->srq)
+	if (!pd->uobject && qp_has_rq(init_attr))
 		mlx4_db_free(dev->dev, &qp->db);
 
 err:
@@ -671,6 +680,33 @@ static void del_gid_entries(struct mlx4_ib_qp *qp)
 	}
 }
 
+static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
+{
+	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
+		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
+	else
+		return to_mpd(qp->ibqp.pd);
+}
+
+static void get_cqs(struct mlx4_ib_qp *qp,
+		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
+{
+	switch (qp->ibqp.qp_type) {
+	case IB_QPT_XRC_TGT:
+		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
+		*recv_cq = *send_cq;
+		break;
+	case IB_QPT_XRC_INI:
+		*send_cq = to_mcq(qp->ibqp.send_cq);
+		*recv_cq = *send_cq;
+		break;
+	default:
+		*send_cq = to_mcq(qp->ibqp.send_cq);
+		*recv_cq = to_mcq(qp->ibqp.recv_cq);
+		break;
+	}
+}
+
 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 			      int is_user)
 {
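get_pd() and get_cqs() centralize how a QP's PD and CQs are looked up: an XRC target QP is anchored to its XRC domain rather than to caller-supplied objects, so both come from the XRCD, while an XRC initiator, having no receive side, reuses its send CQ as the receive CQ. The hunks below replace direct ibqp->pd / ibqp->send_cq / ibqp->recv_cq dereferences with these helpers.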
@@ -682,8 +718,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 			printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
 			       qp->mqp.qpn);
 
-	send_cq = to_mcq(qp->ibqp.send_cq);
-	recv_cq = to_mcq(qp->ibqp.recv_cq);
+	get_cqs(qp, &send_cq, &recv_cq);
 
 	mlx4_ib_lock_cqs(send_cq, recv_cq);
 
@@ -706,7 +741,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
-		if (!qp->ibqp.srq)
+		if (qp->rq.wqe_cnt)
 			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
 					      &qp->db);
 		ib_umem_release(qp->umem);
@@ -714,7 +749,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 		kfree(qp->sq.wrid);
 		kfree(qp->rq.wrid);
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
-		if (!qp->ibqp.srq)
+		if (qp->rq.wqe_cnt)
 			mlx4_db_free(dev->dev, &qp->db);
 	}
 
@@ -725,10 +760,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 			       struct ib_qp_init_attr *init_attr,
 			       struct ib_udata *udata)
 {
-	struct mlx4_ib_dev *dev = to_mdev(pd->device);
 	struct mlx4_ib_sqp *sqp;
 	struct mlx4_ib_qp *qp;
 	int err;
+	u16 xrcdn = 0;
 
 	/*
 	 * We only support LSO and multicast loopback blocking, and
@@ -739,10 +774,20 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 		return ERR_PTR(-EINVAL);
 
 	if (init_attr->create_flags &&
-	    (pd->uobject || init_attr->qp_type != IB_QPT_UD))
+	    (udata || init_attr->qp_type != IB_QPT_UD))
 		return ERR_PTR(-EINVAL);
 
 	switch (init_attr->qp_type) {
+	case IB_QPT_XRC_TGT:
+		pd = to_mxrcd(init_attr->xrcd)->pd;
+		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
+		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
+		/* fall through */
+	case IB_QPT_XRC_INI:
+		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
+			return ERR_PTR(-ENOSYS);
+		init_attr->recv_cq = init_attr->send_cq;
+		/* fall through */
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_UD:
@@ -751,13 +796,14 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 
-		err = create_qp_common(dev, pd, init_attr, udata, 0, qp);
+		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 0, qp);
 		if (err) {
 			kfree(qp);
 			return ERR_PTR(err);
 		}
 
 		qp->ibqp.qp_num = qp->mqp.qpn;
+		qp->xrcdn = xrcdn;
 
 		break;
 	}
@@ -765,7 +811,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_GSI:
 	{
 		/* Userspace is not allowed to create special QPs: */
-		if (pd->uobject)
+		if (udata)
 			return ERR_PTR(-EINVAL);
 
 		sqp = kzalloc(sizeof *sqp, GFP_KERNEL);
@@ -774,8 +820,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 
 		qp = &sqp->qp;
 
-		err = create_qp_common(dev, pd, init_attr, udata,
-				       dev->dev->caps.sqp_start +
+		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
+				       to_mdev(pd->device)->dev->caps.sqp_start +
 				       (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
 				       init_attr->port_num - 1,
 				       qp);
@@ -801,11 +847,13 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
 {
 	struct mlx4_ib_dev *dev = to_mdev(qp->device);
 	struct mlx4_ib_qp *mqp = to_mqp(qp);
+	struct mlx4_ib_pd *pd;
 
 	if (is_qp0(dev, mqp))
 		mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
-	destroy_qp_common(dev, mqp, !!qp->pd->uobject);
+	pd = get_pd(mqp);
+	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
 
 	if (is_sqp(dev, mqp))
 		kfree(to_msqp(mqp));
@@ -821,6 +869,8 @@ static int to_mlx4_st(enum ib_qp_type type)
 	case IB_QPT_RC:		return MLX4_QP_ST_RC;
 	case IB_QPT_UC:		return MLX4_QP_ST_UC;
 	case IB_QPT_UD:		return MLX4_QP_ST_UD;
+	case IB_QPT_XRC_INI:
+	case IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
 	default:		return -1;
@@ -959,6 +1009,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
+	struct mlx4_ib_pd *pd;
+	struct mlx4_ib_cq *send_cq, *recv_cq;
 	struct mlx4_qp_context *context;
 	enum mlx4_qp_optpar optpar = 0;
 	int sqd_event;
@@ -1014,8 +1066,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
 	context->sq_size_stride |= qp->sq.wqe_shift - 4;
 
-	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
+		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
+	}
 
 	if (qp->ibqp.uobject)
 		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
@@ -1079,8 +1133,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
 	}
 
-	context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
-	context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
+	pd = get_pd(qp);
+	get_cqs(qp, &send_cq, &recv_cq);
+	context->pd = cpu_to_be32(pd->pdn);
+	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
+	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
+	context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
 
 	/* Set "fast registration enabled" for all kernel QPs */
 	if (!qp->ibqp.uobject)
@@ -1106,8 +1164,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (attr_mask & IB_QP_SQ_PSN)
 		context->next_send_psn = cpu_to_be32(attr->sq_psn);
 
-	context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn);
-
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
 		if (attr->max_dest_rd_atomic)
 			context->params2 |=
@@ -1130,8 +1186,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (attr_mask & IB_QP_RQ_PSN)
 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
 
-	context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn);
-
 	if (attr_mask & IB_QP_QKEY) {
 		context->qkey = cpu_to_be32(attr->qkey);
 		optpar |= MLX4_QP_OPTPAR_Q_KEY;
@@ -1140,7 +1194,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (ibqp->srq)
 		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
 
-	if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
 	if (cur_state == IB_QPS_INIT &&
@@ -1225,17 +1279,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	 * entries and reinitialize the QP.
 	 */
 	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
-		mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn,
+		mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
 				 ibqp->srq ? to_msrq(ibqp->srq): NULL);
-		if (ibqp->send_cq != ibqp->recv_cq)
-			mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL);
+		if (send_cq != recv_cq)
+			mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
 
 		qp->rq.head = 0;
 		qp->rq.tail = 0;
 		qp->sq.head = 0;
 		qp->sq.tail = 0;
 		qp->sq_next_wqe = 0;
-		if (!ibqp->srq)
+		if (qp->rq.wqe_cnt)
 			*qp->db.db = 0;
 	}
 
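Because send_cq and recv_cq now come from get_cqs(), the CQ-clean path on a transition to RESET handles XRC QPs, whose CQs live behind the XRCD, the same way as ordinary QPs. The final test also changes from !ibqp->srq to qp->rq.wqe_cnt, the more direct statement of whether this QP owns a receive doorbell.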
1241 1295
@@ -1547,14 +1601,13 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
1547} 1601}
1548 1602
1549static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, 1603static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
1550 struct ib_send_wr *wr, __be16 *vlan) 1604 struct ib_send_wr *wr)
1551{ 1605{
1552 memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); 1606 memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
1553 dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1607 dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1554 dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); 1608 dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1555 dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; 1609 dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
1556 memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); 1610 memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
1557 *vlan = dseg->vlan;
1558} 1611}
1559 1612
1560static void set_mlx_icrc_seg(void *dseg) 1613static void set_mlx_icrc_seg(void *dseg)
@@ -1657,7 +1710,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	__be32 uninitialized_var(lso_hdr_sz);
 	__be32 blh;
 	int i;
-	__be16 vlan = cpu_to_be16(0xffff);
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
 
@@ -1761,7 +1813,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case IB_QPT_UD:
-			set_datagram_seg(wqe, wr, &vlan);
+			set_datagram_seg(wqe, wr);
 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 
@@ -1824,11 +1876,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
 				    MLX4_WQE_CTRL_FENCE : 0) | size;
 
-		if (be16_to_cpu(vlan) < 0x1000) {
-			ctrl->ins_vlan = 1 << 6;
-			ctrl->vlan_tag = vlan;
-		}
-
 		/*
 		 * Make sure descriptor is fully written before
 		 * setting ownership bit (because HW can start
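The last three hunks remove the VLAN plumbing from the send path: set_datagram_seg() no longer reports the tag through a __be16 *vlan out-parameter, the local vlan variable is gone from mlx4_ib_post_send(), and the ctrl->ins_vlan / ctrl->vlan_tag programming of the WQE control segment is dropped. The tag itself is still carried in the datagram segment (the dseg->vlan assignment remains as context above).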