 drivers/infiniband/core/user_mad.c             | 41
 drivers/infiniband/core/uverbs.h               | 11
 drivers/infiniband/core/uverbs_cmd.c           | 90
 drivers/infiniband/core/uverbs_main.c          | 21
 drivers/infiniband/hw/mthca/mthca_qp.c         | 34
 drivers/infiniband/hw/mthca/mthca_wqe.h        |  3
 drivers/infiniband/ulp/ipoib/ipoib_ib.c        |  4
 drivers/infiniband/ulp/ipoib/ipoib_main.c      | 11
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 10
 9 files changed, 170 insertions(+), 55 deletions(-)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index e73f81c22381..eb7f52537ccc 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -310,7 +310,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	u8 method;
 	__be64 *tid;
 	int ret, length, hdr_len, copy_offset;
-	int rmpp_active = 0;
+	int rmpp_active, has_rmpp_header;
 
 	if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
 		return -EINVAL;
@@ -360,28 +360,31 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	}
 
 	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
-	if (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) {
-		/* RMPP active */
-		if (!agent->rmpp_version) {
-			ret = -EINVAL;
-			goto err_ah;
-		}
-
-		/* Validate that the management class can support RMPP */
-		if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
-			hdr_len = IB_MGMT_SA_HDR;
-		} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-			   (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
-			hdr_len = IB_MGMT_VENDOR_HDR;
-		} else {
-			ret = -EINVAL;
-			goto err_ah;
-		}
-		rmpp_active = 1;
+	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
+		hdr_len = IB_MGMT_SA_HDR;
 		copy_offset = IB_MGMT_RMPP_HDR;
+		has_rmpp_header = 1;
+	} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
+		   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
+		hdr_len = IB_MGMT_VENDOR_HDR;
+		copy_offset = IB_MGMT_RMPP_HDR;
+		has_rmpp_header = 1;
 	} else {
 		hdr_len = IB_MGMT_MAD_HDR;
 		copy_offset = IB_MGMT_MAD_HDR;
+		has_rmpp_header = 0;
+	}
+
+	if (has_rmpp_header)
+		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+			      IB_MGMT_RMPP_FLAG_ACTIVE;
+	else
+		rmpp_active = 0;
+
+	/* Validate that the management class can support RMPP */
+	if (rmpp_active && !agent->rmpp_version) {
+		ret = -EINVAL;
+		goto err_ah;
 	}
 
 	packet->msg = ib_create_send_mad(agent,
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index ecb830127865..7114e3fbab00 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -105,12 +105,23 @@ struct ib_uverbs_event {
 	u32 *counter;
 };
 
+struct ib_uverbs_mcast_entry {
+	struct list_head list;
+	union ib_gid gid;
+	u16 lid;
+};
+
 struct ib_uevent_object {
 	struct ib_uobject uobject;
 	struct list_head event_list;
 	u32 events_reported;
 };
 
+struct ib_uqp_object {
+	struct ib_uevent_object uevent;
+	struct list_head mcast_list;
+};
+
 struct ib_ucq_object {
 	struct ib_uobject uobject;
 	struct ib_uverbs_file *uverbs_file;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index ed45da892b1c..a57d021d435a 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -815,7 +815,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	struct ib_uverbs_create_qp cmd;
 	struct ib_uverbs_create_qp_resp resp;
 	struct ib_udata udata;
-	struct ib_uevent_object *uobj;
+	struct ib_uqp_object *uobj;
 	struct ib_pd *pd;
 	struct ib_cq *scq, *rcq;
 	struct ib_srq *srq;
@@ -866,10 +866,11 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	attr.cap.max_recv_sge = cmd.max_recv_sge;
 	attr.cap.max_inline_data = cmd.max_inline_data;
 
-	uobj->uobject.user_handle = cmd.user_handle;
-	uobj->uobject.context = file->ucontext;
-	uobj->events_reported = 0;
-	INIT_LIST_HEAD(&uobj->event_list);
+	uobj->uevent.uobject.user_handle = cmd.user_handle;
+	uobj->uevent.uobject.context = file->ucontext;
+	uobj->uevent.events_reported = 0;
+	INIT_LIST_HEAD(&uobj->uevent.event_list);
+	INIT_LIST_HEAD(&uobj->mcast_list);
 
 	qp = pd->device->create_qp(pd, &attr, &udata);
 	if (IS_ERR(qp)) {
@@ -882,7 +883,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	qp->send_cq = attr.send_cq;
 	qp->recv_cq = attr.recv_cq;
 	qp->srq = attr.srq;
-	qp->uobject = &uobj->uobject;
+	qp->uobject = &uobj->uevent.uobject;
 	qp->event_handler = attr.event_handler;
 	qp->qp_context = attr.qp_context;
 	qp->qp_type = attr.qp_type;
@@ -901,14 +902,14 @@ retry:
 		goto err_destroy;
 	}
 
-	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
+	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uevent.uobject.id);
 
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		goto err_destroy;
 
-	resp.qp_handle = uobj->uobject.id;
+	resp.qp_handle = uobj->uevent.uobject.id;
 	resp.max_recv_sge = attr.cap.max_recv_sge;
 	resp.max_send_sge = attr.cap.max_send_sge;
 	resp.max_recv_wr = attr.cap.max_recv_wr;
@@ -922,7 +923,7 @@ retry:
 	}
 
 	down(&file->mutex);
-	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
+	list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list);
 	up(&file->mutex);
 
 	up(&ib_uverbs_idr_mutex);
@@ -930,7 +931,7 @@ retry:
 	return in_len;
 
 err_idr:
-	idr_remove(&ib_uverbs_qp_idr, uobj->uobject.id);
+	idr_remove(&ib_uverbs_qp_idr, uobj->uevent.uobject.id);
 
 err_destroy:
 	ib_destroy_qp(qp);
@@ -1032,7 +1033,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	struct ib_uverbs_destroy_qp cmd;
 	struct ib_uverbs_destroy_qp_resp resp;
 	struct ib_qp *qp;
-	struct ib_uevent_object *uobj;
+	struct ib_uqp_object *uobj;
 	int ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1046,7 +1047,12 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	if (!qp || qp->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);
+	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+
+	if (!list_empty(&uobj->mcast_list)) {
+		ret = -EBUSY;
+		goto out;
+	}
 
 	ret = ib_destroy_qp(qp);
 	if (ret)
@@ -1055,12 +1061,12 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
 
 	down(&file->mutex);
-	list_del(&uobj->uobject.list);
+	list_del(&uobj->uevent.uobject.list);
 	up(&file->mutex);
 
-	ib_uverbs_release_uevent(file, uobj);
+	ib_uverbs_release_uevent(file, &uobj->uevent);
 
-	resp.events_reported = uobj->events_reported;
+	resp.events_reported = uobj->uevent.events_reported;
 
 	kfree(uobj);
 
@@ -1542,6 +1548,8 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
 {
 	struct ib_uverbs_attach_mcast cmd;
 	struct ib_qp *qp;
+	struct ib_uqp_object *uobj;
+	struct ib_uverbs_mcast_entry *mcast;
 	int ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1550,9 +1558,36 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
 	down(&ib_uverbs_idr_mutex);
 
 	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
-	if (qp && qp->uobject->context == file->ucontext)
-		ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+	if (!qp || qp->uobject->context != file->ucontext)
+		goto out;
+
+	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+
+	list_for_each_entry(mcast, &uobj->mcast_list, list)
+		if (cmd.mlid == mcast->lid &&
+		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
+			ret = 0;
+			goto out;
+		}
 
+	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+	if (!mcast) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mcast->lid = cmd.mlid;
+	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
+
+	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
+	if (!ret) {
+		uobj = container_of(qp->uobject, struct ib_uqp_object,
+				    uevent.uobject);
+		list_add_tail(&mcast->list, &uobj->mcast_list);
+	} else
+		kfree(mcast);
+
+out:
 	up(&ib_uverbs_idr_mutex);
 
 	return ret ? ret : in_len;
@@ -1563,7 +1598,9 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
 			       int out_len)
 {
 	struct ib_uverbs_detach_mcast cmd;
+	struct ib_uqp_object *uobj;
 	struct ib_qp *qp;
+	struct ib_uverbs_mcast_entry *mcast;
 	int ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1572,9 +1609,24 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
 	down(&ib_uverbs_idr_mutex);
 
 	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
-	if (qp && qp->uobject->context == file->ucontext)
-		ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+	if (!qp || qp->uobject->context != file->ucontext)
+		goto out;
+
+	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+	if (ret)
+		goto out;
 
+	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+
+	list_for_each_entry(mcast, &uobj->mcast_list, list)
+		if (cmd.mlid == mcast->lid &&
+		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
+			list_del(&mcast->list);
+			kfree(mcast);
+			break;
+		}
+
+out:
 	up(&ib_uverbs_idr_mutex);
 
 	return ret ? ret : in_len;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index de6581d7cb8d..81737bd6faea 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -160,6 +160,18 @@ void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
 	spin_unlock_irq(&file->async_file->lock);
 }
 
+static void ib_uverbs_detach_umcast(struct ib_qp *qp,
+				    struct ib_uqp_object *uobj)
+{
+	struct ib_uverbs_mcast_entry *mcast, *tmp;
+
+	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
+		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
+		list_del(&mcast->list);
+		kfree(mcast);
+	}
+}
+
 static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 				      struct ib_ucontext *context)
 {
@@ -180,13 +192,14 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 
 	list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
 		struct ib_qp *qp = idr_find(&ib_uverbs_qp_idr, uobj->id);
-		struct ib_uevent_object *uevent =
-			container_of(uobj, struct ib_uevent_object, uobject);
+		struct ib_uqp_object *uqp =
+			container_of(uobj, struct ib_uqp_object, uevent.uobject);
 		idr_remove(&ib_uverbs_qp_idr, uobj->id);
+		ib_uverbs_detach_umcast(qp, uqp);
 		ib_destroy_qp(qp);
 		list_del(&uobj->list);
-		ib_uverbs_release_uevent(file, uevent);
-		kfree(uevent);
+		ib_uverbs_release_uevent(file, &uqp->uevent);
+		kfree(uqp);
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index dd4e13303e96..7450550db736 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -871,7 +871,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 				qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
 		mthca_wq_init(&qp->sq);
+		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+
 		mthca_wq_init(&qp->rq);
+		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
 
 		if (mthca_is_memfree(dev)) {
 			*qp->sq.db = 0;
@@ -1819,6 +1822,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
+	__be32 doorbell[2];
 	void *wqe;
 	void *prev_wqe;
 	unsigned long flags;
@@ -1838,6 +1842,34 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	ind = qp->sq.head & (qp->sq.max - 1);
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
+			nreq = 0;
+
+			doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
+						  ((qp->sq.head & 0xffff) << 8) |
+						  f0 | op0);
+			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
+
+			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
+			size0 = 0;
+
+			/*
+			 * Make sure that descriptors are written before
+			 * doorbell record.
+			 */
+			wmb();
+			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
+
+			/*
+			 * Make sure doorbell record is written before we
+			 * write MMIO send doorbell.
+			 */
+			wmb();
+			mthca_write64(doorbell,
+				      dev->kar + MTHCA_SEND_DOORBELL,
+				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+		}
+
 		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
 			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
 				  " %d max, %d nreq)\n", qp->qpn,
@@ -2014,8 +2046,6 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 out:
 	if (likely(nreq)) {
-		__be32 doorbell[2];
-
 		doorbell[0] = cpu_to_be32((nreq << 24) |
 					  ((qp->sq.head & 0xffff) << 8) |
 					  f0 | op0);
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
index 73f1c0b9021e..e7d2c1e86199 100644
--- a/drivers/infiniband/hw/mthca/mthca_wqe.h
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -50,7 +50,8 @@ enum {
 
 enum {
 	MTHCA_INVAL_LKEY = 0x100,
-	MTHCA_TAVOR_MAX_WQES_PER_RECV_DB = 256
+	MTHCA_TAVOR_MAX_WQES_PER_RECV_DB = 256,
+	MTHCA_ARBEL_MAX_WQES_PER_SEND_DB = 255
 };
 
 struct mthca_next_seg {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 54ef2fea530f..23885801b6d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -608,9 +608,13 @@ void ipoib_ib_dev_flush(void *_dev)
 	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		ipoib_ib_dev_up(dev);
 
+	down(&priv->vlan_mutex);
+
 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
 		ipoib_ib_dev_flush(&cpriv->dev);
+
+	up(&priv->vlan_mutex);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2fa30751f362..475d98fa9e26 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -94,8 +94,10 @@ int ipoib_open(struct net_device *dev)
 	if (ipoib_ib_dev_open(dev))
 		return -EINVAL;
 
-	if (ipoib_ib_dev_up(dev))
+	if (ipoib_ib_dev_up(dev)) {
+		ipoib_ib_dev_stop(dev);
 		return -EINVAL;
+	}
 
 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 		struct ipoib_dev_priv *cpriv;
@@ -398,9 +400,9 @@ static void path_rec_completion(int status,
 			while ((skb = __skb_dequeue(&neigh->queue)))
 				__skb_queue_tail(&skqueue, skb);
 		}
-	} else
-		path->query = NULL;
+	}
 
+	path->query = NULL;
 	complete(&path->done);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -428,7 +430,6 @@ static struct ipoib_path *path_rec_create(struct net_device *dev,
 	skb_queue_head_init(&path->queue);
 
 	INIT_LIST_HEAD(&path->neigh_list);
-	init_completion(&path->done);
 
 	memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
 	path->pathrec.sgid = priv->local_gid;
@@ -446,6 +447,8 @@ static int path_rec_start(struct net_device *dev,
 	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
 		  IPOIB_GID_ARG(path->pathrec.dgid));
 
+	init_completion(&path->done);
+
 	path->query_id =
 		ib_sa_path_rec_get(priv->ca, priv->port,
 				   &path->pathrec,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index c33ed87f9dff..ef3ee035bbc8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -135,20 +135,14 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
 	if (!mcast)
 		return NULL;
 
-	init_completion(&mcast->done);
-
 	mcast->dev = dev;
 	mcast->created = jiffies;
 	mcast->backoff = 1;
-	mcast->logcount = 0;
 
 	INIT_LIST_HEAD(&mcast->list);
 	INIT_LIST_HEAD(&mcast->neigh_list);
 	skb_queue_head_init(&mcast->pkt_queue);
 
-	mcast->ah = NULL;
-	mcast->query = NULL;
-
 	return mcast;
 }
 
@@ -350,6 +344,8 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 	rec.port_gid = priv->local_gid;
 	rec.pkey = cpu_to_be16(priv->pkey);
 
+	init_completion(&mcast->done);
+
 	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
 				     IB_SA_MCMEMBER_REC_MGID |
 				     IB_SA_MCMEMBER_REC_PORT_GID |
@@ -469,6 +465,8 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
 	}
 
+	init_completion(&mcast->done);
+
 	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
 				     mcast->backoff * 1000, GFP_ATOMIC,
 				     ipoib_mcast_join_complete,