author     Linus Torvalds <torvalds@g5.osdl.org>   2006-10-02 18:29:11 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-10-02 18:29:11 -0400
commit     829d464e60151a525c7ba57e7acfc4fc297f7069 (patch)
tree       6307966acabb65f332213cb886fddd4f0821c164 /drivers
parent     b65d04a7859f4cfea1a40de260cb52d8f7897aff (diff)
parent     e5a010690141ab805b059ba10f7401b80e0be831 (diff)

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: Tweak trace message format
  IB/ehca: Fix device registration
  IB/ipath: Fix RDMA reads
  RDMA/cma: Optimize error handling
  RDMA/cma: Eliminate unnecessary remove_list
  RDMA/cma: Set status correctly on route resolution error
  RDMA/cma: Fix device removal race
  RDMA/cma: Fix leak of cm_ids in case of failures
Diffstat (limited to 'drivers'):

 drivers/infiniband/core/cma.c           | 47
 drivers/infiniband/hw/ehca/ehca_main.c  | 36
 drivers/infiniband/hw/ehca/ehca_tools.h |  2
 drivers/infiniband/hw/ipath/ipath_rc.c  | 59

 4 files changed, 80 insertions(+), 64 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1178bd434d1b..9ae4f3a67c70 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -874,23 +874,25 @@ static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
 	__u16 port;
 	u8 ip_ver;
 
+	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
+			     &ip_ver, &port, &src, &dst))
+		goto err;
+
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
 			    listen_id->ps);
 	if (IS_ERR(id))
-		return NULL;
+		goto err;
+
+	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
+			  ip_ver, port, src, dst);
 
 	rt = &id->route;
 	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
-	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths, GFP_KERNEL);
+	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
+			       GFP_KERNEL);
 	if (!rt->path_rec)
-		goto err;
+		goto destroy_id;
 
-	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
-			     &ip_ver, &port, &src, &dst))
-		goto err;
-
-	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
-			  ip_ver, port, src, dst);
 	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
 	if (rt->num_paths == 2)
 		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
@@ -903,8 +905,10 @@ static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
 	id_priv = container_of(id, struct rdma_id_private, id);
 	id_priv->state = CMA_CONNECT;
 	return id_priv;
-err:
+
+destroy_id:
 	rdma_destroy_id(id);
+err:
 	return NULL;
 }
 
@@ -932,6 +936,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	mutex_unlock(&lock);
 	if (ret) {
 		ret = -ENODEV;
+		cma_exch(conn_id, CMA_DESTROYING);
 		cma_release_remove(conn_id);
 		rdma_destroy_id(&conn_id->id);
 		goto out;
@@ -1307,6 +1312,7 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 		work->old_state = CMA_ROUTE_QUERY;
 		work->new_state = CMA_ADDR_RESOLVED;
 		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
+		work->event.status = status;
 	}
 
 	queue_work(cma_wq, &work->work);
@@ -1862,6 +1868,11 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 
 	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
 out:
+	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
+		ib_destroy_cm_id(id_priv->cm_id.ib);
+		id_priv->cm_id.ib = NULL;
+	}
+
 	kfree(private_data);
 	return ret;
 }
@@ -1889,10 +1900,8 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	cm_id->remote_addr = *sin;
 
 	ret = cma_modify_qp_rtr(&id_priv->id);
-	if (ret) {
-		iw_destroy_cm_id(cm_id);
-		return ret;
-	}
+	if (ret)
+		goto out;
 
 	iw_param.ord = conn_param->initiator_depth;
 	iw_param.ird = conn_param->responder_resources;
@@ -1904,6 +1913,10 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 		iw_param.qpn = conn_param->qp_num;
 	ret = iw_cm_connect(cm_id, &iw_param);
 out:
+	if (ret && !IS_ERR(cm_id)) {
+		iw_destroy_cm_id(cm_id);
+		id_priv->cm_id.iw = NULL;
+	}
 	return ret;
 }
 
@@ -2142,12 +2155,9 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 
 static void cma_process_remove(struct cma_device *cma_dev)
 {
-	struct list_head remove_list;
 	struct rdma_id_private *id_priv;
 	int ret;
 
-	INIT_LIST_HEAD(&remove_list);
-
 	mutex_lock(&lock);
 	while (!list_empty(&cma_dev->id_list)) {
 		id_priv = list_entry(cma_dev->id_list.next,
@@ -2158,8 +2168,7 @@ static void cma_process_remove(struct cma_device *cma_dev)
 			continue;
 		}
 
-		list_del(&id_priv->list);
-		list_add_tail(&id_priv->list, &remove_list);
+		list_del_init(&id_priv->list);
 		atomic_inc(&id_priv->refcount);
 		mutex_unlock(&lock);
 
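
Note: the cma.c hunks above all converge on one error-handling shape:
parse and validate before allocating, and give each acquired object its
own unwind label so a failure releases exactly what exists so far. A
minimal standalone sketch of that shape (plain C, illustrative only;
ctx_create() and parse() are stand-ins, not part of the patch):

	/* Sketch of the parse-first, label-per-resource unwind style. */
	#include <stdlib.h>
	#include <string.h>

	struct ctx { char *buf; };

	/* stand-in for cma_get_net_info(): reject bad input up front */
	static int parse(const char *input)
	{
		return (!input || !*input) ? -1 : 0;
	}

	static struct ctx *ctx_create(const char *input)
	{
		struct ctx *c;

		if (parse(input))	/* validate before allocating */
			goto err;

		c = malloc(sizeof(*c));
		if (!c)
			goto err;	/* nothing to undo yet */

		c->buf = strdup(input);
		if (!c->buf)
			goto destroy_ctx;	/* undo only what exists */

		return c;

	destroy_ctx:
		free(c);
	err:
		return NULL;
	}

The same idea drives the cma_connect_ib()/cma_connect_iw() hunks: on
failure the cm_id is destroyed at the single out: exit and the cached
pointer is cleared, so no caller can see, or double-free, a stale id.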
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 2380994418a5..024d511c4b58 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -49,7 +49,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0016");
+MODULE_VERSION("SVNEHCA_0017");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
@@ -239,7 +239,7 @@ init_node_guid1:
 	return ret;
 }
 
-int ehca_register_device(struct ehca_shca *shca)
+int ehca_init_device(struct ehca_shca *shca)
 {
 	int ret;
 
@@ -317,11 +317,6 @@ int ehca_register_device(struct ehca_shca *shca)
 	/* shca->ib_device.process_mad = ehca_process_mad; */
 	shca->ib_device.mmap = ehca_mmap;
 
-	ret = ib_register_device(&shca->ib_device);
-	if (ret)
-		ehca_err(&shca->ib_device,
-			 "ib_register_device() failed ret=%x", ret);
-
 	return ret;
 }
 
@@ -561,9 +556,9 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 		goto probe1;
 	}
 
-	ret = ehca_register_device(shca);
+	ret = ehca_init_device(shca);
 	if (ret) {
-		ehca_gen_err("Cannot register Infiniband device");
+		ehca_gen_err("Cannot init ehca device struct");
 		goto probe1;
 	}
 
@@ -571,7 +566,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048);
 	if (ret) {
 		ehca_err(&shca->ib_device, "Cannot create EQ.");
-		goto probe2;
+		goto probe1;
 	}
 
 	ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
@@ -600,6 +595,13 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 		goto probe5;
 	}
 
+	ret = ib_register_device(&shca->ib_device);
+	if (ret) {
+		ehca_err(&shca->ib_device,
+			 "ib_register_device() failed ret=%x", ret);
+		goto probe6;
+	}
+
 	/* create AQP1 for port 1 */
 	if (ehca_open_aqp1 == 1) {
 		shca->sport[0].port_state = IB_PORT_DOWN;
@@ -607,7 +609,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 		if (ret) {
 			ehca_err(&shca->ib_device,
 				 "Cannot create AQP1 for port 1.");
-			goto probe6;
+			goto probe7;
 		}
 	}
 
@@ -618,7 +620,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 		if (ret) {
 			ehca_err(&shca->ib_device,
 				 "Cannot create AQP1 for port 2.");
-			goto probe7;
+			goto probe8;
 		}
 	}
 
@@ -630,12 +632,15 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 
 	return 0;
 
-probe7:
+probe8:
 	ret = ehca_destroy_aqp1(&shca->sport[0]);
 	if (ret)
 		ehca_err(&shca->ib_device,
 			 "Cannot destroy AQP1 for port 1. ret=%x", ret);
 
+probe7:
+	ib_unregister_device(&shca->ib_device);
+
 probe6:
 	ret = ehca_dereg_internal_maxmr(shca);
 	if (ret)
@@ -660,9 +665,6 @@ probe3:
 		ehca_err(&shca->ib_device,
 			 "Cannot destroy EQ. ret=%x", ret);
 
-probe2:
-	ib_unregister_device(&shca->ib_device);
-
 probe1:
 	ib_dealloc_device(&shca->ib_device);
 
@@ -750,7 +752,7 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0016)\n");
+	       "(Rel.: SVNEHCA_0017)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
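
The ehca_main.c reordering is the standard probe discipline: publish the
device via ib_register_device() only after every resource it depends on
exists, and unwind in exact reverse order of acquisition, one label per
step. A compressed sketch of that shape (illustrative C only; the
acquire_*/release_* names are made-up stand-ins for the EQ/MR/AQP1 steps
in the real probe path):

	#include <stdio.h>

	/* stand-ins for the real acquisition/teardown steps */
	static int acquire_eq(void)      { return 0; }
	static int acquire_mr(void)      { return 0; }
	static int register_device(void) { return 0; }
	static void release_mr(void)     { }
	static void release_eq(void)     { }

	static int probe(void)
	{
		int ret;

		ret = acquire_eq();
		if (ret)
			goto out;

		ret = acquire_mr();
		if (ret)
			goto free_eq;

		ret = register_device();  /* users can see us only now */
		if (ret)
			goto free_mr;

		return 0;

	free_mr:
		release_mr();
	free_eq:
		release_eq();
	out:
		return ret;
	}

	int main(void) { return probe(); }

Registering last closes the window in which a client could call into a
device whose EQs and MRs were not yet set up, which appears to be the
point of the "IB/ehca: Fix device registration" commit.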
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 9f56bb846d93..809da3ef706b 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -117,7 +117,7 @@ extern int ehca_debug_level;
 		unsigned int l = (unsigned int)(len); \
 		unsigned char *deb = (unsigned char*)(adr); \
 		for (x = 0; x < l; x += 16) { \
-			printk("EHCA_DMP:%s" format \
+			printk("EHCA_DMP:%s " format \
 			       " adr=%p ofs=%04x %016lx %016lx\n", \
 			       __FUNCTION__, ##args, deb, x, \
 			       *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
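
The ehca_tools.h hunk only inserts a space after the %s: adjacent string
literals are concatenated in C, so without it the function name printed
for %s runs straight into the caller-supplied format text. A tiny
illustration (plain C, values made up):

	#include <stdio.h>

	int main(void)
	{
		/* before: prints "EHCA_DMP:ehca_probelen=16" */
		printf("EHCA_DMP:%s" "len=16\n", "ehca_probe");
		/* after:  prints "EHCA_DMP:ehca_probe len=16" */
		printf("EHCA_DMP:%s " "len=16\n", "ehca_probe");
		return 0;
	}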
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index a504cf67f272..ce6038743c5c 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -241,10 +241,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 		 * original work request since we may need to resend
 		 * it.
 		 */
-		qp->s_sge.sge = wqe->sg_list[0];
-		qp->s_sge.sg_list = wqe->sg_list + 1;
-		qp->s_sge.num_sge = wqe->wr.num_sge;
-		qp->s_len = len = wqe->length;
+		len = wqe->length;
 		ss = &qp->s_sge;
 		bth2 = 0;
 		switch (wqe->wr.opcode) {
@@ -368,14 +365,23 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 		default:
 			goto done;
 		}
+		qp->s_sge.sge = wqe->sg_list[0];
+		qp->s_sge.sg_list = wqe->sg_list + 1;
+		qp->s_sge.num_sge = wqe->wr.num_sge;
+		qp->s_len = wqe->length;
 		if (newreq) {
 			qp->s_tail++;
 			if (qp->s_tail >= qp->s_size)
 				qp->s_tail = 0;
 		}
-		bth2 |= qp->s_psn++ & IPATH_PSN_MASK;
-		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
-			qp->s_next_psn = qp->s_psn;
+		bth2 |= qp->s_psn & IPATH_PSN_MASK;
+		if (wqe->wr.opcode == IB_WR_RDMA_READ)
+			qp->s_psn = wqe->lpsn + 1;
+		else {
+			qp->s_psn++;
+			if ((int)(qp->s_psn - qp->s_next_psn) > 0)
+				qp->s_next_psn = qp->s_psn;
+		}
 		/*
 		 * Put the QP on the pending list so lost ACKs will cause
 		 * a retry. More than one request can be pending so the
@@ -690,13 +696,6 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 	struct ipath_ibdev *dev;
 
-	/*
-	 * If there are no requests pending, we are done.
-	 */
-	if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
-	    qp->s_last == qp->s_tail)
-		goto done;
-
 	if (qp->s_retry == 0) {
 		wc->wr_id = wqe->wr.wr_id;
 		wc->status = IB_WC_RETRY_EXC_ERR;
@@ -731,8 +730,6 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 	dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
 	reset_psn(qp, psn);
-
-done:
 	tasklet_hi_schedule(&qp->s_task);
 
 bail:
@@ -765,6 +762,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 	struct ib_wc wc;
 	struct ipath_swqe *wqe;
 	int ret = 0;
+	u32 ack_psn;
 
 	/*
 	 * Remove the QP from the timeout queue (or RNR timeout queue).
@@ -777,26 +775,26 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 	list_del_init(&qp->timerwait);
 	spin_unlock(&dev->pending_lock);
 
+	/* Nothing is pending to ACK/NAK. */
+	if (unlikely(qp->s_last == qp->s_tail))
+		goto bail;
+
 	/*
 	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
 	 * requests and implicitly NAK RDMA read and atomic requests issued
 	 * before the NAK'ed request. The MSN won't include the NAK'ed
 	 * request but will include an ACK'ed request(s).
 	 */
+	ack_psn = psn;
+	if (aeth >> 29)
+		ack_psn--;
 	wqe = get_swqe_ptr(qp, qp->s_last);
 
-	/* Nothing is pending to ACK/NAK. */
-	if (qp->s_last == qp->s_tail)
-		goto bail;
-
 	/*
 	 * The MSN might be for a later WQE than the PSN indicates so
 	 * only complete WQEs that the PSN finishes.
 	 */
-	while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
-		/* If we are ACKing a WQE, the MSN should be >= the SSN. */
-		if (ipath_cmp24(aeth, wqe->ssn) < 0)
-			break;
+	while (ipath_cmp24(ack_psn, wqe->lpsn) >= 0) {
 		/*
 		 * If this request is a RDMA read or atomic, and the ACK is
 		 * for a later operation, this ACK NAKs the RDMA read or
@@ -807,7 +805,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 		 * is sent but before the response is received.
 		 */
 		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
-		     opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
+		     (opcode != OP(RDMA_READ_RESPONSE_LAST) ||
+		      ipath_cmp24(ack_psn, wqe->lpsn) != 0)) ||
 		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
 		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
 		     (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
@@ -825,6 +824,10 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
 			 */
 			goto bail;
 		}
+		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+			tasklet_hi_schedule(&qp->s_task);
 		/* Post a send completion queue entry if requested. */
 		if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
@@ -1055,7 +1058,8 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		/* no AETH, no ACK */
 		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
 			dev->n_rdma_seq++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+			if (qp->s_last != qp->s_tail)
+				ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
 			goto ack_done;
 		}
 	rdma_read:
@@ -1091,7 +1095,8 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
 		/* ACKs READ req. */
 		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
 			dev->n_rdma_seq++;
-			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+			if (qp->s_last != qp->s_tail)
+				ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
 			goto ack_done;
 		}
 		/* FALLTHROUGH */
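
Several of the ipath hunks lean on ipath_cmp24() to order packet sequence
numbers that live in 24 bits and wrap modulo 2^24. The usual trick for
such serial-number comparison -- and, I assume, what the driver's helper
does; the exact form below is a sketch, not the kernel source -- is to
shift the 24-bit difference up into the sign bit so before/equal/after
come out as negative/zero/positive:

	#include <assert.h>
	#include <stdint.h>

	/* 24-bit serial-number compare with wraparound (assumed to
	 * mirror ipath_cmp24): <0 if a is before b, 0 if equal,
	 * >0 if a is after b. */
	static int cmp24(uint32_t a, uint32_t b)
	{
		return (int32_t)((a - b) << 8) >> 8;
	}

	int main(void)
	{
		assert(cmp24(5, 3) > 0);	/* 5 comes after 3 */
		assert(cmp24(3, 5) < 0);
		assert(cmp24(7, 7) == 0);
		/* across the wrap: 2 is "after" 0xfffffe (mod 2^24) */
		assert(cmp24(2, 0xfffffe) > 0);
		assert(cmp24(0xfffffe, 2) < 0);
		return 0;
	}

This is why a check like ipath_cmp24(ack_psn, wqe->lpsn) >= 0 stays
correct when the PSN counter wraps, and why the "IB/ipath: Fix RDMA
reads" change can compare an adjusted ack_psn (backed off by one when
aeth >> 29 signals a NAK) against wqe->lpsn.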