author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 12:56:11 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 12:56:11 -0400
commit     0b776eb5426752d4e53354ac89e3710d857e09a7 (patch)
tree       1eebeeaabab90de5834b32e72d2e259dc8a4a635 /drivers/infiniband/ulp
parent     0d6810091cdbd05efeb31654c6a41a6cbdfdd2c8 (diff)
parent     77109cc2823f025ccd66ebd9b88fbab90437b2d8 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
mlx4_core: Increase command timeout for INIT_HCA to 10 seconds
IPoIB/cm: Use common CQ for CM send completions
IB/uverbs: Fix checking of userspace object ownership
IB/mlx4: Sanity check userspace send queue sizes
IPoIB: Rewrite "if (!likely(...))" as "if (unlikely(!(...)))"
IB/ehca: Enable large page MRs by default
IB/ehca: Change meaning of hca_cap_mr_pgsize
IB/ehca: Fix ehca_encode_hwpage_size() and alloc_fmr()
IB/ehca: Fix masking error in {,re}reg_phys_mr()
IB/ehca: Supply QP token for SRQ base QPs
IPoIB: Use round_jiffies() for ah_reap_task
RDMA/cma: Fix deadlock destroying listen requests
RDMA/cma: Add locking around QP accesses
IB/mthca: Avoid alignment traps when writing doorbells
mlx4_core: Kill mlx4_write64_raw()
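A note on the likely/unlikely cleanup in this series: both spellings compute the same boolean, but "if (!likely(x))" hints the compiler that x is usually true and then negates the result outside the hint, which obscures (and, depending on the compiler, can defeat) the intended prediction, while "if (unlikely(!x))" states directly that the negated condition is rare. A minimal sketch using the kernel's macro definitions; the demo function and mask are illustrative only, not driver code:

/* likely()/unlikely() as defined in the kernel headers */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static void demo(unsigned long wr_id, unsigned long mask)
{
	/* Old form: hints that the masked bits are usually set, then
	 * negates the result after the hint is taken. */
	if (!likely(wr_id & mask))
		return;

	/* New form: says directly that "bits clear" is the rare case,
	 * which is what the surrounding code actually expects. */
	if (unlikely(!(wr_id & mask)))
		return;
}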
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h       |  15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c    | 114
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c    |  52
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c  |   4
4 files changed, 87 insertions, 98 deletions
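Most of the churn below comes from the "Use common CQ for CM send completions" patch: each connected-mode transmit QP used to own a private send CQ and a per-QP IPOIB_FLAG_NETIF_STOPPED flag, and now all send completions land on the device's one CQ while a single priv->tx_outstanding counter decides when to stop and wake the net queue. A minimal userspace model of that stop/wake hysteresis follows; SENDQ_SIZE and the struct are stand-ins, not driver definitions:

#include <stdbool.h>

#define SENDQ_SIZE 64			/* stand-in for ipoib_sendq_size */

struct txq {
	unsigned outstanding;		/* sends posted but not yet completed */
	bool	 stopped;		/* models netif queue state */
};

static void on_post_send(struct txq *q)
{
	if (++q->outstanding == SENDQ_SIZE)
		q->stopped = true;	/* netif_stop_queue() */
}

static void on_send_completion(struct txq *q)
{
	/* Equality, not <=: exactly one completion, the one that drains
	 * the ring to half full, triggers the wake, giving hysteresis
	 * without repeated wake calls. */
	if (--q->outstanding == SENDQ_SIZE / 2 && q->stopped)
		q->stopped = false;	/* netif_wake_queue() */
}

In the driver both halves run under priv->tx_lock, and the wake additionally checks IPOIB_FLAG_ADMIN_UP so a queue stopped for shutdown stays stopped.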
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1b3327ad6bc4..eb7edab0e836 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -84,9 +84,8 @@ enum {
 	IPOIB_MCAST_RUN		  = 6,
 	IPOIB_STOP_REAPER	  = 7,
 	IPOIB_MCAST_STARTED	  = 8,
-	IPOIB_FLAG_NETIF_STOPPED  = 9,
-	IPOIB_FLAG_ADMIN_CM	  = 10,
-	IPOIB_FLAG_UMCAST	  = 11,
+	IPOIB_FLAG_ADMIN_CM	  = 9,
+	IPOIB_FLAG_UMCAST	  = 10,
 
 	IPOIB_MAX_BACKOFF_SECONDS = 16,
 
@@ -98,9 +97,9 @@ enum {
 
 #define	IPOIB_OP_RECV   (1ul << 31)
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
-#define	IPOIB_CM_OP_SRQ (1ul << 30)
+#define	IPOIB_OP_CM     (1ul << 30)
 #else
-#define	IPOIB_CM_OP_SRQ (0)
+#define	IPOIB_OP_CM     (0)
 #endif
 
 /* structs */
@@ -197,7 +196,6 @@ struct ipoib_cm_rx {
 
 struct ipoib_cm_tx {
 	struct ib_cm_id     *id;
-	struct ib_cq        *cq;
 	struct ib_qp        *qp;
 	struct list_head     list;
 	struct net_device   *dev;
@@ -294,6 +292,7 @@ struct ipoib_dev_priv {
 	unsigned             tx_tail;
 	struct ib_sge        tx_sge;
 	struct ib_send_wr    tx_wr;
+	unsigned             tx_outstanding;
 
 	struct ib_wc ibwc[IPOIB_NUM_WC];
 
@@ -504,6 +503,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx);
 void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
 			   unsigned int mtu);
 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
+void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
 #else
 
 struct ipoib_cm_tx;
@@ -592,6 +592,9 @@ static inline void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *w
 {
 }
 
+static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+}
 #endif
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
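With IPOIB_CM_OP_SRQ gone, every work-request ID now carries two flag bits in its top bits: IPOIB_OP_RECV (bit 31) separates receives from sends, and IPOIB_OP_CM (bit 30) separates connected-mode from datagram traffic, which is what lets one CQ serve all four completion classes. A self-contained illustration of the encoding and demux; the demux function itself is illustrative, not driver code:

#include <stdio.h>

/* Flag bits exactly as in ipoib.h above; ring indices occupy the low
 * bits, so masking both flags off recovers the slot number. */
#define IPOIB_OP_RECV	(1ul << 31)
#define IPOIB_OP_CM	(1ul << 30)

static void demux(unsigned long wr_id)
{
	unsigned long slot = wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);

	printf("%s %s completion, slot %lu\n",
	       (wr_id & IPOIB_OP_CM)   ? "CM" : "UD",
	       (wr_id & IPOIB_OP_RECV) ? "rx" : "tx",
	       slot);
}

int main(void)
{
	demux(5 | IPOIB_OP_CM | IPOIB_OP_RECV);	/* CM rx, slot 5 */
	demux(7 | IPOIB_OP_CM);			/* CM tx, slot 7 */
	demux(3);				/* UD tx, slot 3 */
	return 0;
}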
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0a0dcb8fdfd1..87610772a979 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -87,7 +87,7 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
 	struct ib_recv_wr *bad_wr;
 	int i, ret;
 
-	priv->cm.rx_wr.wr_id = id | IPOIB_CM_OP_SRQ;
+	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
 
 	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
 		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
@@ -401,7 +401,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
+	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
 	struct sk_buff *skb, *newskb;
 	struct ipoib_cm_rx *p;
 	unsigned long flags;
@@ -412,7 +412,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		 wr_id, wc->status);
 
 	if (unlikely(wr_id >= ipoib_recvq_size)) {
-		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~IPOIB_CM_OP_SRQ)) {
+		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
 			spin_lock_irqsave(&priv->lock, flags);
 			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
 			ipoib_cm_start_rx_drain(priv);
@@ -434,7 +434,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		goto repost;
 	}
 
-	if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) {
+	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
 		p = wc->qp->qp_context;
 		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
 			spin_lock_irqsave(&priv->lock, flags);
@@ -498,7 +498,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 	priv->tx_sge.addr = addr;
 	priv->tx_sge.length = len;
 
-	priv->tx_wr.wr_id = wr_id;
+	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
 
 	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
 }
@@ -549,20 +549,19 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 		dev->trans_start = jiffies;
 		++tx->tx_head;
 
-		if (tx->tx_head - tx->tx_tail == ipoib_sendq_size) {
+		if (++priv->tx_outstanding == ipoib_sendq_size) {
 			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
 				  tx->qp->qp_num);
 			netif_stop_queue(dev);
-			set_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
 		}
 	}
 }
 
-static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx,
-				  struct ib_wc *wc)
+void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned int wr_id = wc->wr_id;
+	struct ipoib_cm_tx *tx = wc->qp->qp_context;
+	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
 	struct ipoib_tx_buf *tx_req;
 	unsigned long flags;
 
@@ -587,11 +586,10 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
 
 	spin_lock_irqsave(&priv->tx_lock, flags);
 	++tx->tx_tail;
-	if (unlikely(test_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags)) &&
-	    tx->tx_head - tx->tx_tail <= ipoib_sendq_size >> 1) {
-		clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
+	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+	    netif_queue_stopped(dev) &&
+	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		netif_wake_queue(dev);
-	}
 
 	if (wc->status != IB_WC_SUCCESS &&
 	    wc->status != IB_WC_WR_FLUSH_ERR) {
@@ -614,11 +612,6 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
 		tx->neigh = NULL;
 	}
 
-	/* queue would be re-started anyway when TX is destroyed,
-	 * but it makes sense to do it ASAP here. */
-	if (test_and_clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags))
-		netif_wake_queue(dev);
-
 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 		list_move(&tx->list, &priv->cm.reap_list);
 		queue_work(ipoib_workqueue, &priv->cm.reap_task);
@@ -632,19 +625,6 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
 	spin_unlock_irqrestore(&priv->tx_lock, flags);
 }
 
-static void ipoib_cm_tx_completion(struct ib_cq *cq, void *tx_ptr)
-{
-	struct ipoib_cm_tx *tx = tx_ptr;
-	int n, i;
-
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-	do {
-		n = ib_poll_cq(cq, IPOIB_NUM_WC, tx->ibwc);
-		for (i = 0; i < n; ++i)
-			ipoib_cm_handle_tx_wc(tx->dev, tx, tx->ibwc + i);
-	} while (n == IPOIB_NUM_WC);
-}
-
 int ipoib_cm_dev_open(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -807,17 +787,18 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 	return 0;
 }
 
-static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ib_cq *cq)
+static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_init_attr attr = {
-		.send_cq		= cq,
+		.send_cq		= priv->cq,
 		.recv_cq		= priv->cq,
 		.srq			= priv->cm.srq,
 		.cap.max_send_wr	= ipoib_sendq_size,
 		.cap.max_send_sge	= 1,
 		.sq_sig_type		= IB_SIGNAL_ALL_WR,
 		.qp_type		= IB_QPT_RC,
+		.qp_context		= tx
 	};
 
 	return ib_create_qp(priv->pd, &attr);
@@ -899,21 +880,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 		goto err_tx;
 	}
 
-	p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
-			     ipoib_sendq_size + 1, 0);
-	if (IS_ERR(p->cq)) {
-		ret = PTR_ERR(p->cq);
-		ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);
-		goto err_cq;
-	}
-
-	ret = ib_req_notify_cq(p->cq, IB_CQ_NEXT_COMP);
-	if (ret) {
-		ipoib_warn(priv, "failed to request completion notification: %d\n", ret);
-		goto err_req_notify;
-	}
-
-	p->qp = ipoib_cm_create_tx_qp(p->dev, p->cq);
+	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
 	if (IS_ERR(p->qp)) {
 		ret = PTR_ERR(p->qp);
 		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
@@ -950,12 +917,8 @@ err_modify:
 err_id:
 	p->id = NULL;
 	ib_destroy_qp(p->qp);
-err_req_notify:
 err_qp:
 	p->qp = NULL;
-	ib_destroy_cq(p->cq);
-err_cq:
-	p->cq = NULL;
 err_tx:
 	return ret;
 }
@@ -964,6 +927,8 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	struct ipoib_tx_buf *tx_req;
+	unsigned long flags;
+	unsigned long begin;
 
 	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
 		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
@@ -971,27 +936,40 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 	if (p->id)
 		ib_destroy_cm_id(p->id);
 
-	if (p->qp)
-		ib_destroy_qp(p->qp);
-
-	if (p->cq)
-		ib_destroy_cq(p->cq);
-
-	if (test_bit(IPOIB_FLAG_NETIF_STOPPED, &p->flags))
-		netif_wake_queue(p->dev);
-
 	if (p->tx_ring) {
+		/* Wait for all sends to complete */
+		begin = jiffies;
 		while ((int) p->tx_tail - (int) p->tx_head < 0) {
-			tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-			ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
-					    DMA_TO_DEVICE);
-			dev_kfree_skb_any(tx_req->skb);
-			++p->tx_tail;
+			if (time_after(jiffies, begin + 5 * HZ)) {
+				ipoib_warn(priv, "timing out; %d sends not completed\n",
+					   p->tx_head - p->tx_tail);
+				goto timeout;
+			}
+
+			msleep(1);
 		}
+	}
 
-		kfree(p->tx_ring);
+timeout:
+
+	while ((int) p->tx_tail - (int) p->tx_head < 0) {
+		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
+		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
+				    DMA_TO_DEVICE);
+		dev_kfree_skb_any(tx_req->skb);
+		++p->tx_tail;
+		spin_lock_irqsave(&priv->tx_lock, flags);
+		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+		    netif_queue_stopped(p->dev) &&
+		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+			netif_wake_queue(p->dev);
+		spin_unlock_irqrestore(&priv->tx_lock, flags);
 	}
 
+	if (p->qp)
+		ib_destroy_qp(p->qp);
+
+	kfree(p->tx_ring);
 	kfree(p);
 }
 
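Because ipoib_cm_tx_destroy() can no longer flush stragglers by destroying a private CQ, it now polls: give the shared CQ up to five seconds to retire in-flight sends, then unmap and free whatever remains by hand, adjusting tx_outstanding as it goes. A userspace sketch of that bounded wait; the helpers here are stand-ins for the kernel's jiffies/time_after/msleep:

#include <stdbool.h>
#include <time.h>

#define DRAIN_TIMEOUT_SEC 5	/* mirrors the 5 * HZ bound above */

/* Returns true if the completion path drained everything in time;
 * false means the caller must reclaim the remaining slots itself. */
static bool wait_for_drain(const unsigned *head, const unsigned *tail)
{
	time_t begin = time(NULL);

	while ((int)(*tail - *head) < 0) {	/* sends still in flight */
		if (time(NULL) > begin + DRAIN_TIMEOUT_SEC)
			return false;
		/* the driver uses msleep(1) here */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
	return true;
}

Note the signed comparison of the free-running unsigned head/tail counters, the same idiom the driver uses, which stays correct across counter wraparound.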
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 1a77e79f6b43..5063dd509ad2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -267,11 +267,10 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	spin_lock_irqsave(&priv->tx_lock, flags);
 	++priv->tx_tail;
-	if (unlikely(test_bit(IPOIB_FLAG_NETIF_STOPPED, &priv->flags)) &&
-	    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1) {
-		clear_bit(IPOIB_FLAG_NETIF_STOPPED, &priv->flags);
+	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+	    netif_queue_stopped(dev) &&
+	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		netif_wake_queue(dev);
-	}
 	spin_unlock_irqrestore(&priv->tx_lock, flags);
 
 	if (wc->status != IB_WC_SUCCESS &&
@@ -301,14 +300,18 @@ poll_more:
 		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = priv->ibwc + i;
 
-			if (wc->wr_id & IPOIB_CM_OP_SRQ) {
-				++done;
-				ipoib_cm_handle_rx_wc(dev, wc);
-			} else if (wc->wr_id & IPOIB_OP_RECV) {
+			if (wc->wr_id & IPOIB_OP_RECV) {
 				++done;
-				ipoib_ib_handle_rx_wc(dev, wc);
-			} else
-				ipoib_ib_handle_tx_wc(dev, wc);
+				if (wc->wr_id & IPOIB_OP_CM)
+					ipoib_cm_handle_rx_wc(dev, wc);
+				else
+					ipoib_ib_handle_rx_wc(dev, wc);
+			} else {
+				if (wc->wr_id & IPOIB_OP_CM)
+					ipoib_cm_handle_tx_wc(dev, wc);
+				else
+					ipoib_ib_handle_tx_wc(dev, wc);
+			}
 		}
 
 	if (n != t)
@@ -401,10 +404,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		address->last_send = priv->tx_head;
 		++priv->tx_head;
 
-		if (priv->tx_head - priv->tx_tail == ipoib_sendq_size) {
+		if (++priv->tx_outstanding == ipoib_sendq_size) {
 			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
 			netif_stop_queue(dev);
-			set_bit(IPOIB_FLAG_NETIF_STOPPED, &priv->flags);
 		}
 	}
 }
@@ -436,7 +438,8 @@ void ipoib_reap_ah(struct work_struct *work)
 	__ipoib_reap_ah(dev);
 
 	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
-		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
+		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+				   round_jiffies_relative(HZ));
 }
 
 int ipoib_ib_dev_open(struct net_device *dev)
@@ -472,7 +475,8 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	}
 
 	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
-	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
+	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+			   round_jiffies_relative(HZ));
 
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 
@@ -561,12 +565,17 @@ void ipoib_drain_cq(struct net_device *dev)
 			if (priv->ibwc[i].status == IB_WC_SUCCESS)
 				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;
 
-			if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
-				ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
-			else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
-				ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
-			else
-				ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
+			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
+				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
+					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
+				else
+					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
+			} else {
+				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
+					ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
+				else
+					ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
+			}
 		}
 	} while (n == IPOIB_NUM_WC);
 }
@@ -612,6 +621,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 					 DMA_TO_DEVICE);
 			dev_kfree_skb_any(tx_req->skb);
 			++priv->tx_tail;
+			--priv->tx_outstanding;
 		}
 
 		for (i = 0; i < ipoib_recvq_size; ++i) {
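The ah_reap_task change is a power tweak: the reaper still runs about once a second, but round_jiffies_relative(HZ) aligns its expiry with the whole-second boundary other rounded timers use, so an otherwise idle CPU wakes once for a batch of timers instead of once per timer. A rough userspace model of the rounding; the HZ value and rounding details are assumptions, and the real helper handles more corner cases:

#include <stdio.h>

#define HZ 250	/* assumed tick rate; real kernels vary */

/* Model: convert the relative delay to an absolute expiry, snap that
 * to a whole second, and convert back to a relative delay. */
static unsigned long round_jiffies_relative_model(unsigned long delay,
						  unsigned long now)
{
	unsigned long expiry = now + delay;

	expiry -= expiry % HZ;		/* snap to a 1-second boundary */
	if (expiry <= now)		/* never round into the past */
		expiry += HZ;
	return expiry - now;
}

int main(void)
{
	/* Two works queued 40 ticks apart end up expiring on the same
	 * absolute tick (1250), so the CPU wakes once for both. */
	printf("%lu\n", round_jiffies_relative_model(HZ, 1000));	/* 250 */
	printf("%lu\n", round_jiffies_relative_model(HZ, 1040));	/* 210 */
	return 0;
}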
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 362610d870e4..a03a65ebcf0c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -148,8 +148,6 @@ static int ipoib_stop(struct net_device *dev)
 
 	netif_stop_queue(dev);
 
-	clear_bit(IPOIB_FLAG_NETIF_STOPPED, &priv->flags);
-
 	/*
 	 * Now flush workqueue to make sure a scheduled task doesn't
 	 * bring our internal state back up.
@@ -902,7 +900,7 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 		goto out_rx_ring_cleanup;
 	}
 
-	/* priv->tx_head & tx_tail are already 0 */
+	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
 
 	if (ipoib_ib_dev_init(dev, ca, port))
 		goto out_tx_ring_cleanup;