path: root/drivers/infiniband/ulp/ipoib/ipoib_ib.c
author    Eli Cohen <eli@dev.mellanox.co.il>	2008-04-29 16:46:53 -0400
committer Roland Dreier <rolandd@cisco.com>	2008-04-29 16:46:53 -0400
commit    f56bcd8013566d4ad4759ae5fc85a6660e4655c7 (patch)
tree      58b7e23f81caf5e6d8ada5819170f0bfb783d7e3 /drivers/infiniband/ulp/ipoib/ipoib_ib.c
parent    87528227dfa8776d12779d073c217f0835fd6d20 (diff)
IPoIB: Use separate CQ for UD send completions
Use a dedicated CQ for UD send completions. Also, do not arm the UD send CQ, which reduces the number of interrupts generated. This patch further reduces overhead by not calling poll CQ for every posted send WR -- it polls only when there are 16 or more outstanding work requests.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
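The mechanism, in brief: the UD send CQ is split off from the receive CQ and never armed, so it raises no completion interrupts; instead the send path reaps completions synchronously once more than MAX_SEND_CQE (16) WRs are outstanding. A minimal self-contained sketch of that pattern follows. Only ib_poll_cq() and struct ib_wc are real verbs API here; the tx_ctx structure and the reap_send_batch()/note_posted_send() names are hypothetical scaffolding, not the driver's code:

#include <rdma/ib_verbs.h>

#define MAX_SEND_CQE 16		/* poll threshold, as in the patch */

struct tx_ctx {			/* hypothetical stand-in for ipoib_dev_priv */
	struct ib_cq *send_cq;	/* dedicated send CQ, never armed */
	struct ib_wc  send_wc[MAX_SEND_CQE];
	unsigned int  tx_outstanding;
};

/* Reap one batch of send completions; nonzero means more may remain. */
static int reap_send_batch(struct tx_ctx *tx)
{
	int n, i;

	n = ib_poll_cq(tx->send_cq, MAX_SEND_CQE, tx->send_wc);
	for (i = 0; i < n; ++i)
		--tx->tx_outstanding;	/* the real handler also frees the skb */

	return n == MAX_SEND_CQE;	/* full batch: CQ may not be empty yet */
}

/* Send path: no interrupt, no per-WR poll -- poll only past the threshold. */
static void note_posted_send(struct tx_ctx *tx)
{
	if (unlikely(++tx->tx_outstanding > MAX_SEND_CQE))
		reap_send_batch(tx);
}

Note also the skb_orphan() added to ipoib_send() in the diff below: with the send CQ unarmed, a completion (and hence the skb free) can be deferred for a long time, so orphaning the skb at post time presumably keeps socket send-buffer accounting from stalling senders while completions sit unreaped.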
Diffstat (limited to 'drivers/infiniband/ulp/ipoib/ipoib_ib.c')
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c	45
1 file changed, 26 insertions(+), 19 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 7cf1fa7074ab..97b815c1a3fc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -364,7 +364,6 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	unsigned int wr_id = wc->wr_id;
 	struct ipoib_tx_buf *tx_req;
-	unsigned long flags;
 
 	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
 		       wr_id, wc->status);
@@ -384,13 +383,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	dev_kfree_skb_any(tx_req->skb);
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
 	++priv->tx_tail;
 	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
 	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		netif_wake_queue(dev);
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
 
 	if (wc->status != IB_WC_SUCCESS &&
 	    wc->status != IB_WC_WR_FLUSH_ERR)
@@ -399,6 +396,17 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 		   wc->status, wr_id, wc->vendor_err);
 }
 
+static int poll_tx(struct ipoib_dev_priv *priv)
+{
+	int n, i;
+
+	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
+	for (i = 0; i < n; ++i)
+		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
+
+	return n == MAX_SEND_CQE;
+}
+
 int ipoib_poll(struct napi_struct *napi, int budget)
 {
 	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
@@ -414,7 +422,7 @@ poll_more:
 		int max = (budget - done);
 
 		t = min(IPOIB_NUM_WC, max);
-		n = ib_poll_cq(priv->cq, t, priv->ibwc);
+		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
 
 		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = priv->ibwc + i;
@@ -425,12 +433,8 @@ poll_more:
 					ipoib_cm_handle_rx_wc(dev, wc);
 				else
 					ipoib_ib_handle_rx_wc(dev, wc);
-			} else {
-				if (wc->wr_id & IPOIB_OP_CM)
-					ipoib_cm_handle_tx_wc(dev, wc);
-				else
-					ipoib_ib_handle_tx_wc(dev, wc);
-			}
+			} else
+				ipoib_cm_handle_tx_wc(priv->dev, wc);
 		}
 
 		if (n != t)
@@ -439,7 +443,7 @@ poll_more:
 
 	if (done < budget) {
 		netif_rx_complete(dev, napi);
-		if (unlikely(ib_req_notify_cq(priv->cq,
+		if (unlikely(ib_req_notify_cq(priv->recv_cq,
 					      IB_CQ_NEXT_COMP |
 					      IB_CQ_REPORT_MISSED_EVENTS)) &&
 		    netif_rx_reschedule(dev, napi))
@@ -562,12 +566,16 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 
 		address->last_send = priv->tx_head;
 		++priv->tx_head;
+		skb_orphan(skb);
 
 		if (++priv->tx_outstanding == ipoib_sendq_size) {
 			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
 			netif_stop_queue(dev);
 		}
 	}
+
+	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+		poll_tx(priv);
 }
 
 static void __ipoib_reap_ah(struct net_device *dev)
@@ -714,7 +722,7 @@ void ipoib_drain_cq(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i, n;
 	do {
-		n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
+		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
 		for (i = 0; i < n; ++i) {
 			/*
 			 * Convert any successful completions to flush
@@ -729,14 +737,13 @@ void ipoib_drain_cq(struct net_device *dev)
 					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
 				else
 					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
-			} else {
-				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
-					ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
-				else
-					ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
-			}
+			} else
+				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
 		}
 	} while (n == IPOIB_NUM_WC);
+
+	while (poll_tx(priv))
+		; /* nothing */
 }
 
 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
@@ -826,7 +833,7 @@ timeout:
 		msleep(1);
 	}
 
-	ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP);
+	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
 
 	return 0;
 }
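A note on what this page does not show: the diffstat is limited to ipoib_ib.c, so the hunk that actually splits priv->cq into priv->recv_cq and priv->send_cq lives elsewhere in the same commit (CQ setup is in ipoib_verbs.c in this era of the driver). A hedged sketch of the shape of that change, using the ib_create_cq()/ib_req_notify_cq() verbs of this kernel; the 'size' variable and the error labels are illustrative assumptions, not the patch's exact code:

/*
 * Sketch only.  The key point: the receive CQ gets a completion handler
 * and is armed; the send CQ gets neither, so it never interrupts and is
 * drained only by the synchronous poll_tx() shown above.
 */
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
			     dev, size, 0);	/* 'size' covers the recv ring */
if (IS_ERR(priv->recv_cq))
	goto out_free_mr;			/* illustrative label */

priv->send_cq = ib_create_cq(priv->ca, NULL, NULL, dev,
			     ipoib_sendq_size, 0);
if (IS_ERR(priv->send_cq))
	goto out_free_recv_cq;			/* illustrative label */

if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))	/* arm recv CQ only */
	goto out_free_send_cq;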