Diffstat (limited to 'drivers/infiniband/ulp/ipoib/ipoib_ib.c')

 drivers/infiniband/ulp/ipoib/ipoib_ib.c | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 67 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 7cf1fa7074ab..f429bce24c20 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -364,7 +364,6 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	unsigned int wr_id = wc->wr_id;
 	struct ipoib_tx_buf *tx_req;
-	unsigned long flags;
 
 	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
 		       wr_id, wc->status);
@@ -384,13 +383,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 	dev_kfree_skb_any(tx_req->skb);
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
 	++priv->tx_tail;
 	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
 	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		netif_wake_queue(dev);
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
 
 	if (wc->status != IB_WC_SUCCESS &&
 	    wc->status != IB_WC_WR_FLUSH_ERR)
@@ -399,6 +396,17 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 			   wc->status, wr_id, wc->vendor_err);
 }
 
+static int poll_tx(struct ipoib_dev_priv *priv)
+{
+	int n, i;
+
+	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
+	for (i = 0; i < n; ++i)
+		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
+
+	return n == MAX_SEND_CQE;
+}
+
 int ipoib_poll(struct napi_struct *napi, int budget)
 {
 	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
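The new poll_tx() helper polls a dedicated send CQ into a scratch completion array, neither of which is declared in this file; they come from the companion ipoib.h change, which this diffstat-limited view omits. A sketch of the fields the code above assumes (field names are taken from their uses in the diff; the value of MAX_SEND_CQE is an assumption here):

	#define MAX_SEND_CQE	16

	/* Added to struct ipoib_dev_priv; surrounding fields omitted. */
	struct ib_cq	 *recv_cq;		/* RX and CM completions, polled from NAPI */
	struct ib_cq	 *send_cq;		/* UD TX completions, polled from poll_tx() */
	struct ib_wc	  send_wc[MAX_SEND_CQE];/* scratch buffer for ib_poll_cq() */
	struct timer_list poll_timer;		/* drains TX completions while the queue is stopped */

Returning n == MAX_SEND_CQE tells the caller the scratch array came back full, so more completions may still be queued and another poll is worthwhile.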
@@ -414,7 +422,7 @@ poll_more:
 		int max = (budget - done);
 
 		t = min(IPOIB_NUM_WC, max);
-		n = ib_poll_cq(priv->cq, t, priv->ibwc);
+		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
 
 		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = priv->ibwc + i;
@@ -425,12 +433,8 @@ poll_more:
 					ipoib_cm_handle_rx_wc(dev, wc);
 				else
 					ipoib_ib_handle_rx_wc(dev, wc);
-			} else {
-				if (wc->wr_id & IPOIB_OP_CM)
-					ipoib_cm_handle_tx_wc(dev, wc);
-				else
-					ipoib_ib_handle_tx_wc(dev, wc);
-			}
+			} else
+				ipoib_cm_handle_tx_wc(priv->dev, wc);
 		}
 
 		if (n != t)
@@ -439,7 +443,7 @@ poll_more:
 
 	if (done < budget) {
 		netif_rx_complete(dev, napi);
-		if (unlikely(ib_req_notify_cq(priv->cq,
+		if (unlikely(ib_req_notify_cq(priv->recv_cq,
 					      IB_CQ_NEXT_COMP |
 					      IB_CQ_REPORT_MISSED_EVENTS)) &&
 		    netif_rx_reschedule(dev, napi))
@@ -457,6 +461,26 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 	netif_rx_schedule(dev, &priv->napi);
 }
 
+static void drain_tx_cq(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+	while (poll_tx(priv))
+		; /* nothing */
+
+	if (netif_queue_stopped(dev))
+		mod_timer(&priv->poll_timer, jiffies + 1);
+
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+}
+
+void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
+{
+	drain_tx_cq((struct net_device *)dev_ptr);
+}
+
 static inline int post_send(struct ipoib_dev_priv *priv,
 			    unsigned int wr_id,
 			    struct ib_ah *address, u32 qpn,
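drain_tx_cq() takes tx_lock itself, which is why the per-completion locking could be dropped from ipoib_ib_handle_tx_wc() in the first hunk, and ipoib_send_comp_handler() is the completion callback for the new send CQ. Registering it happens outside this file; a minimal sketch of the split CQ creation in the companion ipoib_verbs.c change, assuming the ib_create_cq() signature of this kernel generation and with error handling elided:

	/* Receive CQ keeps the NAPI completion handler; the send CQ
	 * gets the new drain-based handler and only needs
	 * ipoib_sendq_size entries. */
	priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
				     dev, size, 0);
	priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
				     dev, ipoib_sendq_size, 0);

Note that notification on the send CQ is not requested up front: completions are normally reaped synchronously from the send path, and the event handler is only armed (via IB_CQ_NEXT_COMP in ipoib_send() below) once the TX ring fills up.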
@@ -551,23 +575,34 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	else
 		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
 
+	if (++priv->tx_outstanding == ipoib_sendq_size) {
+		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
+		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+			ipoib_warn(priv, "request notify on send CQ failed\n");
+		netif_stop_queue(dev);
+	}
+
 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
 			       address->ah, qpn, tx_req, phead, hlen))) {
 		ipoib_warn(priv, "post_send failed\n");
 		++dev->stats.tx_errors;
+		--priv->tx_outstanding;
 		ipoib_dma_unmap_tx(priv->ca, tx_req);
 		dev_kfree_skb_any(skb);
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
 	} else {
 		dev->trans_start = jiffies;
 
 		address->last_send = priv->tx_head;
 		++priv->tx_head;
+		skb_orphan(skb);
 
-		if (++priv->tx_outstanding == ipoib_sendq_size) {
-			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
-			netif_stop_queue(dev);
-		}
 	}
+
+	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+		while (poll_tx(priv))
+			; /* nothing */
 }
 
 static void __ipoib_reap_ah(struct net_device *dev)
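Two details in this hunk are worth calling out. First, tx_outstanding is now incremented before posting, and the queue is stopped with send CQ notification armed before the work request that fills the ring is posted, so the completion event cannot slip in before notification is armed. Second, the new skb_orphan() call matters because TX completions are no longer signalled per packet: an skb may now linger in the ring until the next poll, and orphaning it early stops it from being charged against the sending socket's buffer for that whole time. For reference, skb_orphan() in <linux/skbuff.h> of this era looks roughly like:

	static inline void skb_orphan(struct sk_buff *skb)
	{
		/* Release the owning socket's accounting now rather
		 * than at eventual kfree_skb() time. */
		if (skb->destructor)
			skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk = NULL;
	}

The trailing poll_tx() loop reaps completions opportunistically on the send path whenever more than MAX_SEND_CQE sends are outstanding, keeping the ring drained without relying on interrupts.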
@@ -601,6 +636,11 @@ void ipoib_reap_ah(struct work_struct *work)
 				   round_jiffies_relative(HZ));
 }
 
+static void ipoib_ib_tx_timer_func(unsigned long ctx)
+{
+	drain_tx_cq((struct net_device *)ctx);
+}
+
 int ipoib_ib_dev_open(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -637,6 +677,10 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
 			   round_jiffies_relative(HZ));
 
+	init_timer(&priv->poll_timer);
+	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+	priv->poll_timer.data = (unsigned long)dev;
+
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 
 	return 0;
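The timer is only ever armed from drain_tx_cq() while the net queue is stopped, so in the common case it stays idle. As a small aside, the three initialization lines above could equally be written with the setup_timer() helper available at this point:

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long)dev);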
@@ -714,7 +758,7 @@ void ipoib_drain_cq(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i, n;
 	do {
-		n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
+		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
 		for (i = 0; i < n; ++i) {
 			/*
 			 * Convert any successful completions to flush
@@ -729,14 +773,13 @@ void ipoib_drain_cq(struct net_device *dev)
 					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
 				else
 					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
-			} else {
-				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
-					ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
-				else
-					ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
-			}
+			} else
+				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
 		}
 	} while (n == IPOIB_NUM_WC);
+
+	while (poll_tx(priv))
+		; /* nothing */
 }
 
 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
@@ -803,6 +846,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 	ipoib_dbg(priv, "All sends and receives done.\n");
 
 timeout:
+	del_timer_sync(&priv->poll_timer);
 	qp_attr.qp_state = IB_QPS_RESET;
 	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
 		ipoib_warn(priv, "Failed to modify QP to RESET state\n");
@@ -826,7 +870,7 @@ timeout:
 		msleep(1);
 	}
 
-	ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP);
+	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
 
 	return 0;
 }