path: root/drivers/net/ethernet/cisco/enic/enic_main.c
author	Govindarajulu Varadarajan <_govind@gmx.com>	2014-11-19 02:29:32 -0500
committer	David S. Miller <davem@davemloft.net>	2014-11-21 12:17:54 -0500
commit	f8e34d246c7d89e13399b410f92234acd2f7411a (patch)
tree	a761fe599db9b9095f96656ddc949160f761e9ab /drivers/net/ethernet/cisco/enic/enic_main.c
parent	3819ffdff70da4c0d3bab0f8becabfc6936a230c (diff)
enic: support skb->xmit_more
Check and update posted_index only when skb->xmit_more is 0 or tx queue
is full.

v2: use txq_map instead of skb_get_queue_mapping(skb)

Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
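For context: skb->xmit_more is a hint from the core stack that more packets
are already queued for this driver (later kernels moved the hint to
netdev_xmit_more()), so the doorbell write that publishes new descriptors to
the NIC can be deferred until the last skb of a batch, saving one MMIO write
per packet. Below is a minimal sketch of the pattern this patch implements;
my_start_xmit(), my_ring_for_queue(), my_ring_post() and my_ring_kick() are
hypothetical placeholders standing in for the enic internals, not real
functions.

/* Sketch of the xmit_more doorbell-batching pattern.  All my_* names
 * are hypothetical driver helpers, not part of the enic driver.
 */
static netdev_tx_t my_start_xmit(struct sk_buff *skb,
				 struct net_device *netdev)
{
	unsigned int q = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, q);
	struct my_ring *ring = my_ring_for_queue(netdev, q);

	my_ring_post(ring, skb);	/* fill descriptors, no MMIO yet */

	/* Ring the doorbell only when the stack says the batch is over
	 * (!skb->xmit_more) or the queue was just stopped; otherwise a
	 * deferred doorbell could strand the already-posted packets.
	 */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		my_ring_kick(ring);	/* one doorbell write per batch */

	return NETDEV_TX_OK;
}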
Diffstat (limited to 'drivers/net/ethernet/cisco/enic/enic_main.c')
-rw-r--r--	drivers/net/ethernet/cisco/enic/enic_main.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 5afe360c7e89..b9cda2fc5ae8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -533,6 +533,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	struct vnic_wq *wq;
 	unsigned long flags;
 	unsigned int txq_map;
+	struct netdev_queue *txq;
 
 	if (skb->len <= 0) {
 		dev_kfree_skb_any(skb);
@@ -541,6 +542,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
 	wq = &enic->wq[txq_map];
+	txq = netdev_get_tx_queue(netdev, txq_map);
 
 	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
 	 * which is very likely. In the off chance it's going to take
@@ -558,7 +560,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	if (vnic_wq_desc_avail(wq) <
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+		netif_tx_stop_queue(txq);
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
 		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
@@ -568,7 +570,9 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	enic_queue_wq_skb(enic, wq, skb);
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+		netif_tx_stop_queue(txq);
+	if (!skb->xmit_more || netif_xmit_stopped(txq))
+		vnic_wq_doorbell(wq);
 
 	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 
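The vnic_wq_doorbell() helper called in the last hunk is not shown in this
file's diff (the patch also touches the vnic_wq header). A sketch consistent
with the commit message, assuming wq->to_use tracks the next-to-use descriptor
and wq->ctrl->posted_index is the device-visible doorbell register:

/* Sketch of the doorbell helper this diff calls; the field names are
 * assumptions based on the commit message, not a verified excerpt.
 */
static inline void vnic_wq_doorbell(struct vnic_wq *wq)
{
	/* Write barrier: descriptor writes must be visible to the
	 * device before the posted_index update, or the NIC could
	 * fetch stale descriptor contents.
	 */
	wmb();

	iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
}

With the doorbell factored out this way, the hot path above only pays the
MMIO cost once per batch instead of once per skb.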