diff options
author | Govindarajulu Varadarajan <_govind@gmx.com> | 2014-11-19 02:29:32 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-11-21 12:17:54 -0500 |
commit | f8e34d246c7d89e13399b410f92234acd2f7411a (patch) | |
tree | a761fe599db9b9095f96656ddc949160f761e9ab /drivers/net/ethernet/cisco | |
parent | 3819ffdff70da4c0d3bab0f8becabfc6936a230c (diff) |
enic: support skb->xmit_more
Check and update posted_index only when skb->xmit_more is 0 or tx queue is full.
v2:
Use the cached txq_map value instead of calling skb_get_queue_mapping(skb) again.
Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/cisco')
-rw-r--r-- | drivers/net/ethernet/cisco/enic/enic_main.c | 8 | ||||
-rw-r--r-- | drivers/net/ethernet/cisco/enic/vnic_wq.h | 20 |
2 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 5afe360c7e89..b9cda2fc5ae8 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -533,6 +533,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, | |||
533 | struct vnic_wq *wq; | 533 | struct vnic_wq *wq; |
534 | unsigned long flags; | 534 | unsigned long flags; |
535 | unsigned int txq_map; | 535 | unsigned int txq_map; |
536 | struct netdev_queue *txq; | ||
536 | 537 | ||
537 | if (skb->len <= 0) { | 538 | if (skb->len <= 0) { |
538 | dev_kfree_skb_any(skb); | 539 | dev_kfree_skb_any(skb); |
@@ -541,6 +542,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, | |||
541 | 542 | ||
542 | txq_map = skb_get_queue_mapping(skb) % enic->wq_count; | 543 | txq_map = skb_get_queue_mapping(skb) % enic->wq_count; |
543 | wq = &enic->wq[txq_map]; | 544 | wq = &enic->wq[txq_map]; |
545 | txq = netdev_get_tx_queue(netdev, txq_map); | ||
544 | 546 | ||
545 | /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, | 547 | /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, |
546 | * which is very likely. In the off chance it's going to take | 548 | * which is very likely. In the off chance it's going to take |
@@ -558,7 +560,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, | |||
558 | 560 | ||
559 | if (vnic_wq_desc_avail(wq) < | 561 | if (vnic_wq_desc_avail(wq) < |
560 | skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { | 562 | skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { |
561 | netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); | 563 | netif_tx_stop_queue(txq); |
562 | /* This is a hard error, log it */ | 564 | /* This is a hard error, log it */ |
563 | netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); | 565 | netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); |
564 | spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); | 566 | spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); |
@@ -568,7 +570,9 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, | |||
568 | enic_queue_wq_skb(enic, wq, skb); | 570 | enic_queue_wq_skb(enic, wq, skb); |
569 | 571 | ||
570 | if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) | 572 | if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) |
571 | netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); | 573 | netif_tx_stop_queue(txq); |
574 | if (!skb->xmit_more || netif_xmit_stopped(txq)) | ||
575 | vnic_wq_doorbell(wq); | ||
572 | 576 | ||
573 | spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); | 577 | spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); |
574 | 578 | ||
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h index 2c6c70804a39..816f1ad6072f 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h | |||
@@ -104,6 +104,17 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq) | |||
104 | return wq->to_use->desc; | 104 | return wq->to_use->desc; |
105 | } | 105 | } |
106 | 106 | ||
107 | static inline void vnic_wq_doorbell(struct vnic_wq *wq) | ||
108 | { | ||
109 | /* Adding write memory barrier prevents compiler and/or CPU | ||
110 | * reordering, thus avoiding descriptor posting before | ||
111 | * descriptor is initialized. Otherwise, hardware can read | ||
112 | * stale descriptor fields. | ||
113 | */ | ||
114 | wmb(); | ||
115 | iowrite32(wq->to_use->index, &wq->ctrl->posted_index); | ||
116 | } | ||
117 | |||
107 | static inline void vnic_wq_post(struct vnic_wq *wq, | 118 | static inline void vnic_wq_post(struct vnic_wq *wq, |
108 | void *os_buf, dma_addr_t dma_addr, | 119 | void *os_buf, dma_addr_t dma_addr, |
109 | unsigned int len, int sop, int eop, | 120 | unsigned int len, int sop, int eop, |
@@ -122,15 +133,6 @@ static inline void vnic_wq_post(struct vnic_wq *wq, | |||
122 | buf->wr_id = wrid; | 133 | buf->wr_id = wrid; |
123 | 134 | ||
124 | buf = buf->next; | 135 | buf = buf->next; |
125 | if (eop) { | ||
126 | /* Adding write memory barrier prevents compiler and/or CPU | ||
127 | * reordering, thus avoiding descriptor posting before | ||
128 | * descriptor is initialized. Otherwise, hardware can read | ||
129 | * stale descriptor fields. | ||
130 | */ | ||
131 | wmb(); | ||
132 | iowrite32(buf->index, &wq->ctrl->posted_index); | ||
133 | } | ||
134 | wq->to_use = buf; | 136 | wq->to_use = buf; |
135 | 137 | ||
136 | wq->ring.desc_avail -= desc_skip_cnt; | 138 | wq->ring.desc_avail -= desc_skip_cnt; |