author     Eric Dumazet <edumazet@google.com>       2014-10-06 12:30:35 -0400
committer  David S. Miller <davem@davemloft.net>    2014-10-07 13:20:39 -0400
commit     fe971b95c22578456ff7198537827841c726d3f7
tree       8819ad6ad894239f2e5f1faad4dc3c21a592c34c
parent     377421662a1739de5ccb71220a0b10a300addbd8
net/mlx4_en: remove NETDEV_TX_BUSY
Drivers should avoid returning NETDEV_TX_BUSY as much as possible.
They should stop the tx queue before the qdisc even tries to push
another packet, to avoid requeues.
For a driver supporting skb->xmit_more this is likely a prerequisite
anyway; otherwise we could hit a tx deadlock: we need to force a
doorbell if the TX ring is full.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ethernet/mellanox/mlx4/en_tx.c | 48 ++++++++++++++++++++++++------------------------
 1 file changed, 24 insertions(+), 24 deletions(-)
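Before the diff itself, the pattern the patch adopts is worth spelling out. Below is a minimal, hypothetical sketch of an ndo_start_xmit routine following the same discipline; get_ring(), post_descriptor() and ring_doorbell() are illustrative stand-ins rather than real mlx4 functions, and struct tx_ring is trimmed to just the fields the logic needs. HEADROOM and MAX_DESC_TXBBS play the same role as in the patch below.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Trimmed-down, stand-in ring state; the real mlx4 ring has far more. */
struct tx_ring {
        u32 prod;                       /* producer index, xmit path only */
        u32 cons;                       /* consumer index, completion path */
        u32 size;                       /* ring size in TXBB units */
        struct netdev_queue *tx_queue;
};

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tx_ring *ring = get_ring(dev, skb);      /* stand-in lookup */
        bool stop_queue;

        post_descriptor(ring, skb);     /* stand-in: build TX descriptor(s) */

        /* Stop the queue *after* accepting this packet, while there is
         * still room for one more maximally sized frame.  The qdisc then
         * never hands us a packet we cannot take, so NETDEV_TX_BUSY (and
         * the requeue it triggers) is never needed.
         */
        stop_queue = (int)(ring->prod - ACCESS_ONCE(ring->cons)) >
                     ring->size - HEADROOM - MAX_DESC_TXBBS;
        if (unlikely(stop_queue))
                netif_tx_stop_queue(ring->tx_queue);

        /* xmit_more batching: ring the doorbell once the stack has no more
         * packets for us, or when we just stopped the queue; a stopped
         * queue will not get another xmit call to flush the ring.
         */
        if (!skb->xmit_more || netif_xmit_stopped(ring->tx_queue))
                ring_doorbell(ring);    /* stand-in: hit the HW doorbell */

        if (unlikely(stop_queue)) {
                /* Lost-wakeup check: completions may have drained the ring
                 * between the occupancy test and netif_tx_stop_queue().
                 * The barrier pairs with one on the completion side.
                 */
                smp_rmb();
                if ((int)(ring->prod - ACCESS_ONCE(ring->cons)) <=
                    ring->size - HEADROOM - MAX_DESC_TXBBS)
                        netif_tx_wake_queue(ring->tx_queue);
        }
        return NETDEV_TX_OK;
}

Note that the ordering matters: the doorbell test runs after the stop, so netif_xmit_stopped() forces a flush for the very packet that filled the ring, which is exactly the tx deadlock the commit message warns about.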
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 92a7cf46d9af..8726a4aee5a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -706,6 +706,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         void *fragptr = NULL;
         bool bounce = false;
         bool send_doorbell;
+        bool stop_queue;
         bool inline_ok;
         u32 ring_cons;
 
@@ -735,30 +736,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         if (vlan_tx_tag_present(skb))
                 vlan_tag = vlan_tx_tag_get(skb);
 
-        /* Check available TXBBs And 2K spare for prefetch */
-        if (unlikely(((int)(ring->prod - ring_cons)) >
-                     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
-                /* every full Tx ring stops queue */
-                netif_tx_stop_queue(ring->tx_queue);
-                ring->queue_stopped++;
-
-                /* If queue was emptied after the if, and before the
-                 * stop_queue - need to wake the queue, or else it will remain
-                 * stopped forever.
-                 * Need a memory barrier to make sure ring->cons was not
-                 * updated before queue was stopped.
-                 */
-                wmb();
-
-                ring_cons = ACCESS_ONCE(ring->cons);
-                if (unlikely(((int)(ring->prod - ring_cons)) <=
-                             ring->size - HEADROOM - MAX_DESC_TXBBS)) {
-                        netif_tx_wake_queue(ring->tx_queue);
-                        ring->wake_queue++;
-                } else {
-                        return NETDEV_TX_BUSY;
-                }
-        }
 
         prefetchw(&ring->tx_queue->dql);
 
@@ -929,6 +906,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
         skb_tx_timestamp(skb);
 
+        /* Check available TXBBs And 2K spare for prefetch */
+        stop_queue = (int)(ring->prod - ring_cons) >
+                     ring->size - HEADROOM - MAX_DESC_TXBBS;
+        if (unlikely(stop_queue)) {
+                netif_tx_stop_queue(ring->tx_queue);
+                ring->queue_stopped++;
+        }
         send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
 
         real_size = (real_size / 16) & 0x3f;
@@ -973,6 +957,22 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                 }
         }
 
+        if (unlikely(stop_queue)) {
+                /* If queue was emptied after the if (stop_queue) , and before
+                 * the netif_tx_stop_queue() - need to wake the queue,
+                 * or else it will remain stopped forever.
+                 * Need a memory barrier to make sure ring->cons was not
+                 * updated before queue was stopped.
+                 */
+                smp_rmb();
+
+                ring_cons = ACCESS_ONCE(ring->cons);
+                if (unlikely(((int)(ring->prod - ring_cons)) <=
+                             ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+                        netif_tx_wake_queue(ring->tx_queue);
+                        ring->wake_queue++;
+                }
+        }
         return NETDEV_TX_OK;
 
 tx_drop_unmap:
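The smp_rmb() added above is only half of the handshake: it assumes the completion path publishes ring->cons before testing whether the xmit path stopped the queue. A hypothetical sketch of that counterpart follows (in mlx4 this logic lives in the TX completion processing; the names and structure here are illustrative, reusing the stand-in struct tx_ring from the earlier sketch):

/* Hypothetical completion-side counterpart to the xmit-path barrier. */
static void sketch_tx_complete(struct tx_ring *ring, u32 new_cons)
{
        ACCESS_ONCE(ring->cons) = new_cons;     /* publish the freed slots */

        /* Pairs with smp_rmb() in the xmit path: the cons update must be
         * visible before we test the queue state.  Without the pairing,
         * xmit could stop the queue on a stale cons while we still see the
         * queue as running, and it would then stay stopped forever.
         */
        smp_mb();

        if (netif_tx_queue_stopped(ring->tx_queue) &&
            (int)(ring->prod - new_cons) <=
            ring->size - HEADROOM - MAX_DESC_TXBBS)
                netif_tx_wake_queue(ring->tx_queue);
}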