author		David Daney <ddaney@caviumnetworks.com>	2010-05-05 09:03:12 -0400
committer	David S. Miller <davem@davemloft.net>	2010-05-06 00:22:35 -0400
commit		4e4a4f1478fdb303c9d99c69cfb4e973526f0c99 (patch)
tree		a5b6a62d6743b1df1074e01d35b054471a688b6a /drivers/net/octeon/octeon_mgmt.c
parent		b635e06993076c3c8f8cc766b183be7da3baafdb (diff)
netdev: octeon_mgmt: Try not to drop TX packets when stopping the queue.
Stop the queue when we add the packet that will fill the ring, instead of dropping the packet that no longer fits.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
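For background, the approach the patch adopts is the usual one for ndo_start_xmit handlers: detect that the descriptor just posted is the one that fills the ring and stop the queue right there, so the core stops handing the driver packets it could only refuse. A minimal sketch of that pattern, assuming a hypothetical driver (toy_priv, its fields, and toy_xmit are illustrative names, not the octeon_mgmt ones):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct toy_priv {
	spinlock_t lock;	/* protects the TX ring state */
	unsigned int fill;	/* descriptors currently in flight */
	unsigned int ring_size;	/* total descriptors in the ring */
};

static netdev_tx_t toy_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct toy_priv *p = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);

	/* ... map the skb for DMA and post one TX descriptor here ... */
	p->fill++;

	/*
	 * If that descriptor filled the ring, stop the queue now;
	 * the completion path wakes it once space is reclaimed, so
	 * the core only calls us again when we can accept the skb.
	 */
	if (p->fill >= p->ring_size)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&p->lock, flags);
	return NETDEV_TX_OK;
}

Compared with the old octeon_mgmt behaviour (returning NETDEV_TX_BUSY from an already-full ring), stopping the queue at the fill point means the core's requeue/retry path is almost never exercised.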
Diffstat (limited to 'drivers/net/octeon/octeon_mgmt.c')
-rw-r--r--	drivers/net/octeon/octeon_mgmt.c	16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 3cf6f62502c8..1fdc7b303a6b 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -955,6 +955,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
 	int port = p->port;
 	union mgmt_port_ring_entry re;
 	unsigned long flags;
+	int rv = NETDEV_TX_BUSY;
 
 	re.d64 = 0;
 	re.s.len = skb->len;
@@ -964,15 +965,18 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	spin_lock_irqsave(&p->tx_list.lock, flags);
 
+	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
+		spin_unlock_irqrestore(&p->tx_list.lock, flags);
+		netif_stop_queue(netdev);
+		spin_lock_irqsave(&p->tx_list.lock, flags);
+	}
+
 	if (unlikely(p->tx_current_fill >=
 		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
 		spin_unlock_irqrestore(&p->tx_list.lock, flags);
-
 		dma_unmap_single(p->dev, re.s.addr, re.s.len,
 				 DMA_TO_DEVICE);
-
-		netif_stop_queue(netdev);
-		return NETDEV_TX_BUSY;
+		goto out;
 	}
 
 	__skb_queue_tail(&p->tx_list, skb);
@@ -995,8 +999,10 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
 	cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
 
 	netdev->trans_start = jiffies;
+	rv = NETDEV_TX_OK;
+out:
 	octeon_mgmt_update_tx_stats(netdev);
-	return NETDEV_TX_OK;
+	return rv;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
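Two details of the diff above are worth spelling out. The queue is stopped one entry early (ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1), i.e. when the packet being queued is the one that fills the ring, and the original full-ring check is kept as a now-unlikely fallback that exits through goto out with NETDEV_TX_BUSY. Stopping the queue also only works if the TX-completion path wakes it again once descriptors are reclaimed; that code is outside this diff, so the following is only a sketch of what such a cleanup path typically looks like, reusing the hypothetical toy_priv above rather than the real octeon_mgmt cleanup function:

static void toy_clean_tx(struct net_device *netdev)
{
	struct toy_priv *p = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);

	/* ... unmap and free completed skbs, decrementing p->fill ... */

	/* Restart the queue once there is room for at least one more packet. */
	if (netif_queue_stopped(netdev) && p->fill < p->ring_size)
		netif_wake_queue(netdev);

	spin_unlock_irqrestore(&p->lock, flags);
}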