author		David S. Miller <davem@davemloft.net>	2014-08-28 04:39:31 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-28 04:39:31 -0400
commit		6f19e12f623067d6a330748f932ca4a81b828ffb (patch)
tree		3fbf664e08fe9ed4b396eeeb5200f253d7ad1521 /drivers/net/ethernet/intel/igb
parent		2367a17390138f68b3aa28f2f220b8d7ff8d91f4 (diff)
igb: flush when in xmit_more mode and under descriptor pressure
Mirror the changes made to ixgbe in commit 2367a17390138f68b3aa28f2f220b8d7ff8d91f4
("ixgbe: flush when in xmit_more mode and under descriptor pressure").

Signed-off-by: David S. Miller <davem@davemloft.net>
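What the patch does: igb_maybe_stop_tx() moves from igb_xmit_frame_ring() into igb_tx_map(), ahead of the tail-write decision, and the tail is now written whenever the queue has just been stopped, even if skb->xmit_more asked the driver to defer. Without this, a queue stopped under descriptor pressure receives no further transmit calls, so a deferred tail write would never happen and the descriptors already placed in the ring would be stranded. A condensed sketch of the resulting logic (every helper shown appears in the diff below; this is illustrative, not a drop-in):

	/* End of igb_tx_map() after this patch (condensed sketch). */
	tx_ring->next_to_use = i;

	/* Stop the subqueue now if a worst-case frame might not fit. */
	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Flush to hardware when the stack has nothing more queued
	 * (!skb->xmit_more) OR when we just stopped the queue; a stopped
	 * queue gets no later xmit call to perform the deferred write.
	 */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
		writel(i, tx_ring->tail);

The stop side pairs with a wake on the TX completion path; a sketch of that side follows the diff.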
Diffstat (limited to 'drivers/net/ethernet/intel/igb')
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c | 78
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 89c29b40d61c..89de7fee5e94 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4813,6 +4813,41 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }
 
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	struct net_device *netdev = tx_ring->netdev;
+
+	netif_stop_subqueue(netdev, tx_ring->queue_index);
+
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (igb_desc_unused(tx_ring) < size)
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+	u64_stats_update_begin(&tx_ring->tx_syncp2);
+	tx_ring->tx_stats.restart_queue2++;
+	u64_stats_update_end(&tx_ring->tx_syncp2);
+
+	return 0;
+}
+
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	if (igb_desc_unused(tx_ring) >= size)
+		return 0;
+	return __igb_maybe_stop_tx(tx_ring, size);
+}
+
 static void igb_tx_map(struct igb_ring *tx_ring,
 		       struct igb_tx_buffer *first,
 		       const u8 hdr_len)
@@ -4915,7 +4950,10 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 
 	tx_ring->next_to_use = i;
 
-	if (!skb->xmit_more) {
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
 
 		/* we need this if more than one processor can write to our tail
@@ -4942,41 +4980,6 @@ dma_error:
 	tx_ring->next_to_use = i;
 }
 
-static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
-	struct net_device *netdev = tx_ring->netdev;
-
-	netif_stop_subqueue(netdev, tx_ring->queue_index);
-
-	/* Herbert's original patch had:
-	 *  smp_mb__after_netif_stop_queue();
-	 * but since that doesn't exist yet, just open code it.
-	 */
-	smp_mb();
-
-	/* We need to check again in a case another CPU has just
-	 * made room available.
-	 */
-	if (igb_desc_unused(tx_ring) < size)
-		return -EBUSY;
-
-	/* A reprieve! */
-	netif_wake_subqueue(netdev, tx_ring->queue_index);
-
-	u64_stats_update_begin(&tx_ring->tx_syncp2);
-	tx_ring->tx_stats.restart_queue2++;
-	u64_stats_update_end(&tx_ring->tx_syncp2);
-
-	return 0;
-}
-
-static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
-	if (igb_desc_unused(tx_ring) >= size)
-		return 0;
-	return __igb_maybe_stop_tx(tx_ring, size);
-}
-
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 				struct igb_ring *tx_ring)
 {
@@ -5047,9 +5050,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 
 	igb_tx_map(tx_ring, first, hdr_len);
 
-	/* Make sure there is space in the ring for the next send. */
-	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
 	return NETDEV_TX_OK;
 
 out_drop:
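On the smp_mb() in __igb_maybe_stop_tx(): the barrier orders the queue stop before the re-read of igb_desc_unused(), pairing with the TX completion path, which reclaims descriptors and then wakes a stopped queue. A condensed sketch of that completion side, assuming a caller-supplied wake threshold (igb's real code lives in igb_clean_tx_irq() and uses its own constant):

	/* Completion side of the stop/wake handshake (illustrative sketch). */
	static void tx_completion_wake(struct igb_ring *tx_ring, u16 wake_thresh)
	{
		/* ... descriptors reclaimed, tx_ring->next_to_clean advanced ... */

		/* Make the index update visible before testing the stopped
		 * flag; pairs with the smp_mb() in __igb_maybe_stop_tx().
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
		    igb_desc_unused(tx_ring) >= wake_thresh)
			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
	}

With both barriers in place, either the producer's re-check sees the room the consumer just freed, or the consumer's check sees the stopped queue and wakes it; the lost-wakeup race between "check, then stop" and "free, then wake" is closed.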