Diffstat (limited to 'drivers/net/ethernet/intel/e1000/e1000_main.c')
 drivers/net/ethernet/intel/e1000/e1000_main.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 83140cbb5f01..7f997d36948f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2977,7 +2977,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 			   struct e1000_tx_ring *tx_ring, int tx_flags,
 			   int count)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_tx_desc *tx_desc = NULL;
 	struct e1000_tx_buffer *buffer_info;
 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3031,11 +3030,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 	wmb();
 
 	tx_ring->next_to_use = i;
-	writel(i, hw->hw_addr + tx_ring->tdt);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems
-	 */
-	mmiowb();
 }
 
 /* 82547 workaround to avoid controller hang in half-duplex environment.
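
(Note: the writel()/mmiowb() pair removed here is relocated, not dropped; the final hunk of this patch re-adds it in e1000_xmit_frame(), guarded by skb->xmit_more, so the TDT tail register is written once per batch of frames rather than once per frame.)
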
@@ -3226,9 +3220,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	if (vlan_tx_tag_present(skb)) {
+	if (skb_vlan_tag_present(skb)) {
 		tx_flags |= E1000_TX_FLAGS_VLAN;
-		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+		tx_flags |= (skb_vlan_tag_get(skb) <<
+			     E1000_TX_FLAGS_VLAN_SHIFT);
 	}
 
 	first = tx_ring->next_to_use;
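
(The hunk above switches to the renamed VLAN helpers: vlan_tx_tag_present()/vlan_tx_tag_get() became skb_vlan_tag_present()/skb_vlan_tag_get() in mainline; the logic is unchanged. The tag is packed into the upper bits of tx_flags. A minimal, self-contained sketch of that packing pattern follows; the TX_FLAGS_* constants here are hypothetical stand-ins for the driver's E1000_TX_FLAGS_* definitions, not the real values.)

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's E1000_TX_FLAGS_* constants:
 * low bits carry boolean flags, the upper 16 bits carry the VLAN tag.
 */
#define TX_FLAGS_VLAN		0x00000002U
#define TX_FLAGS_VLAN_SHIFT	16

int main(void)
{
	uint32_t tx_flags = 0;
	uint16_t vlan_tag = 100;	/* tag as skb_vlan_tag_get() would return it */

	/* Packing, as in the hunk above: set the flag, shift the tag in. */
	tx_flags |= TX_FLAGS_VLAN;
	tx_flags |= (uint32_t)vlan_tag << TX_FLAGS_VLAN_SHIFT;

	/* Unpacking on the descriptor-fill side. */
	if (tx_flags & TX_FLAGS_VLAN)
		printf("vlan tag: %u\n",
		       (unsigned)((tx_flags >> TX_FLAGS_VLAN_SHIFT) & 0xffff));
	return 0;
}
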
@@ -3263,6 +3258,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		/* Make sure there is space in the ring for the next send. */
 		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
 
+		if (!skb->xmit_more ||
+		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
+			/* we need this if more than one processor can write to
+			 * our tail at a time, it synchronizes IO on IA64/Altix
+			 * systems
+			 */
+			mmiowb();
+		}
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
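
(Taken together, the hunks implement doorbell batching: e1000_tx_queue() no longer writes the TDT tail register after every frame, and e1000_xmit_frame() writes it only when skb->xmit_more is clear, meaning no further frame is queued behind this one, or when the queue has stopped. A burst of packets thus costs a single MMIO write. A minimal sketch of the pattern follows, assuming a hypothetical tx_ring/ring_doorbell() pair in place of the driver's writel() and netif_xmit_stopped() plumbing.)

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical ring state; in the driver, the tail is the TDT register
 * reached through writel(..., hw->hw_addr + tx_ring->tdt).
 */
struct tx_ring {
	unsigned int next_to_use;	/* software producer index */
	unsigned int hw_tail;		/* last index pushed to hardware */
};

static void ring_doorbell(struct tx_ring *ring)
{
	/* Stands in for the writel() + mmiowb() in the patch above. */
	ring->hw_tail = ring->next_to_use;
	printf("doorbell: tail = %u\n", ring->hw_tail);
}

/* xmit_more mirrors skb->xmit_more: the stack promises another frame is
 * queued right behind this one, so the doorbell may be deferred.
 */
static void xmit_frame(struct tx_ring *ring, bool xmit_more, bool queue_stopped)
{
	ring->next_to_use++;		/* descriptors filled, index advanced */
	if (!xmit_more || queue_stopped)
		ring_doorbell(ring);	/* one MMIO write covers the batch */
}

int main(void)
{
	struct tx_ring ring = { 0, 0 };

	xmit_frame(&ring, true, false);		/* deferred */
	xmit_frame(&ring, true, false);		/* deferred */
	xmit_frame(&ring, false, false);	/* flush: one doorbell for three frames */
	return 0;
}

(The queue-stopped check matters: a stopped queue will not deliver the promised follow-up frame, so the tail must be flushed immediately or the already-queued descriptors would sit unseen by hardware.)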