aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/e1000
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2009-03-25 17:58:45 -0400
committerDavid S. Miller <davem@davemloft.net>2009-03-26 04:00:53 -0400
commit37e73df8c3f19f4733c60ec53c104ff6f79ba467 (patch)
treeb02ed6f4dd5bd1491b0986a38dfc7b0da21ba28d /drivers/net/e1000
parent5a29f7893fbe681f1334285be7e41e56f0de666c (diff)
e1000: fix tx hang detect logic and address dma mapping issues
This patch changes the dma mapping to better support skb_dma_map/skb_dma_unmap, and redefines the tx hang detect logic to be based off of the time stamp instead of whether the dma field is populated.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/e1000')
-rw-r--r--drivers/net/e1000/e1000_main.c55
1 file changed, 30 insertions, 25 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 1f390ceb4869..e60faabe9024 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2056,6 +2056,7 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
2056 dev_kfree_skb_any(buffer_info->skb); 2056 dev_kfree_skb_any(buffer_info->skb);
2057 buffer_info->skb = NULL; 2057 buffer_info->skb = NULL;
2058 } 2058 }
2059 buffer_info->time_stamp = 0;
2059 /* buffer_info must be completely set up in the transmit path */ 2060 /* buffer_info must be completely set up in the transmit path */
2060} 2061}
2061 2062
@@ -2903,24 +2904,24 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2903 unsigned int mss) 2904 unsigned int mss)
2904{ 2905{
2905 struct e1000_hw *hw = &adapter->hw; 2906 struct e1000_hw *hw = &adapter->hw;
2907 struct e1000_buffer *buffer_info;
2906 unsigned int len = skb_headlen(skb); 2908 unsigned int len = skb_headlen(skb);
2907 unsigned int offset, size, count = 0, i; 2909 unsigned int offset, size, count = 0, i;
2908 unsigned int f; 2910 unsigned int f;
2909 dma_addr_t map; 2911 dma_addr_t *map;
2910 2912
2911 i = tx_ring->next_to_use; 2913 i = tx_ring->next_to_use;
2912 2914
2913 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { 2915 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
2914 dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); 2916 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
2915 dev_kfree_skb(skb); 2917 return 0;
2916 return -2;
2917 } 2918 }
2918 2919
2919 map = skb_shinfo(skb)->dma_maps[0]; 2920 map = skb_shinfo(skb)->dma_maps;
2920 offset = 0; 2921 offset = 0;
2921 2922
2922 while (len) { 2923 while (len) {
2923 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; 2924 buffer_info = &tx_ring->buffer_info[i];
2924 size = min(len, max_per_txd); 2925 size = min(len, max_per_txd);
2925 /* Workaround for Controller erratum -- 2926 /* Workaround for Controller erratum --
2926 * descriptor for non-tso packet in a linear SKB that follows a 2927 * descriptor for non-tso packet in a linear SKB that follows a
@@ -2953,14 +2954,18 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2953 size -= 4; 2954 size -= 4;
2954 2955
2955 buffer_info->length = size; 2956 buffer_info->length = size;
2956 buffer_info->dma = map + offset; 2957 buffer_info->dma = map[0] + offset;
2957 buffer_info->time_stamp = jiffies; 2958 buffer_info->time_stamp = jiffies;
2958 buffer_info->next_to_watch = i; 2959 buffer_info->next_to_watch = i;
2959 2960
2960 len -= size; 2961 len -= size;
2961 offset += size; 2962 offset += size;
2962 count++; 2963 count++;
2963 if (unlikely(++i == tx_ring->count)) i = 0; 2964 if (len) {
2965 i++;
2966 if (unlikely(i == tx_ring->count))
2967 i = 0;
2968 }
2964 } 2969 }
2965 2970
2966 for (f = 0; f < nr_frags; f++) { 2971 for (f = 0; f < nr_frags; f++) {
@@ -2968,11 +2973,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2968 2973
2969 frag = &skb_shinfo(skb)->frags[f]; 2974 frag = &skb_shinfo(skb)->frags[f];
2970 len = frag->size; 2975 len = frag->size;
2971 map = skb_shinfo(skb)->dma_maps[f + 1];
2972 offset = 0; 2976 offset = 0;
2973 2977
2974 while (len) { 2978 while (len) {
2975 struct e1000_buffer *buffer_info; 2979 i++;
2980 if (unlikely(i == tx_ring->count))
2981 i = 0;
2982
2976 buffer_info = &tx_ring->buffer_info[i]; 2983 buffer_info = &tx_ring->buffer_info[i];
2977 size = min(len, max_per_txd); 2984 size = min(len, max_per_txd);
2978 /* Workaround for premature desc write-backs 2985 /* Workaround for premature desc write-backs
@@ -2988,21 +2995,18 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2988 size -= 4; 2995 size -= 4;
2989 2996
2990 buffer_info->length = size; 2997 buffer_info->length = size;
2991 buffer_info->dma = map + offset; 2998 buffer_info->dma = map[f + 1] + offset;
2992 buffer_info->time_stamp = jiffies; 2999 buffer_info->time_stamp = jiffies;
2993 buffer_info->next_to_watch = i; 3000 buffer_info->next_to_watch = i;
2994 3001
2995 len -= size; 3002 len -= size;
2996 offset += size; 3003 offset += size;
2997 count++; 3004 count++;
2998 if (unlikely(++i == tx_ring->count)) i = 0;
2999 } 3005 }
3000 } 3006 }
3001 3007
3002 i = (i == 0) ? tx_ring->count - 1 : i - 1;
3003 tx_ring->buffer_info[i].skb = skb; 3008 tx_ring->buffer_info[i].skb = skb;
3004 tx_ring->buffer_info[first].next_to_watch = i; 3009 tx_ring->buffer_info[first].next_to_watch = i;
3005 smp_wmb();
3006 3010
3007 return count; 3011 return count;
3008} 3012}
@@ -3318,14 +3322,20 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3318 if (likely(skb->protocol == htons(ETH_P_IP))) 3322 if (likely(skb->protocol == htons(ETH_P_IP)))
3319 tx_flags |= E1000_TX_FLAGS_IPV4; 3323 tx_flags |= E1000_TX_FLAGS_IPV4;
3320 3324
3321 e1000_tx_queue(adapter, tx_ring, tx_flags, 3325 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3322 e1000_tx_map(adapter, tx_ring, skb, first, 3326 nr_frags, mss);
3323 max_per_txd, nr_frags, mss));
3324 3327
3325 netdev->trans_start = jiffies; 3328 if (count) {
3329 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3330 netdev->trans_start = jiffies;
3331 /* Make sure there is space in the ring for the next send. */
3332 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3326 3333
3327 /* Make sure there is space in the ring for the next send. */ 3334 } else {
3328 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3335 dev_kfree_skb_any(skb);
3336 tx_ring->buffer_info[first].time_stamp = 0;
3337 tx_ring->next_to_use = first;
3338 }
3329 3339
3330 return NETDEV_TX_OK; 3340 return NETDEV_TX_OK;
3331} 3341}
@@ -3842,12 +3852,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3842 /* Detect a transmit hang in hardware, this serializes the 3852 /* Detect a transmit hang in hardware, this serializes the
3843 * check with the clearing of time_stamp and movement of i */ 3853 * check with the clearing of time_stamp and movement of i */
3844 adapter->detect_tx_hung = false; 3854 adapter->detect_tx_hung = false;
3845 /* 3855 if (tx_ring->buffer_info[eop].time_stamp &&
3846 * read barrier to make sure that the ->dma member and time
3847 * stamp are updated fully
3848 */
3849 smp_rmb();
3850 if (tx_ring->buffer_info[eop].dma &&
3851 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3856 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3852 (adapter->tx_timeout_factor * HZ)) 3857 (adapter->tx_timeout_factor * HZ))
3853 && !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3858 && !(er32(STATUS) & E1000_STATUS_TXOFF)) {