author:    Alexander Duyck <alexander.h.duyck@intel.com>  2009-03-19 20:17:43 -0400
committer: David S. Miller <davem@davemloft.net>          2009-03-21 19:57:03 -0400
commit:    65689fef7e484631e996541a6772706627b0991a (patch)
tree:      7ad078b1dd22af24a7df121c9d70a3741c56a753 /drivers/net/igb
parent:    c5cd11e380002d24fd4fd4c0fc38f59ab394e885 (diff)
igb: cleanup tx dma so map & unmap use matching calls
The igb driver was using map_single to map the skbs and then unmap_page to
unmap them. This update changes that so that skb_dma_map and skb_dma_unmap
are used instead.

In addition, the next_to_watch member of the buffer_info struct was being
set unnecessarily. I removed the spots where it was set without being needed.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
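
For context, the pattern the patch moves to looks roughly like the sketch below. This is a minimal illustration, not the driver code: it assumes the 2.6.29-era skb_dma_map()/skb_dma_unmap() helpers and the dma_maps array they fill in skb_shinfo(), and struct my_tx_buffer / my_map_tx_skb / my_unmap_tx_skb are hypothetical names, not igb symbols.

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

/* Hypothetical per-descriptor bookkeeping, standing in for igb_buffer. */
struct my_tx_buffer {
        dma_addr_t dma;
        unsigned int length;
};

/*
 * Map every segment of an skb with one call and record the addresses.
 * Returns the number of descriptors used, or 0 on a mapping error
 * (mirroring what igb_tx_map_adv() does after this patch).
 */
static unsigned int my_map_tx_skb(struct device *dev, struct sk_buff *skb,
                                  struct my_tx_buffer *bi)
{
        dma_addr_t *map;
        unsigned int f, count = 0;

        /* One call maps the linear head and all page fragments. */
        if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
                return 0;

        map = skb_shinfo(skb)->dma_maps;        /* map[0] is the head */

        bi[count].dma = map[count];
        bi[count].length = skb_headlen(skb);
        count++;

        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                bi[count].dma = map[count];
                bi[count].length = skb_shinfo(skb)->frags[f].size;
                count++;
        }
        return count;
}

/* The matching teardown: one call undoes every mapping made above. */
static void my_unmap_tx_skb(struct device *dev, struct sk_buff *skb)
{
        skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
}

The point of the change is visible here: a single pair of calls covers the linear head and every fragment, so the map and unmap sides can no longer fall out of step the way a pci_map_single()/pci_unmap_page() mix could.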
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--  drivers/net/igb/igb_main.c | 66
1 file changed, 35 insertions(+), 31 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index d6dda1621166..ca842163dce4 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2257,19 +2257,14 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
 static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
                                            struct igb_buffer *buffer_info)
 {
-        if (buffer_info->dma) {
-                pci_unmap_page(adapter->pdev,
-                               buffer_info->dma,
-                               buffer_info->length,
-                               PCI_DMA_TODEVICE);
-                buffer_info->dma = 0;
-        }
+        buffer_info->dma = 0;
         if (buffer_info->skb) {
+                skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+                              DMA_TO_DEVICE);
                 dev_kfree_skb_any(buffer_info->skb);
                 buffer_info->skb = NULL;
         }
         buffer_info->time_stamp = 0;
-        buffer_info->next_to_watch = 0;
         /* buffer_info must be completely set up in the transmit path */
 }
 
@@ -3078,25 +3073,33 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
         unsigned int len = skb_headlen(skb);
         unsigned int count = 0, i;
         unsigned int f;
+        dma_addr_t *map;
 
         i = tx_ring->next_to_use;
 
+        if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+                dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+                return 0;
+        }
+
+        map = skb_shinfo(skb)->dma_maps;
+
         buffer_info = &tx_ring->buffer_info[i];
         BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
         buffer_info->length = len;
         /* set time_stamp *before* dma to help avoid a possible race */
         buffer_info->time_stamp = jiffies;
         buffer_info->next_to_watch = i;
-        buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
-                                          PCI_DMA_TODEVICE);
+        buffer_info->dma = map[count];
         count++;
-        i++;
-        if (i == tx_ring->count)
-                i = 0;
 
         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                 struct skb_frag_struct *frag;
 
+                i++;
+                if (i == tx_ring->count)
+                        i = 0;
+
                 frag = &skb_shinfo(skb)->frags[f];
                 len = frag->size;
 
@@ -3105,19 +3108,10 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
                 buffer_info->length = len;
                 buffer_info->time_stamp = jiffies;
                 buffer_info->next_to_watch = i;
-                buffer_info->dma = pci_map_page(adapter->pdev,
-                                                frag->page,
-                                                frag->page_offset,
-                                                len,
-                                                PCI_DMA_TODEVICE);
-
+                buffer_info->dma = map[count];
                 count++;
-                i++;
-                if (i == tx_ring->count)
-                        i = 0;
         }
 
-        i = ((i == 0) ? tx_ring->count - 1 : i - 1);
         tx_ring->buffer_info[i].skb = skb;
         tx_ring->buffer_info[first].next_to_watch = i;
 
@@ -3230,6 +3224,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
         unsigned int first;
         unsigned int tx_flags = 0;
         u8 hdr_len = 0;
+        int count = 0;
         int tso = 0;
         union skb_shared_tx *shtx;
 
@@ -3291,14 +3286,23 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
             (skb->ip_summed == CHECKSUM_PARTIAL))
                 tx_flags |= IGB_TX_FLAGS_CSUM;
 
-        igb_tx_queue_adv(adapter, tx_ring, tx_flags,
-                         igb_tx_map_adv(adapter, tx_ring, skb, first),
-                         skb->len, hdr_len);
-
-        netdev->trans_start = jiffies;
-
-        /* Make sure there is space in the ring for the next send. */
-        igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+        /*
+         * count reflects descriptors mapped, if 0 then mapping error
+         * has occured and we need to rewind the descriptor queue
+         */
+        count = igb_tx_map_adv(adapter, tx_ring, skb, first);
+
+        if (count) {
+                igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
+                                 skb->len, hdr_len);
+                netdev->trans_start = jiffies;
+                /* Make sure there is space in the ring for the next send. */
+                igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+        } else {
+                dev_kfree_skb_any(skb);
+                tx_ring->buffer_info[first].time_stamp = 0;
+                tx_ring->next_to_use = first;
+        }
 
         return NETDEV_TX_OK;
 }
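
The else branch added to igb_xmit_frame_ring_adv() above is the interesting design point: since skb_dma_map() maps the head and every fragment in one call, a failure leaves nothing mapped, so recovery is just dropping the skb and rewinding next_to_use to the first slot the routine had claimed. A minimal, hypothetical sketch of that decision (my_tx_ring and my_finish_xmit are placeholders, not igb code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical minimal ring state, just enough to show the idea. */
struct my_tx_ring {
        unsigned int next_to_use;
};

/*
 * Sketch of the submit-side decision added by this patch: 'count' is what
 * the mapping routine returned (0 when skb_dma_map() failed).  Because the
 * single skb_dma_map() call either maps everything or nothing, the failure
 * path only has to drop the skb and hand the claimed slot back to the ring.
 */
static int my_finish_xmit(struct my_tx_ring *ring, struct sk_buff *skb,
                          unsigned int first, unsigned int count)
{
        if (count) {
                /* Success: the real driver posts 'count' descriptors,
                 * updates the tail register and checks ring space here. */
        } else {
                dev_kfree_skb_any(skb);         /* nothing was mapped */
                ring->next_to_use = first;      /* rewind the ring */
        }
        return NETDEV_TX_OK;                    /* skb consumed either way */
}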