author		Alexander Duyck <alexander.h.duyck@intel.com>	2012-09-17 21:56:27 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2012-09-22 06:18:20 -0400
commit		c9f14bf3a49f86e6402a6e3476a180f2bdc8a71b (patch)
tree		e9bf482bcbdffcd74fb0cc41b270cbc9ca4deb04 /drivers/net/ethernet/intel
parent		a57fe23e240b95282e60d643cd8ada3d2a66d8c6 (diff)
igb: Use dma_unmap_addr and dma_unmap_len defines
This change is meant to improve performance on systems that do not
require the DMA unmap calls. On those systems we do not need to make use
of the unmap address for Tx or the unmap length, so we can drop both,
thereby reducing the size of the Tx buffer info structure.

In addition I have changed the logic to check for unmap length instead
of unmap address when checking to see if a buffer needs to be unmapped
from DMA use. The reason for this change is that on some platforms it is
possible to receive a valid DMA address of 0 from an IOMMU.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
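[Editor's note: for reference, a minimal sketch of what these helpers expand to, paraphrased from include/linux/dma-mapping.h of this era; the exact definitions may differ between kernel versions.]

#ifdef CONFIG_NEED_DMA_MAP_STATE
/* Unmap may be required: keep real struct fields and real accessors. */
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
/* Unmap is a no-op: the fields vanish, shrinking struct igb_tx_buffer,
 * and the accessors become constants the compiler can optimize away.
 */
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif

Because dma_unmap_len() returns a length the driver stored itself, testing it for zero is a reliable "needs unmap" check even when an IOMMU legitimately maps a buffer at bus address 0, which is why the checks below test the length rather than the address.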
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--	drivers/net/ethernet/intel/igb/igb.h	|  4
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	| 64
2 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9cad05894193..8aad230c0592 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -168,8 +168,8 @@ struct igb_tx_buffer {
 	unsigned int bytecount;
 	u16 gso_segs;
 	__be16 protocol;
-	dma_addr_t dma;
-	u32 length;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
 };
 
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index db6e456688a1..60bf46534835 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -403,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter)
 		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
 		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
-			(u64)buffer_info->dma,
-			buffer_info->length,
+			(u64)dma_unmap_addr(buffer_info, dma),
+			dma_unmap_len(buffer_info, len),
 			buffer_info->next_to_watch,
 			(u64)buffer_info->time_stamp);
 	}
@@ -455,8 +455,8 @@ static void igb_dump(struct igb_adapter *adapter)
455 " %04X %p %016llX %p%s\n", i, 455 " %04X %p %016llX %p%s\n", i,
456 le64_to_cpu(u0->a), 456 le64_to_cpu(u0->a),
457 le64_to_cpu(u0->b), 457 le64_to_cpu(u0->b),
458 (u64)buffer_info->dma, 458 (u64)dma_unmap_addr(buffer_info, dma),
459 buffer_info->length, 459 dma_unmap_len(buffer_info, len),
460 buffer_info->next_to_watch, 460 buffer_info->next_to_watch,
461 (u64)buffer_info->time_stamp, 461 (u64)buffer_info->time_stamp,
462 buffer_info->skb, next_desc); 462 buffer_info->skb, next_desc);
@@ -465,7 +465,8 @@ static void igb_dump(struct igb_adapter *adapter)
 			print_hex_dump(KERN_INFO, "",
 				DUMP_PREFIX_ADDRESS,
 				16, 1, buffer_info->skb->data,
-				buffer_info->length, true);
+				dma_unmap_len(buffer_info, len),
+				true);
 		}
 	}
 
@@ -3198,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
 {
 	if (tx_buffer->skb) {
 		dev_kfree_skb_any(tx_buffer->skb);
-		if (tx_buffer->dma)
+		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(ring->dev,
-					 tx_buffer->dma,
-					 tx_buffer->length,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
 					 DMA_TO_DEVICE);
-	} else if (tx_buffer->dma) {
+	} else if (dma_unmap_len(tx_buffer, len)) {
 		dma_unmap_page(ring->dev,
-			       tx_buffer->dma,
-			       tx_buffer->length,
+			       dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len),
 			       DMA_TO_DEVICE);
 	}
 	tx_buffer->next_to_watch = NULL;
 	tx_buffer->skb = NULL;
-	tx_buffer->dma = 0;
+	dma_unmap_len_set(tx_buffer, len, 0);
 	/* buffer_info must be completely set up in the transmit path */
 }
 
@@ -4206,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 		      const u8 hdr_len)
 {
 	struct sk_buff *skb = first->skb;
-	struct igb_tx_buffer *tx_buffer_info;
+	struct igb_tx_buffer *tx_buffer;
 	union e1000_adv_tx_desc *tx_desc;
 	dma_addr_t dma;
 	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -4227,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 		goto dma_error;
 
 	/* record length, and DMA address */
-	first->length = size;
-	first->dma = dma;
+	dma_unmap_len_set(first, len, size);
+	dma_unmap_addr_set(first, dma, dma);
 	tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
 	for (;;) {
@@ -4270,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 		if (dma_mapping_error(tx_ring->dev, dma))
 			goto dma_error;
 
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		tx_buffer_info->length = size;
-		tx_buffer_info->dma = dma;
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
 
 		tx_desc->read.olinfo_status = 0;
 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -4323,9 +4324,9 @@ dma_error:
 
 	/* clear dma mappings for failed tx_buffer_info map */
 	for (;;) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-		if (tx_buffer_info == first)
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
 			break;
 		if (i == 0)
 			i = tx_ring->count;
@@ -5716,18 +5717,19 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 
 		/* free the skb */
 		dev_kfree_skb_any(tx_buffer->skb);
-		tx_buffer->skb = NULL;
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
-				 tx_buffer->dma,
-				 tx_buffer->length,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
 				 DMA_TO_DEVICE);
 
+		/* clear tx_buffer data */
+		tx_buffer->skb = NULL;
+		dma_unmap_len_set(tx_buffer, len, 0);
+
 		/* clear last DMA location and unmap remaining buffers */
 		while (tx_desc != eop_desc) {
-			tx_buffer->dma = 0;
-
 			tx_buffer++;
 			tx_desc++;
 			i++;
@@ -5738,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 			}
 
 			/* unmap any remaining paged data */
-			if (tx_buffer->dma) {
+			if (dma_unmap_len(tx_buffer, len)) {
 				dma_unmap_page(tx_ring->dev,
-					       tx_buffer->dma,
-					       tx_buffer->length,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
 					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buffer, len, 0);
 			}
 		}
 
-		/* clear last DMA location */
-		tx_buffer->dma = 0;
-
 		/* move us one more past the eop_desc for start of next pkt */
 		tx_buffer++;
 		tx_desc++;