author		Alexander Duyck <alexander.h.duyck@intel.com>	2012-02-08 02:51:06 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2012-03-17 04:41:49 -0400
commit		729739b754affa482e92fa7836e4066096089d11 (patch)
tree		878a830a30731a5c64559ecb95caa115a524b799 /drivers
parent		091a6246869cec2ac66e897b436f7fd59ec4d316 (diff)

ixgbe: always write DMA for single_mapped value with skb
This change makes it so that we always write the DMA address for the skb
itself on the same tx_buffer struct that the skb is written on. This way
we don't need the MAPPED_AS_PAGE flag, and we always know it will be the
first DMA value that we will have to unmap.
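
As a rough sketch of that bookkeeping (simplified, with a hypothetical helper
name; the real cleanup lives in ixgbe_unmap_and_free_tx_resource() and
ixgbe_clean_tx_irq() in the diff below), the skb-owning tx_buffer now carries
its own unmap address and length:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative only: the header mapping is recorded on the same tx_buffer
 * slot that holds the skb, so it is always the first thing to unmap and no
 * MAPPED_AS_PAGE flag is needed to tell the header apart from page frags.
 */
static void example_free_tx_head(struct device *dev,
				 struct ixgbe_tx_buffer *tx_buffer)
{
	dev_kfree_skb_any(tx_buffer->skb);
	dma_unmap_single(dev,
			 dma_unmap_addr(tx_buffer, dma),
			 dma_unmap_len(tx_buffer, len),
			 DMA_TO_DEVICE);
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
}
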
In addition, I found an issue in which we were leaking a DMA mapping if the
address happened to be 0, which is possible on some platforms. To resolve
that, I have updated the transmit path to use the length instead of the DMA
address to determine whether a mapping is actually present.
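
A minimal sketch of that presence test (hypothetical helper name; the driver
does this inline in its cleanup paths): because a DMA address of 0 can be a
legitimate mapping on some platforms, the recorded length, not the address,
decides whether there is anything to unmap:

/* Illustrative only: checking the address would wrongly treat a valid
 * mapping at DMA address 0 as "not mapped" and therefore leak it.
 */
static void example_unmap_frag(struct device *dev,
			       struct ixgbe_tx_buffer *tx_buffer)
{
	if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);
	}
}
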
One other tweak in this patch is that it only writes the olinfo information
on the first descriptor. As it turns out, it isn't necessary to write it
for anything but the first descriptor, so there is no need to carry it
forward.
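
Sketched out (simplified from the ixgbe_tx_map() changes below, with a
hypothetical wrapper function), the olinfo_status word is filled in once for
the first descriptor of a packet, and every later descriptor simply carries 0:

/* Illustrative only: hardware reads olinfo_status from the first descriptor
 * of a packet, so descriptors after the first can be left at zero.
 */
static void example_fill_olinfo(union ixgbe_adv_tx_desc *first_desc,
				union ixgbe_adv_tx_desc *later_desc,
				u32 tx_flags, unsigned int paylen)
{
	/* written once, on the first descriptor of the packet */
	ixgbe_tx_olinfo_status(first_desc, tx_flags, paylen);

	/* any following descriptor: no need to carry it forward */
	later_desc->read.olinfo_status = 0;
}
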
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe.h	  5
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	266
2 files changed, 153 insertions, 118 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 55f31fe58e41..e0d809d0ed75 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -96,7 +96,6 @@
 #define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
 #define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
 #define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
-#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 8)
 #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -153,8 +152,8 @@ struct ixgbe_tx_buffer {
 	struct sk_buff *skb;
 	unsigned int bytecount;
 	unsigned short gso_segs;
-	dma_addr_t dma;
-	unsigned int length;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
 };
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 40d729eb1443..1d8f9f83f8ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -289,7 +289,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 	struct ixgbe_reg_info *reginfo;
 	int n = 0;
 	struct ixgbe_ring *tx_ring;
-	struct ixgbe_tx_buffer *tx_buffer_info;
+	struct ixgbe_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
 	struct my_u0 { u64 a; u64 b; } *u0;
 	struct ixgbe_ring *rx_ring;
@@ -329,14 +329,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
 	for (n = 0; n < adapter->num_tx_queues; n++) {
 		tx_ring = adapter->tx_ring[n];
-		tx_buffer_info =
-			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
 		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
-			(u64)tx_buffer_info->dma,
-			tx_buffer_info->length,
-			tx_buffer_info->next_to_watch,
-			(u64)tx_buffer_info->time_stamp);
+			(u64)dma_unmap_addr(tx_buffer, dma),
+			dma_unmap_len(tx_buffer, len),
+			tx_buffer->next_to_watch,
+			(u64)tx_buffer->time_stamp);
 	}
 
 	/* Print TX Rings */
@@ -367,17 +366,17 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 		tx_desc = IXGBE_TX_DESC(tx_ring, i);
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		tx_buffer = &tx_ring->tx_buffer_info[i];
 		u0 = (struct my_u0 *)tx_desc;
 		pr_info("T [0x%03X] %016llX %016llX %016llX"
 			" %04X %p %016llX %p", i,
 			le64_to_cpu(u0->a),
 			le64_to_cpu(u0->b),
-			(u64)tx_buffer_info->dma,
-			tx_buffer_info->length,
-			tx_buffer_info->next_to_watch,
-			(u64)tx_buffer_info->time_stamp,
-			tx_buffer_info->skb);
+			(u64)dma_unmap_addr(tx_buffer, dma),
+			dma_unmap_len(tx_buffer, len),
+			tx_buffer->next_to_watch,
+			(u64)tx_buffer->time_stamp,
+			tx_buffer->skb);
 		if (i == tx_ring->next_to_use &&
 			i == tx_ring->next_to_clean)
 			pr_cont(" NTC/U\n");
@@ -389,11 +388,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 			pr_cont("\n");
 
 			if (netif_msg_pktdata(adapter) &&
-			    tx_buffer_info->dma != 0)
+			    dma_unmap_len(tx_buffer, len) != 0)
 				print_hex_dump(KERN_INFO, "",
 					DUMP_PREFIX_ADDRESS, 16, 1,
-					phys_to_virt(tx_buffer_info->dma),
-					tx_buffer_info->length, true);
+					phys_to_virt(dma_unmap_addr(tx_buffer,
+								    dma)),
+					dma_unmap_len(tx_buffer, len),
+					true);
 		}
 	}
 
@@ -579,32 +580,26 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 	}
 }
 
-static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
-					   struct ixgbe_tx_buffer *tx_buffer)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
+				      struct ixgbe_tx_buffer *tx_buffer)
 {
-	if (tx_buffer->dma) {
-		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
-			dma_unmap_page(ring->dev,
-				       tx_buffer->dma,
-				       tx_buffer->length,
-				       DMA_TO_DEVICE);
-		else
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(ring->dev,
-					 tx_buffer->dma,
-					 tx_buffer->length,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
 					 DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev,
+			       dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len),
+			       DMA_TO_DEVICE);
 	}
-	tx_buffer->dma = 0;
-}
-
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
-				      struct ixgbe_tx_buffer *tx_buffer_info)
-{
-	ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
-	if (tx_buffer_info->skb)
-		dev_kfree_skb_any(tx_buffer_info->skb);
-	tx_buffer_info->skb = NULL;
-	/* tx_buffer_info must be completely set up in the transmit path */
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
 }
 
 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
@@ -741,12 +736,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	union ixgbe_adv_tx_desc *tx_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
 	unsigned int budget = q_vector->tx.work_limit;
-	u16 i = tx_ring->next_to_clean;
+	unsigned int i = tx_ring->next_to_clean;
+
+	if (test_bit(__IXGBE_DOWN, &adapter->state))
+		return true;
 
 	tx_buffer = &tx_ring->tx_buffer_info[i];
 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
 
-	for (; budget; budget--) {
+	do {
 		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
 
 		/* if next_to_watch is not set then there is no work pending */
@@ -770,27 +769,55 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		/* free the skb */
 		dev_kfree_skb_any(tx_buffer->skb);
 
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
 		/* clear tx_buffer data */
 		tx_buffer->skb = NULL;
+		dma_unmap_len_set(tx_buffer, len, 0);
 
-		do {
-			ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
-			if (likely(tx_desc == eop_desc))
-				eop_desc = NULL;
-
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
 			tx_buffer++;
 			tx_desc++;
 			i++;
-			if (unlikely(i == tx_ring->count)) {
-				i = 0;
-
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
 				tx_buffer = tx_ring->tx_buffer_info;
 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 			}
 
-		} while (eop_desc);
-	}
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buffer, len, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+		}
+
+		/* issue prefetch for next Tx descriptor */
+		prefetch(tx_desc);
 
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
 	tx_ring->next_to_clean = i;
 	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->stats.bytes += total_bytes;
@@ -802,7 +829,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
 		/* schedule immediate reset if we believe we hung */
 		struct ixgbe_hw *hw = &adapter->hw;
-		tx_desc = IXGBE_TX_DESC(tx_ring, i);
 		e_err(drv, "Detected Tx Unit Hang\n"
 			" Tx Queue <%d>\n"
 			" TDH, TDT <%x>, <%x>\n"
@@ -840,9 +866,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		 * sees the new next_to_clean.
 		 */
 		smp_mb();
-		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
-		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
-			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index)
+		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
 			++tx_ring->tx_stats.restart_queue;
 		}
 	}
@@ -6707,7 +6735,8 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 	return cmd_type;
 }
 
-static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+				   u32 tx_flags, unsigned int paylen)
 {
 	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
@@ -6738,7 +6767,7 @@ static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
 #endif
 	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
 
-	return olinfo_status;
+	tx_desc->read.olinfo_status = olinfo_status;
 }
 
 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
@@ -6749,103 +6778,102 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 			 u32 tx_flags,
 			 const u8 hdr_len)
 {
+	dma_addr_t dma;
 	struct sk_buff *skb = first->skb;
-	struct device *dev = tx_ring->dev;
-	struct ixgbe_tx_buffer *tx_buffer_info;
+	struct ixgbe_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
-	dma_addr_t dma;
-	__le32 cmd_type, olinfo_status;
-	struct skb_frag_struct *frag;
-	unsigned int f = 0;
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
-	u32 offset = 0;
-	u32 paylen = skb->len - hdr_len;
+	unsigned int paylen = skb->len - hdr_len;
+	__le32 cmd_type;
 	u16 i = tx_ring->next_to_use;
 
+	tx_desc = IXGBE_TX_DESC(tx_ring, i);
+
+	ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
+	cmd_type = ixgbe_tx_cmd_type(tx_flags);
+
 #ifdef IXGBE_FCOE
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
-		if (data_len >= sizeof(struct fcoe_crc_eof)) {
-			data_len -= sizeof(struct fcoe_crc_eof);
-		} else {
+		if (data_len < sizeof(struct fcoe_crc_eof)) {
 			size -= sizeof(struct fcoe_crc_eof) - data_len;
 			data_len = 0;
+		} else {
+			data_len -= sizeof(struct fcoe_crc_eof);
 		}
 	}
 
 #endif
-	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma))
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(tx_ring->dev, dma))
 		goto dma_error;
 
-	cmd_type = ixgbe_tx_cmd_type(tx_flags);
-	olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
+	/* record length, and DMA address */
+	dma_unmap_len_set(first, len, size);
+	dma_unmap_addr_set(first, dma, dma);
+	first->tx_flags = tx_flags;
 
-	tx_desc = IXGBE_TX_DESC(tx_ring, i);
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
 	for (;;) {
-		while (size > IXGBE_MAX_DATA_PER_TXD) {
-			tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
 			tx_desc->read.cmd_type_len =
 				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
-			tx_desc->read.olinfo_status = olinfo_status;
-
-			offset += IXGBE_MAX_DATA_PER_TXD;
-			size -= IXGBE_MAX_DATA_PER_TXD;
 
-			tx_desc++;
 			i++;
+			tx_desc++;
 			if (i == tx_ring->count) {
 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 				i = 0;
 			}
+
+			dma += IXGBE_MAX_DATA_PER_TXD;
+			size -= IXGBE_MAX_DATA_PER_TXD;
+
+			tx_desc->read.buffer_addr = cpu_to_le64(dma);
+			tx_desc->read.olinfo_status = 0;
 		}
 
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		tx_buffer_info->length = offset + size;
-		tx_buffer_info->tx_flags = tx_flags;
-		tx_buffer_info->dma = dma;
+		if (likely(!data_len))
+			break;
 
-		tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
 		if (unlikely(skb->no_fcs))
 			cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
 		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
-		tx_desc->read.olinfo_status = olinfo_status;
 
-		if (!data_len)
-			break;
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
 
-		frag = &skb_shinfo(skb)->frags[f];
 #ifdef IXGBE_FCOE
 		size = min_t(unsigned int, data_len, skb_frag_size(frag));
 #else
 		size = skb_frag_size(frag);
 #endif
 		data_len -= size;
-		f++;
 
-		offset = 0;
-		tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
-
-		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma))
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
 			goto dma_error;
 
-		tx_desc++;
-		i++;
-		if (i == tx_ring->count) {
-			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
-			i = 0;
-		}
-	}
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
 
-	tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		tx_desc->read.olinfo_status = 0;
 
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
+		frag++;
+	}
 
-	tx_ring->next_to_use = i;
+	/* write last descriptor with RS and EOP bits */
+	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+	tx_desc->read.cmd_type_len = cmd_type;
 
 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 
@@ -6853,28 +6881,36 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	first->time_stamp = jiffies;
 
 	/*
-	 * Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
+	 * Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
 	 */
 	wmb();
 
 	/* set next_to_watch value indicating a packet is present */
 	first->next_to_watch = tx_desc;
 
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
 	/* notify HW of packet */
 	writel(i, tx_ring->tail);
 
 	return;
 dma_error:
-	dev_err(dev, "TX DMA map failed\n");
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
 
 	/* clear dma mappings for failed tx_buffer_info map */
 	for (;;) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-		if (tx_buffer_info == first)
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
 			break;
 		if (i == 0)
 			i = tx_ring->count;