author	David S. Miller <davem@davemloft.net>	2018-01-28 10:19:48 -0500
committer	David S. Miller <davem@davemloft.net>	2018-01-28 10:19:48 -0500
commit	6b2e2829c11ea677aa97ecfe95d9544aa0208b8c (patch)
tree	7b77e6494ab97efc7a70b86ee3a4ab9db0fd4552
parent	751c45bd828f935ac6330b933ff10bf55277c84c (diff)
parent	2bafa8fac19a31ca72ae1a3e48df35f73661dbed (diff)
Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-01-26

This series contains updates to ixgbe and ixgbevf.

Emil updates ixgbevf to match ixgbe functionality, starting with the
consolidating of functions that represent logical steps in the receive
process so we can later update them more easily. Updated ixgbevf to only
synchronize the length of the frame, which will typically be the MTU or
smaller. Updated the VF driver to use the length of the packet instead
of the DD status bit to determine if a new descriptor is ready to be
processed, which saves on reads and we can save time on initialization.
Added support for DMA_ATTR_SKIP_CPU_SYNC/WEAK_ORDERING to help improve
performance on some platforms. Updated the VF driver to do bulk updates
of the page reference count instead of just incrementing it by one
reference at a time. Updated the VF driver to only go through the region
of the receive ring that was designated to be cleaned up, rather than
process the entire ring.

Colin Ian King adds the use of ARRAY_SIZE() on various arrays.

Miroslav Lichvar fixes an issue where ethtool was reporting timestamping
filters unsupported for X550, which is incorrect.

Paul adds support for reporting 5G link speed for some devices.

Dan Carpenter fixes a typo where && was used when it should have been ||.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
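For reference, the ARRAY_SIZE() cleanups mentioned above simply replace the open-coded sizeof division with the equivalent kernel macro. A minimal stand-alone sketch (plain user-space C with a simplified macro, not the kernel code itself):

#include <stdio.h>

/* Simplified version of the kernel's ARRAY_SIZE(); the real macro also
 * adds __must_be_array() so it fails to build when passed a pointer.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	unsigned int msgbuf[3] = { 0 };

	/* open-coded element count, as replaced throughout vf.c below */
	printf("%zu\n", sizeof(msgbuf) / sizeof(msgbuf[0]));	/* prints 3 */

	/* equivalent, but clearer and harder to get wrong */
	printf("%zu\n", ARRAY_SIZE(msgbuf));			/* prints 3 */

	return 0;
}

Since the macro expands to the same division (plus a compile-time array check), the conversions in vf.c and ixgbe_x550.c are behaviorally identical.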
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c    |   2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c   |  37
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      |  11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c      |   2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c       |   3
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h       |  16
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  | 362
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c            |  17
8 files changed, 271 insertions, 179 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 7ac7ef9b37ff..61188f343955 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -4087,7 +4087,7 @@ void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
 	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
 
 	/* Return is offset to OEM Product Version block is invalid */
-	if (offset == 0x0 && offset == NVM_INVALID_PTR)
+	if (offset == 0x0 || offset == NVM_INVALID_PTR)
 		return;
 
 	/* Read product version block */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 317351025fd7..221f15803480 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3085,26 +3085,9 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_x550em_a:
 		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
-		/* fallthrough */
+		break;
 	case ixgbe_mac_X540:
 	case ixgbe_mac_82599EB:
-		info->so_timestamping =
-			SOF_TIMESTAMPING_TX_SOFTWARE |
-			SOF_TIMESTAMPING_RX_SOFTWARE |
-			SOF_TIMESTAMPING_SOFTWARE |
-			SOF_TIMESTAMPING_TX_HARDWARE |
-			SOF_TIMESTAMPING_RX_HARDWARE |
-			SOF_TIMESTAMPING_RAW_HARDWARE;
-
-		if (adapter->ptp_clock)
-			info->phc_index = ptp_clock_index(adapter->ptp_clock);
-		else
-			info->phc_index = -1;
-
-		info->tx_types =
-			BIT(HWTSTAMP_TX_OFF) |
-			BIT(HWTSTAMP_TX_ON);
-
 		info->rx_filters |=
 			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
 			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
@@ -3113,6 +3096,24 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 	default:
 		return ethtool_op_get_ts_info(dev, info);
 	}
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE |
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	if (adapter->ptp_clock)
+		info->phc_index = ptp_clock_index(adapter->ptp_clock);
+	else
+		info->phc_index = -1;
+
+	info->tx_types =
+		BIT(HWTSTAMP_TX_OFF) |
+		BIT(HWTSTAMP_TX_ON);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bbb622f15a77..0da5aa2c8aba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4133,11 +4133,15 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 		rxdctl &= ~0x3FFFFF;
 		rxdctl |= 0x080420;
 #if (PAGE_SIZE < 8192)
-	} else {
+	/* RXDCTL.RLPML does not work on 82599 */
+	} else if (hw->mac.type != ixgbe_mac_82599EB) {
 		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
 			    IXGBE_RXDCTL_RLPML_EN);
 
-		/* Limit the maximum frame size so we don't overrun the skb */
+		/* Limit the maximum frame size so we don't overrun the skb.
+		 * This can happen in SRIOV mode when the MTU of the VF is
+		 * higher than the MTU of the PF.
+		 */
 		if (ring_uses_build_skb(ring) &&
 		    !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
 			rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
@@ -7259,6 +7263,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 	case IXGBE_LINK_SPEED_10GB_FULL:
 		speed_str = "10 Gbps";
 		break;
+	case IXGBE_LINK_SPEED_5GB_FULL:
+		speed_str = "5 Gbps";
+		break;
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
 		speed_str = "2.5 Gbps";
 		break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 3bce26e77090..f470d0204771 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -949,7 +949,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
 	u16 length, bufsz, i, start;
 	u16 *local_buffer;
 
-	bufsz = sizeof(buf) / sizeof(buf[0]);
+	bufsz = ARRAY_SIZE(buf);
 
 	/* Read a chunk at the pointer location */
 	if (!buffer) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index ff9d05f308ee..4400e49090b4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -75,6 +75,9 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
 	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
 	IXGBEVF_NETDEV_STAT(multicast),
 	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+	IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
+	IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+	IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
 };
 
 #define IXGBEVF_QUEUE_STATS_LEN ( \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 581f44bbd7b3..f6952425c87d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -62,7 +62,12 @@ struct ixgbevf_tx_buffer {
 struct ixgbevf_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };
 
 struct ixgbevf_stats {
@@ -79,6 +84,7 @@ struct ixgbevf_tx_queue_stats {
 struct ixgbevf_rx_queue_stats {
 	u64 alloc_rx_page_failed;
 	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
 	u64 csum_err;
 };
 
@@ -260,6 +266,9 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
 #define MIN_MSIX_Q_VECTORS	1
 #define MIN_MSIX_COUNT		(MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
 
+#define IXGBEVF_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
 /* board specific private data structure */
 struct ixgbevf_adapter {
 	/* this field must be first, see ixgbevf_process_skb_fields */
@@ -287,8 +296,9 @@ struct ixgbevf_adapter {
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
 	int num_msix_vectors;
-	u32 alloc_rx_page_failed;
-	u32 alloc_rx_buff_failed;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
 
 	struct msix_entry *msix_entries;
 
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ed5c3aea7939..9b3d43d28106 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -206,28 +206,6 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 	}
 }
 
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
-					       struct ixgbevf_tx_buffer *tx_buffer)
-{
-	if (tx_buffer->skb) {
-		dev_kfree_skb_any(tx_buffer->skb);
-		if (dma_unmap_len(tx_buffer, len))
-			dma_unmap_single(tx_ring->dev,
-					 dma_unmap_addr(tx_buffer, dma),
-					 dma_unmap_len(tx_buffer, len),
-					 DMA_TO_DEVICE);
-	} else if (dma_unmap_len(tx_buffer, len)) {
-		dma_unmap_page(tx_ring->dev,
-			       dma_unmap_addr(tx_buffer, dma),
-			       dma_unmap_len(tx_buffer, len),
-			       DMA_TO_DEVICE);
-	}
-	tx_buffer->next_to_watch = NULL;
-	tx_buffer->skb = NULL;
-	dma_unmap_len_set(tx_buffer, len, 0);
-	/* tx_buffer must be completely set up in the transmit path */
-}
-
 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
 {
 	return ring->stats.packets;
@@ -349,7 +327,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 				 DMA_TO_DEVICE);
 
 		/* clear tx_buffer data */
-		tx_buffer->skb = NULL;
 		dma_unmap_len_set(tx_buffer, len, 0);
 
 		/* unmap remaining buffers */
@@ -595,8 +572,8 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0,
-			   PAGE_SIZE, DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
@@ -604,13 +581,15 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 	if (dma_mapping_error(rx_ring->dev, dma)) {
 		__free_page(page);
 
-		rx_ring->rx_stats.alloc_rx_buff_failed++;
+		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
 	}
 
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	bi->pagecnt_bias = 1;
+	rx_ring->rx_stats.alloc_rx_page++;
 
 	return true;
 }
@@ -639,6 +618,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
 			break;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 IXGBEVF_RX_BUFSZ,
+						 DMA_FROM_DEVICE);
+
 		/* Refresh the desc even if pkt_addr didn't change
 		 * because each write-back erases this info.
 		 */
@@ -653,8 +638,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 			i -= rx_ring->count;
 		}
 
-		/* clear the hdr_addr for the next_to_use descriptor */
-		rx_desc->read.hdr_addr = 0;
+		/* clear the length for the next_to_use descriptor */
+		rx_desc->wb.upper.length = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
@@ -741,12 +726,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
 	new_buff->page = old_buff->page;
 	new_buff->dma = old_buff->dma;
 	new_buff->page_offset = old_buff->page_offset;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-					 new_buff->page_offset,
-					 IXGBEVF_RX_BUFSZ,
-					 DMA_FROM_DEVICE);
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }
 
 static inline bool ixgbevf_page_is_reserved(struct page *page)
@@ -754,6 +734,45 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
+				      struct page *page,
+				      const unsigned int truesize)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
+	/* avoid re-using remote pages */
+	if (unlikely(ixgbevf_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely(page_ref_count(page) != pagecnt_bias))
+		return false;
+
+	/* flip page offset to other buffer */
+	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += truesize;
+
+	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+		return false;
+
+#endif
+
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
+}
+
 /**
  * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -771,12 +790,12 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
  **/
 static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 				struct ixgbevf_rx_buffer *rx_buffer,
+				u16 size,
 				union ixgbe_adv_rx_desc *rx_desc,
 				struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
 	unsigned char *va = page_address(page) + rx_buffer->page_offset;
-	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = IXGBEVF_RX_BUFSZ;
 #else
@@ -795,7 +814,6 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 			return true;
 
 		/* this page cannot be reused so discard it */
-		put_page(page);
 		return false;
 	}
 
@@ -815,32 +833,7 @@ add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
-	/* avoid re-using remote pages */
-	if (unlikely(ixgbevf_page_is_reserved(page)))
-		return false;
-
-#if (PAGE_SIZE < 8192)
-	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
-		return false;
-
-	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
-
-#else
-	/* move offset up to the next cache line */
-	rx_buffer->page_offset += truesize;
-
-	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
-		return false;
-
-#endif
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
-	 */
-	page_ref_inc(page);
-
-	return true;
+	return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
 static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
@@ -849,11 +842,19 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 {
 	struct ixgbevf_rx_buffer *rx_buffer;
 	struct page *page;
+	u16 size = le16_to_cpu(rx_desc->wb.upper.length);
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
 	page = rx_buffer->page;
 	prefetchw(page);
 
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
 	if (likely(!skb)) {
 		void *page_addr = page_address(page) +
 				  rx_buffer->page_offset;
@@ -879,21 +880,18 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 		prefetchw(skb->data);
 	}
 
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      IXGBEVF_RX_BUFSZ,
-				      DMA_FROM_DEVICE);
-
 	/* pull page into skb */
-	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
 		/* hand second half of page back to the ring */
 		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     IXGBEVF_RX_DMA_ATTR);
+		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
 	}
 
 	/* clear contents of buffer_info */
@@ -930,7 +928,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 
 		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+		if (!rx_desc->wb.upper.length)
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -943,8 +941,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
 		/* exit if we failed to retrieve a buffer */
-		if (!skb)
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
 			break;
+		}
 
 		cleaned_count++;
 
@@ -1553,6 +1553,10 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
 	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
 		   32;		/* PTHRESH = 32 */
 
+	/* reinitialize tx_buffer_info */
+	memset(ring->tx_buffer_info, 0,
+	       sizeof(struct ixgbevf_tx_buffer) * ring->count);
+
 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
@@ -1721,6 +1725,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 				      struct ixgbevf_ring *ring)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	union ixgbe_adv_rx_desc *rx_desc;
 	u64 rdba = ring->dma;
 	u32 rxdctl;
 	u8 reg_idx = ring->reg_idx;
@@ -1749,6 +1754,14 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
 	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
 
+	/* initialize rx_buffer_info */
+	memset(ring->rx_buffer_info, 0,
+	       sizeof(struct ixgbevf_rx_buffer) * ring->count);
+
+	/* initialize Rx descriptor 0 */
+	rx_desc = IXGBEVF_RX_DESC(ring, 0);
+	rx_desc->wb.upper.length = 0;
+
 	/* reset ntu and ntc to place SW in sync with hardwdare */
 	ring->next_to_clean = 0;
 	ring->next_to_use = 0;
@@ -2103,9 +2116,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 {
-	struct device *dev = rx_ring->dev;
-	unsigned long size;
-	unsigned int i;
+	u16 i = rx_ring->next_to_clean;
 
 	/* Free Rx ring sk_buff */
 	if (rx_ring->skb) {
@@ -2113,29 +2124,39 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 		rx_ring->skb = NULL;
 	}
 
-	/* ring already cleared, nothing to do */
-	if (!rx_ring->rx_buffer_info)
-		return;
-
 	/* Free all the Rx ring pages */
-	for (i = 0; i < rx_ring->count; i++) {
+	while (i != rx_ring->next_to_alloc) {
 		struct ixgbevf_rx_buffer *rx_buffer;
 
 		rx_buffer = &rx_ring->rx_buffer_info[i];
-		if (rx_buffer->dma)
-			dma_unmap_page(dev, rx_buffer->dma,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-		rx_buffer->dma = 0;
-		if (rx_buffer->page)
-			__free_page(rx_buffer->page);
-		rx_buffer->page = NULL;
-	}
 
-	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_buffer_info, 0, size);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      IXGBEVF_RX_BUFSZ,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev,
+				     rx_buffer->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     IXGBEVF_RX_DMA_ATTR);
+
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
 
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+	}
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
 }
 
 /**
@@ -2144,23 +2165,57 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
  **/
 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
 {
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned long size;
-	unsigned int i;
+	u16 i = tx_ring->next_to_clean;
+	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
-	if (!tx_ring->tx_buffer_info)
-		return;
+	while (i != tx_ring->next_to_use) {
+		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
 
-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
+		/* check for eop_desc to determine the end of the packet */
+		eop_desc = tx_buffer->next_to_watch;
+		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len))
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		i++;
+		if (unlikely(i == tx_ring->count)) {
+			i = 0;
+			tx_buffer = tx_ring->tx_buffer_info;
+		}
 	}
 
-	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	memset(tx_ring->tx_buffer_info, 0, size);
+	/* reset next_to_use and next_to_clean */
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
 
-	memset(tx_ring->desc, 0, tx_ring->size);
 }
 
 /**
@@ -2712,6 +2767,8 @@ out:
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+	u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
 	int i;
 
 	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
@@ -2732,10 +2789,18 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 				adapter->stats.vfmprc);
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->hw_csum_rx_error +=
-			adapter->rx_ring[i]->hw_csum_rx_error;
-		adapter->rx_ring[i]->hw_csum_rx_error = 0;
+		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+		alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
 	}
+
+	adapter->hw_csum_rx_error = hw_csum_rx_error;
+	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+	adapter->alloc_rx_page = alloc_rx_page;
 }
 
 /**
@@ -2980,7 +3045,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
 	int size;
 
 	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	tx_ring->tx_buffer_info = vzalloc(size);
+	tx_ring->tx_buffer_info = vmalloc(size);
 	if (!tx_ring->tx_buffer_info)
 		goto err;
 
@@ -3040,7 +3105,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
 	int size;
 
 	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-	rx_ring->rx_buffer_info = vzalloc(size);
+	rx_ring->rx_buffer_info = vmalloc(size);
 	if (!rx_ring->rx_buffer_info)
 		goto err;
 
@@ -3482,34 +3547,37 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			   struct ixgbevf_tx_buffer *first,
 			   const u8 hdr_len)
 {
-	dma_addr_t dma;
 	struct sk_buff *skb = first->skb;
 	struct ixgbevf_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned int data_len = skb->data_len;
-	unsigned int size = skb_headlen(skb);
-	unsigned int paylen = skb->len - hdr_len;
+	struct skb_frag_struct *frag;
+	dma_addr_t dma;
+	unsigned int data_len, size;
 	u32 tx_flags = first->tx_flags;
-	__le32 cmd_type;
+	__le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
 	u16 i = tx_ring->next_to_use;
 
 	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 
-	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
-	cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
 
 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(tx_ring->dev, dma))
-		goto dma_error;
 
-	/* record length, and DMA address */
-	dma_unmap_len_set(first, len, size);
-	dma_unmap_addr_set(first, dma, dma);
+	tx_buffer = first;
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
 
-	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
-	for (;;) {
 		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
 			tx_desc->read.cmd_type_len =
 				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
@@ -3520,12 +3588,12 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
 				i = 0;
 			}
+			tx_desc->read.olinfo_status = 0;
 
 			dma += IXGBE_MAX_DATA_PER_TXD;
 			size -= IXGBE_MAX_DATA_PER_TXD;
 
 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
-			tx_desc->read.olinfo_status = 0;
 		}
 
 		if (likely(!data_len))
@@ -3539,23 +3607,15 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
 			i = 0;
 		}
+		tx_desc->read.olinfo_status = 0;
 
 		size = skb_frag_size(frag);
 		data_len -= size;
 
 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
 				       DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;
 
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		dma_unmap_len_set(tx_buffer, len, size);
-		dma_unmap_addr_set(tx_buffer, dma, dma);
-
-		tx_desc->read.buffer_addr = cpu_to_le64(dma);
-		tx_desc->read.olinfo_status = 0;
-
-		frag++;
 	}
 
 	/* write last descriptor with RS and EOP bits */
@@ -3589,18 +3649,32 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 	return;
 dma_error:
 	dev_err(tx_ring->dev, "TX DMA map failed\n");
+	tx_buffer = &tx_ring->tx_buffer_info[i];
 
 	/* clear dma mappings for failed tx_buffer_info map */
-	for (;;) {
+	while (tx_buffer != first) {
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_page(tx_ring->dev,
+				       dma_unmap_addr(tx_buffer, dma),
+				       dma_unmap_len(tx_buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		if (i-- == 0)
+			i += tx_ring->count;
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
-		if (tx_buffer == first)
-			break;
-		if (i == 0)
-			i = tx_ring->count;
-		i--;
 	}
 
+	if (dma_unmap_len(tx_buffer, len))
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+	dma_unmap_len_set(tx_buffer, len, 0);
+
+	dev_kfree_skb_any(tx_buffer->skb);
+	tx_buffer->skb = NULL;
+
 	tx_ring->next_to_use = i;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 64c93e8becc6..38d3a327c1bc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -286,7 +286,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 	ether_addr_copy(msg_addr, addr);
 
 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					     sizeof(msgbuf) / sizeof(u32));
+					     ARRAY_SIZE(msgbuf));
 	if (!ret_val) {
 		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 
@@ -456,8 +456,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 	ether_addr_copy(msg_addr, addr);
 
 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					     sizeof(msgbuf) / sizeof(u32));
-
+					     ARRAY_SIZE(msgbuf));
 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 
 	/* if nacked the address was rejected, use "perm_addr" */
@@ -574,7 +573,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 	msgbuf[1] = xcast_mode;
 
 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					 sizeof(msgbuf) / sizeof(u32));
+					 ARRAY_SIZE(msgbuf));
 	if (err)
 		return err;
 
@@ -614,7 +613,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 
 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					 sizeof(msgbuf) / sizeof(u32));
+					 ARRAY_SIZE(msgbuf));
 	if (err)
 		goto mbx_err;
 
@@ -826,7 +825,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 	msgbuf[1] = max_size;
 
 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					     sizeof(msgbuf) / sizeof(u32));
+					     ARRAY_SIZE(msgbuf));
 	if (ret_val)
 		return ret_val;
 	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
@@ -872,8 +871,7 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 	msg[1] = api;
 	msg[2] = 0;
 
-	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
-					 sizeof(msg) / sizeof(u32));
+	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 	if (!err) {
 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 
@@ -924,8 +922,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 	msg[0] = IXGBE_VF_GET_QUEUE;
 	msg[1] = msg[2] = msg[3] = msg[4] = 0;
 
-	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
-					 sizeof(msg) / sizeof(u32));
+	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 	if (!err) {
 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 