author		Alexander Duyck <alexander.h.duyck@intel.com>	2017-02-06 21:27:36 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2017-03-17 15:11:44 -0400
commit		b1bb2eb0a0deb03e9847d8e29eb1e75ce141e4d9 (patch)
tree		65272763a1cdb6c9afb9587738bd004fd1c6fd4d /drivers/net/ethernet/intel/igb/igb_main.c
parent		e014272672b964471608a2624e4cdf1d5e7c22ea (diff)
igb: Re-add support for build_skb in igb
This reverts commit f9d40f6a9921 ("igb: Revert support for build_skb in igb") and adds a few changes to update it to work with the latest version of igb.

We are now able to re-add this support because, with the recent changes to the page count and the use of DMA_ATTR_SKIP_CPU_SYNC, we can make the pages writable, so we should no longer be invalidating the additional data added when we call build_skb.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	47
1 file changed, 47 insertions(+), 0 deletions(-)
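
As background for the diff below: build_skb() constructs an sk_buff directly around a buffer the driver already owns and has DMA-mapped, instead of allocating a fresh data area and copying the packet into it. The following is a minimal, driver-agnostic sketch of that pattern; the rx_wrap_buffer() helper and its parameter names are hypothetical and for illustration only, while the real function added by this commit is igb_build_skb() in the diff itself.

#include <linux/skbuff.h>

/*
 * Illustrative sketch only (not part of the commit): wrap an already
 * DMA-mapped receive buffer in an sk_buff without copying the data.
 *
 * Buffer layout assumed here, mirroring the driver's use of IGB_SKB_PAD:
 *   | headroom (pad) | packet data (size) | tailroom | skb_shared_info |
 *   |<--------------------------- truesize -------------------------->|
 *
 * "va" points at the start of the packet data inside the buffer, "pad"
 * is the headroom reserved in front of it, and "truesize" covers the
 * whole buffer footprint so socket memory accounting stays accurate.
 */
static struct sk_buff *rx_wrap_buffer(void *va, unsigned int pad,
				      unsigned int size, unsigned int truesize)
{
	struct sk_buff *skb;

	/* build_skb() takes the start of the buffer, i.e. data minus headroom */
	skb = build_skb(va - pad, truesize);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, pad);	/* move skb->data past the headroom */
	__skb_put(skb, size);	/* mark the received bytes as present */

	return skb;
}

The key point the commit message makes is that this only works if the driver may write to the page (headroom and skb_shared_info live there); the earlier page-count and DMA_ATTR_SKIP_CPU_SYNC changes are what make that safe again in igb.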
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index dfae641647d3..79f39a785dca 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7021,6 +7021,51 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 	return skb;
 }
 
+static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
+				     struct igb_rx_buffer *rx_buffer,
+				     union e1000_adv_rx_desc *rx_desc,
+				     unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(IGB_SKB_PAD + size);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(va - IGB_SKB_PAD, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, IGB_SKB_PAD);
+	__skb_put(skb, size);
+
+	/* pull timestamp out of packet data */
+	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+		__skb_pull(skb, IGB_TS_HDR_LEN);
+	}
+
+	/* update buffer offset */
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+
+	return skb;
+}
+
 static inline void igb_rx_checksum(struct igb_ring *ring,
 				   union e1000_adv_rx_desc *rx_desc,
 				   struct sk_buff *skb)
@@ -7250,6 +7295,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		/* retrieve a buffer from the ring */
 		if (skb)
 			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		else if (ring_uses_build_skb(rx_ring))
+			skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
 		else
 			skb = igb_construct_skb(rx_ring, rx_buffer,
 						rx_desc, size);