author    Alexander Duyck <alexander.h.duyck@intel.com>  2017-02-06 21:27:14 -0500
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>     2017-03-17 15:11:44 -0400
commit    e3cdf68d4a861d91ef62ed615483e673f07fccfe (patch)
tree      e2ce2ca2e229f416e60e4c1fe377dead5aaaf25a /drivers/net/ethernet/intel/igb/igb_main.c
parent    8649aaef4044681257ed38cf8706aea88430f2c4 (diff)
igb: Add support for padding packet
With the size of the frame limited we can now write to an offset within the
buffer instead of having to write at the very start of the buffer. The
advantage to this is that it allows us to leave padding room for things like
supporting XDP in the future.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
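As a rough illustration of the idea (not part of this patch): once the received frame starts at an offset inside the page fragment, the bytes in front of it can be consumed as headroom when an skb is built directly over the receive buffer. The sketch below shows that pattern in kernel-style C; the helper name and the PAD value are hypothetical stand-ins for the driver's IGB_SKB_PAD, and build_skb(), skb_reserve() and __skb_put() are the generic networking helpers.

/* Illustrative sketch only -- not the driver code itself.  The frame was
 * DMA'd to va, PAD bytes past the start of its buffer, so the skb is
 * built over the whole fragment and the padding becomes headroom.
 */
#include <linux/skbuff.h>

#define PAD	64	/* stand-in for IGB_SKB_PAD; value assumed */

static struct sk_buff *example_build_padded_skb(void *va, unsigned int size,
						unsigned int truesize)
{
	struct sk_buff *skb;

	/* wrap the existing buffer, starting PAD bytes before the frame */
	skb = build_skb(va - PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, PAD);	/* keep the padding as headroom */
	__skb_put(skb, size);	/* expose the received frame data */

	return skb;
}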
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 24c20d401240..3ef66577872b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3783,11 +3783,14 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
 				  struct igb_ring *rx_ring)
 {
 	/* set build_skb and buffer size flags */
+	clear_ring_build_skb_enabled(rx_ring);
 	clear_ring_uses_large_buffer(rx_ring);
 
 	if (adapter->flags & IGB_FLAG_RX_LEGACY)
 		return;
 
+	set_ring_build_skb_enabled(rx_ring);
+
 #if (PAGE_SIZE < 8192)
 	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
 		return;
@@ -6957,7 +6960,9 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+				SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
+				SKB_DATA_ALIGN(size);
 #endif
 	unsigned int pull_len;
 
@@ -7293,6 +7298,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	return total_packets;
 }
 
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 				  struct igb_rx_buffer *bi)
 {
@@ -7328,7 +7338,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 
 	bi->dma = dma;
 	bi->page = page;
-	bi->page_offset = 0;
+	bi->page_offset = igb_rx_offset(rx_ring);
 	bi->pagecnt_bias = 1;
 
 	return true;
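A note on why the frame size has to be capped before padding can be added (reasoning from the hunks above, not stated explicitly in the patch): with half-page receive buffers the padding, the frame itself and the trailing struct skb_shared_info all have to fit in PAGE_SIZE / 2, which is what the IGB_MAX_FRAME_BUILD_SKB check in igb_set_rx_buffer_len() guards. A rough accounting sketch, with the helper name being hypothetical:

/* Rough accounting sketch -- assumed layout, not the driver's macros. */
#include <linux/skbuff.h>

static bool example_frame_fits_half_page(unsigned int pad,
					 unsigned int max_frame)
{
	unsigned int needed = SKB_DATA_ALIGN(pad + max_frame) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* headroom + frame + skb_shared_info must fit in half a page */
	return needed <= PAGE_SIZE / 2;
}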