author		Alexander Duyck <alexander.h.duyck@intel.com>	2017-02-06 21:25:50 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2017-03-17 15:11:44 -0400
commit		d2bead576e67c34fe9ea174bb245254d0fe237b5 (patch)
tree		ef6ec3767553e886f4e8144aa3238bbecff2eca4 /drivers/net/ethernet/intel/igb/igb_main.c
parent		7ec0116c9131a8cd58dc456ae2bd5bc9976460d1 (diff)
igb: Clear Rx buffer_info in configure instead of clean
This change makes it so that instead of going through the entire ring on Rx
cleanup we only go through the region that was designated to be cleaned up and
stop when we reach the region where new allocations should start.

In addition we can avoid having to perform a memset on the Rx buffer_info
structures until we are about to start using the ring again. By deferring this
we can avoid dirtying the cache any more than we have to, which can help to
improve the time needed to bring the interface down and then back up again in
a reset or suspend/resume cycle.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
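For readers skimming the diff, the shape of the change can be sketched outside the driver. The following is a minimal, self-contained C sketch of the same idea, assuming a hypothetical ring structure: cleanup walks only the occupied span [next_to_clean, next_to_alloc) with wraparound, and zeroing of the buffer bookkeeping is deferred to the configure path. The struct and function names are illustrative stand-ins, not the igb driver's API, and the real cleanup also unmaps DMA and drops page references, as the hunks below show.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the driver's per-descriptor bookkeeping. */
struct rx_buffer {
	void *page;
};

/* Hypothetical ring with the same index fields the patch relies on. */
struct rx_ring {
	struct rx_buffer *buffer_info;
	uint16_t count;         /* total number of descriptors in the ring */
	uint16_t next_to_clean; /* first entry that may still hold a buffer */
	uint16_t next_to_alloc; /* first entry that has no buffer yet */
};

/*
 * Cleanup after the patch: walk only the occupied region
 * [next_to_clean, next_to_alloc), wrapping at the end of the ring,
 * instead of iterating over all `count` entries and testing each one.
 */
static void clean_rx_ring(struct rx_ring *ring)
{
	uint16_t i = ring->next_to_clean;

	while (i != ring->next_to_alloc) {
		struct rx_buffer *buf = &ring->buffer_info[i];

		/* release the buffer (placeholder for DMA unmap + page free) */
		buf->page = NULL;

		if (++i == ring->count)
			i = 0;
	}

	ring->next_to_clean = 0;
	ring->next_to_alloc = 0;
}

/*
 * Bring-up after the patch: buffer_info is zeroed here, right before the
 * ring is used again, rather than in the cleanup path above.
 */
static void configure_rx_ring(struct rx_ring *ring)
{
	memset(ring->buffer_info, 0,
	       sizeof(struct rx_buffer) * ring->count);
}

int main(void)
{
	struct rx_buffer bufs[8] = {0};
	struct rx_ring ring = {
		.buffer_info = bufs, .count = 8,
		.next_to_clean = 6, .next_to_alloc = 2, /* occupied span wraps: 6,7,0,1 */
	};

	clean_rx_ring(&ring);     /* touches only 4 entries, not all 8 */
	configure_rx_ring(&ring); /* memset deferred to bring-up */
	printf("cleaned; next_to_clean=%u next_to_alloc=%u\n",
	       ring.next_to_clean, ring.next_to_alloc);
	return 0;
}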
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
 drivers/net/ethernet/intel/igb/igb_main.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1d76d3a90a17..680f0d3d9b72 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3435,7 +3435,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 
 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
 
-	rx_ring->rx_buffer_info = vzalloc(size);
+	rx_ring->rx_buffer_info = vmalloc(size);
 	if (!rx_ring->rx_buffer_info)
 		goto err;
 
@@ -3759,6 +3759,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 	rxdctl |= IGB_RX_HTHRESH << 8;
 	rxdctl |= IGB_RX_WTHRESH << 16;
 
+	/* initialize rx_buffer_info */
+	memset(ring->rx_buffer_info, 0,
+	       sizeof(struct igb_rx_buffer) * ring->count);
+
 	/* initialize Rx descriptor 0 */
 	rx_desc = IGB_RX_DESC(ring, 0);
 	rx_desc->wb.upper.length = 0;
@@ -3937,23 +3941,16 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-	unsigned long size;
-	u16 i;
+	u16 i = rx_ring->next_to_clean;
 
 	if (rx_ring->skb)
 		dev_kfree_skb(rx_ring->skb);
 	rx_ring->skb = NULL;
 
-	if (!rx_ring->rx_buffer_info)
-		return;
-
 	/* Free all the Rx ring sk_buffs */
-	for (i = 0; i < rx_ring->count; i++) {
+	while (i != rx_ring->next_to_alloc) {
 		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
 
-		if (!buffer_info->page)
-			continue;
-
 		/* Invalidate cache lines that may have been written to by
 		 * device so that we avoid corrupting memory.
 		 */
@@ -3972,12 +3969,11 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 		__page_frag_cache_drain(buffer_info->page,
 					buffer_info->pagecnt_bias);
 
-		buffer_info->page = NULL;
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
 	}
 
-	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_buffer_info, 0, size);
-
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;