author		Alexander Duyck <alexander.h.duyck@intel.com>	2011-08-26 03:43:43 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-09-20 02:57:42 -0400
commit		c023cd8898dbee857c8e82b357b4e68dc2d9561d (patch)
tree		48c0b10648e0d0cb5150f5c3e17397b6ea72b92d /drivers/net/ethernet/intel/igb
parent		44390ca6cb3d4d3c7c4078bafde11073b5268150 (diff)
igb: streamline Rx buffer allocation and cleanup
This change streamlines Rx buffer allocation and cleanup. It reduces the number of writes to the Rx descriptor ring: software now writes the ring only during allocation and only reads it during cleanup.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
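Before the diff, a minimal self-contained C sketch of the idea the commit message describes may help. It is an editor's illustration only, not code from the commit: demo_rx_desc, demo_ring, DD_BIT and the demo_* helpers are simplified stand-ins for the real igb descriptor union and ring state, and DMA mapping, NUMA placement and error handling are omitted. What it shows is that software writes a descriptor slot (and zeroes the slot after the last one it filled) only on the allocation path, while the cleanup path only reads the hardware-written "descriptor done" bit instead of clearing the status word after every packet.

/* Editor's illustration only -- not part of the commit. */
#include <stdint.h>

#define DEMO_RING_SIZE	256
#define DD_BIT		0x1ull		/* hardware "descriptor done" flag */

struct demo_rx_desc {
	uint64_t pkt_addr;		/* read format: packet buffer address */
	uint64_t hdr_addr;		/* read format: header buffer address;
					 * overlaid by status bits on write-back */
};

struct demo_ring {
	struct demo_rx_desc desc[DEMO_RING_SIZE];
	uint16_t next_to_use;		/* next slot software will fill */
	uint16_t next_to_clean;		/* next slot software will check */
};

/* Allocation path: the only place software writes the descriptor ring.
 * DMA addresses are assumed aligned, so bit 0 (DD) reads as clear. */
static void demo_alloc_one(struct demo_ring *ring, uint64_t hdr_dma,
			   uint64_t pkt_dma)
{
	uint16_t i = ring->next_to_use;

	ring->desc[i].hdr_addr = hdr_dma;
	ring->desc[i].pkt_addr = pkt_dma;

	i = (uint16_t)((i + 1) % DEMO_RING_SIZE);
	/* Zero the slot *after* the last one filled; cleanup stops there
	 * because DD reads as 0, so it never has to clear status (a ring
	 * write) after every received packet. */
	ring->desc[i].hdr_addr = 0;
	ring->next_to_use = i;
}

/* Cleanup path: only reads the ring.  Hardware sets DD in the
 * write-back area when the slot holds a completed packet. */
static int demo_clean_one(struct demo_ring *ring)
{
	uint16_t i = ring->next_to_clean;

	if (!(ring->desc[i].hdr_addr & DD_BIT))
		return 0;		/* nothing completed yet */

	/* ...hand the completed buffer up the stack here... */

	ring->next_to_clean = (uint16_t)((i + 1) % DEMO_RING_SIZE);
	return 1;
}

In the real driver the write-back status bits overlay read.hdr_addr within the e1000_adv_rx_desc union, which is why the diff below clears hdr_addr at the next_to_use descriptor instead of zeroing wb.upper.status_error in the cleanup loop.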
Diffstat (limited to 'drivers/net/ethernet/intel/igb')
-rw-r--r--	drivers/net/ethernet/intel/igb/igb.h		  2
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	190
2 files changed, 104 insertions(+), 88 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 50632b19d2d..b2f2a8ca46e 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -370,7 +370,7 @@ extern void igb_setup_rctl(struct igb_adapter *);
 extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
 extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
					    struct igb_buffer *);
-extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
+extern void igb_alloc_rx_buffers_adv(struct igb_ring *, u16);
 extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
 extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 022c4420350..af8c2f783a9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3243,16 +3243,15 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-	struct igb_buffer *buffer_info;
 	unsigned long size;
-	unsigned int i;
+	u16 i;
 
 	if (!rx_ring->buffer_info)
 		return;
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
-		buffer_info = &rx_ring->buffer_info[i];
+		struct igb_buffer *buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
 			dma_unmap_single(rx_ring->dev,
 					 buffer_info->dma,
@@ -5764,7 +5763,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 	struct igb_buffer *buffer_info , *next_buffer;
 	struct sk_buff *skb;
 	bool cleaned = false;
-	int cleaned_count = 0;
+	u16 cleaned_count = igb_desc_unused(rx_ring);
 	int current_node = numa_node_id();
 	unsigned int total_bytes = 0, total_packets = 0;
 	unsigned int i;
@@ -5848,7 +5847,6 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 		igb_rx_checksum_adv(rx_ring, staterr, skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
-		skb_record_rx_queue(skb, rx_ring->queue_index);
 
 		if (staterr & E1000_RXD_STAT_VP) {
 			u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
@@ -5858,8 +5856,6 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 		napi_gro_receive(&q_vector->napi, skb);
 
 next_desc:
-		rx_desc->wb.upper.status_error = 0;
-
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
 			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
@@ -5873,110 +5869,130 @@ next_desc:
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = igb_desc_unused(rx_ring);
-
-	if (cleaned_count)
-		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
-
-	rx_ring->total_packets += total_packets;
-	rx_ring->total_bytes += total_bytes;
 	u64_stats_update_begin(&rx_ring->rx_syncp);
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
 	u64_stats_update_end(&rx_ring->rx_syncp);
+	rx_ring->total_packets += total_packets;
+	rx_ring->total_bytes += total_bytes;
+
+	if (cleaned_count)
+		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
+
 	return cleaned;
 }
 
+static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
+				 struct igb_buffer *bi)
+{
+	struct sk_buff *skb = bi->skb;
+	dma_addr_t dma = bi->dma;
+
+	if (dma)
+		return true;
+
+	if (likely(!skb)) {
+		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+						IGB_RX_HDR_LEN);
+		bi->skb = skb;
+		if (!skb) {
+			rx_ring->rx_stats.alloc_failed++;
+			return false;
+		}
+
+		/* initialize skb for ring */
+		skb_record_rx_queue(skb, rx_ring->queue_index);
+	}
+
+	dma = dma_map_single(rx_ring->dev, skb->data,
+			     IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		rx_ring->rx_stats.alloc_failed++;
+		return false;
+	}
+
+	bi->dma = dma;
+	return true;
+}
+
+static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
+				  struct igb_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t page_dma = bi->page_dma;
+	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+
+	if (page_dma)
+		return true;
+
+	if (!page) {
+		page = netdev_alloc_page(rx_ring->netdev);
+		bi->page = page;
+		if (unlikely(!page)) {
+			rx_ring->rx_stats.alloc_failed++;
+			return false;
+		}
+	}
+
+	page_dma = dma_map_page(rx_ring->dev, page,
+				page_offset, PAGE_SIZE / 2,
+				DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(rx_ring->dev, page_dma)) {
+		rx_ring->rx_stats.alloc_failed++;
+		return false;
+	}
+
+	bi->page_dma = page_dma;
+	bi->page_offset = page_offset;
+	return true;
+}
+
 /**
  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
+void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, u16 cleaned_count)
 {
-	struct net_device *netdev = rx_ring->netdev;
 	union e1000_adv_rx_desc *rx_desc;
-	struct igb_buffer *buffer_info;
-	struct sk_buff *skb;
-	unsigned int i;
+	struct igb_buffer *bi;
+	u16 i = rx_ring->next_to_use;
 
-	i = rx_ring->next_to_use;
-	buffer_info = &rx_ring->buffer_info[i];
+	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+	bi = &rx_ring->buffer_info[i];
+	i -= rx_ring->count;
 
 	while (cleaned_count--) {
-		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
-
-		if (!buffer_info->page_dma) {
-			if (!buffer_info->page) {
-				buffer_info->page = netdev_alloc_page(netdev);
-				if (unlikely(!buffer_info->page)) {
-					u64_stats_update_begin(&rx_ring->rx_syncp);
-					rx_ring->rx_stats.alloc_failed++;
-					u64_stats_update_end(&rx_ring->rx_syncp);
-					goto no_buffers;
-				}
-				buffer_info->page_offset = 0;
-			} else {
-				buffer_info->page_offset ^= PAGE_SIZE / 2;
-			}
-			buffer_info->page_dma =
-				dma_map_page(rx_ring->dev, buffer_info->page,
-					     buffer_info->page_offset,
-					     PAGE_SIZE / 2,
-					     DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev,
-					      buffer_info->page_dma)) {
-				buffer_info->page_dma = 0;
-				u64_stats_update_begin(&rx_ring->rx_syncp);
-				rx_ring->rx_stats.alloc_failed++;
-				u64_stats_update_end(&rx_ring->rx_syncp);
-				goto no_buffers;
-			}
-		}
+		if (!igb_alloc_mapped_skb(rx_ring, bi))
+			break;
 
-		skb = buffer_info->skb;
-		if (!skb) {
-			skb = netdev_alloc_skb_ip_align(netdev, IGB_RX_HDR_LEN);
-			if (unlikely(!skb)) {
-				u64_stats_update_begin(&rx_ring->rx_syncp);
-				rx_ring->rx_stats.alloc_failed++;
-				u64_stats_update_end(&rx_ring->rx_syncp);
-				goto no_buffers;
-			}
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info. */
+		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 
-			buffer_info->skb = skb;
-		}
-		if (!buffer_info->dma) {
-			buffer_info->dma = dma_map_single(rx_ring->dev,
-							  skb->data,
-							  IGB_RX_HDR_LEN,
-							  DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev,
-					      buffer_info->dma)) {
-				buffer_info->dma = 0;
-				u64_stats_update_begin(&rx_ring->rx_syncp);
-				rx_ring->rx_stats.alloc_failed++;
-				u64_stats_update_end(&rx_ring->rx_syncp);
-				goto no_buffers;
-			}
-		}
-		/* Refresh the desc even if buffer_addrs didn't change because
-		 * each write-back erases this info. */
-		rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma);
-		rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
+		if (!igb_alloc_mapped_page(rx_ring, bi))
+			break;
+
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
 
+		rx_desc++;
+		bi++;
 		i++;
-		if (i == rx_ring->count)
-			i = 0;
-		buffer_info = &rx_ring->buffer_info[i];
+		if (unlikely(!i)) {
+			rx_desc = E1000_RX_DESC_ADV(*rx_ring, 0);
+			bi = rx_ring->buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the hdr_addr for the next_to_use descriptor */
+		rx_desc->read.hdr_addr = 0;
 	}
 
-no_buffers:
+	i += rx_ring->count;
+
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
-		if (i == 0)
-			i = (rx_ring->count - 1);
-		else
-			i--;
 
 		/* Force memory writes to complete before letting h/w
 		 * know there are new descriptors to fetch. (Only