author:    Alexander Duyck <alexander.h.duyck@intel.com>  2012-09-24 20:31:12 -0400
committer: Jeff Kirsher <jeffrey.t.kirsher@intel.com>     2012-10-19 07:34:35 -0400
commit:    de78d1f9c83d0aceca42c17abbbf730ebdc2fc6e
tree:      8a44db40745b7e1be95ceb82cbf8e9f9bd8758eb
parent:    2e334eee9bef61505b6727b356e724033da55ae3
igb: Lock buffer size at 2K even on systems with larger pages
This change locks us in at 2K buffers even on a system that supports
larger pages. The reason for this change is to make better use of pages
and to reduce the overall truesize of frames generated by igb.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
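
As a rough illustration of the truesize argument above: with the old
PAGE_SIZE / 2 sizing, a single received fragment on a 64K-page system
carried 32K of truesize, while a locked 2K buffer also yields many more
buffers per page. The sketch below models only that arithmetic; it is
illustrative userspace code, not driver code, and the page sizes are
example values.

/* Model of the Rx buffer accounting before and after this patch.
 * Illustrative only; page sizes are example values. */
#include <stdio.h>

#define IGB_RX_BUFSZ 2048 /* buffer size locked in by this patch */

int main(void)
{
        long page_sizes[] = { 4096, 16384, 65536 };

        for (int i = 0; i < 3; i++) {
                long p = page_sizes[i];

                /* old scheme: each buffer consumed half a page */
                printf("PAGE_SIZE %5ld: per-buffer truesize was %5ld, "
                       "now %d (%ld buffers per page)\n",
                       p, p / 2, IGB_RX_BUFSZ, p / IGB_RX_BUFSZ);
        }
        return 0;
}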
 drivers/net/ethernet/intel/igb/igb.h         |  7 ++++---
 drivers/net/ethernet/intel/igb/igb_ethtool.c |  4 ++--
 drivers/net/ethernet/intel/igb/igb_main.c    | 27 +++++++++++++++++----------
 3 files changed, 23 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 1d15bb0b1e91..d3fd0127c0c8 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -132,9 +132,10 @@ struct vf_data_storage {
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256   256
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
+#define IGB_RXBUFFER_256  256
+#define IGB_RXBUFFER_2048 2048
+#define IGB_RX_HDR_LEN    IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ      IGB_RXBUFFER_2048
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE  16
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 96c6df65726f..375c0dad8d29 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1727,7 +1727,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		/* sync Rx buffer for CPU read */
 		dma_sync_single_for_cpu(rx_ring->dev,
 					rx_buffer_info->dma,
-					PAGE_SIZE / 2,
+					IGB_RX_BUFSZ,
 					DMA_FROM_DEVICE);
 
 		/* verify contents of skb */
@@ -1737,7 +1737,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		/* sync Rx buffer for device write */
 		dma_sync_single_for_device(rx_ring->dev,
 					   rx_buffer_info->dma,
-					   PAGE_SIZE / 2,
+					   IGB_RX_BUFSZ,
 					   DMA_FROM_DEVICE);
 
 		/* unmap buffer on tx side */
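
Both hunks above follow the streaming DMA discipline: a buffer the
device writes into must be synced for the CPU before the driver reads
it, and synced back for the device before the hardware may write it
again. A minimal kernel-style sketch of that pairing follows; the names
rx_buffer_inspect, rx_dev, and buf_dma are hypothetical, RX_BUFSZ
stands in for IGB_RX_BUFSZ from igb.h, and the fragment is meant to be
read in a kernel-tree context rather than built standalone.

#include <linux/dma-mapping.h>

#define RX_BUFSZ 2048 /* stands in for IGB_RX_BUFSZ from igb.h */

/* Hypothetical helper: rx_dev and buf_dma would come from the driver's
 * ring setup. This patch only changes the length argument of the two
 * sync calls from PAGE_SIZE / 2 to the fixed 2K buffer size. */
static void rx_buffer_inspect(struct device *rx_dev, dma_addr_t buf_dma)
{
	/* make the device's writes visible before the CPU reads them */
	dma_sync_single_for_cpu(rx_dev, buf_dma, RX_BUFSZ, DMA_FROM_DEVICE);

	/* ... CPU examines the received frame here ... */

	/* return ownership to the device before it writes again */
	dma_sync_single_for_device(rx_dev, buf_dma, RX_BUFSZ,
				   DMA_FROM_DEVICE);
}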
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fa7ddec4cfe5..0141ef3ea678 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -554,7 +554,7 @@ rx_ring_summary:
 					  16, 1,
 					  page_address(buffer_info->page) +
 						       buffer_info->page_offset,
-					  PAGE_SIZE/2, true);
+					  IGB_RX_BUFSZ, true);
 			}
 		}
 	}
@@ -3103,11 +3103,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 
 	/* set descriptor configuration */
 	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
-	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
+	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
 	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 #ifdef CONFIG_IGB_PTP
 	if (hw->mac.type >= e1000_82580)
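
The removed #if existed because PAGE_SIZE / 2 could exceed the 16K
packet-buffer limit the hardware can be programmed with; with the
buffer locked at 2K, the cap is moot and one line suffices. A worked
example of the field encoding follows. It assumes
E1000_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. the field counts 1 KB units;
check e1000_82575.h for the authoritative define.

/* Worked example of the SRRCTL packet-size field programmed above.
 * Illustrative userspace code, not driver code. */
#include <stdio.h>

#define IGB_RX_BUFSZ                2048
#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* assumed; see e1000_82575.h */

int main(void)
{
	unsigned int bsizepkt = IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;

	/* 2048 >> 10 == 2: the device never DMAs more than 2 x 1 KB into
	 * one buffer, matching the 2K sync lengths and reuse logic. */
	printf("SRRCTL.BSIZEPKT = %u (%u bytes)\n",
	       bsizepkt, bsizepkt << E1000_SRRCTL_BSIZEPKT_SHIFT);
	return 0;
}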
@@ -5855,7 +5851,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
 					 old_buff->page_offset,
-					 PAGE_SIZE / 2,
+					 IGB_RX_BUFSZ,
 					 DMA_FROM_DEVICE);
 }
 
@@ -5905,18 +5901,19 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	}
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, PAGE_SIZE / 2);
+			rx_buffer->page_offset, size, IGB_RX_BUFSZ);
 
 	/* avoid re-using remote pages */
 	if (unlikely(page_to_nid(page) != numa_node_id()))
 		return false;
 
+#if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
 	if (unlikely(page_count(page) != 1))
 		return false;
 
 	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= PAGE_SIZE / 2;
+	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
 
 	/*
 	 * since we are the only owner of the page and we need to
@@ -5924,6 +5921,16 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	 * an unnecessary locked operation
 	 */
 	atomic_set(&page->_count, 2);
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+
+	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+		return false;
+
+	/* bump ref count on page before it is given to the stack */
+	get_page(page);
+#endif
 
 	return true;
 }
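
The two branches added above implement different reuse strategies: on
small-page systems the driver ping-pongs between the two 2K halves of a
page (the XOR flip), while on large-page systems it walks forward
through the page in cache-line-aligned steps until less than one buffer
remains. The userspace model below mirrors that logic; page_refs stands
in for page_count(), the 64-byte cache line approximates
SKB_DATA_ALIGN, and the page sizes are example values.

/* Userspace model of the reuse decision in igb_add_rx_frag. */
#include <stdbool.h>
#include <stdio.h>

#define IGB_RX_BUFSZ 2048
#define CACHE_LINE   64 /* example value; SKB_DATA_ALIGN uses SMP_CACHE_BYTES */

static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

/* small-page path (PAGE_SIZE < 8192): flip between the two 2K halves */
static bool reuse_small(unsigned long *offset, int page_refs)
{
	if (page_refs != 1)
		return false;            /* someone else still holds the page */
	*offset ^= IGB_RX_BUFSZ;         /* flip to the other half */
	return true;
}

/* large-page path: advance through the page in aligned steps */
static bool reuse_large(unsigned long *offset, unsigned long size,
			unsigned long page_size)
{
	*offset += align_up(size, CACHE_LINE);
	return *offset <= page_size - IGB_RX_BUFSZ;
}

int main(void)
{
	unsigned long off = 0;

	reuse_small(&off, 1);
	printf("4K page: next buffer at offset %lu\n", off);  /* 2048 */

	off = 0;
	reuse_large(&off, 1514, 65536);
	printf("64K page: next buffer at offset %lu\n", off); /* 1536 */
	return 0;
}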
@@ -5977,7 +5984,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 	dma_sync_single_range_for_cpu(rx_ring->dev,
 				      rx_buffer->dma,
 				      rx_buffer->page_offset,
-				      PAGE_SIZE / 2,
+				      IGB_RX_BUFSZ,
 				      DMA_FROM_DEVICE);
 
 	/* pull page into skb */