author		Alexander Duyck <alexander.h.duyck@intel.com>	2008-07-08 18:11:40 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-07-11 01:20:32 -0400
commit		bf36c1a0040cc6ccd63cdd1cec25d2085f2df964 (patch)
tree		bc23a159e39a8f99ae7359484cadd77237e5828f /drivers/net/igb
parent		7dfc16fab1186769d7d0086830ab3fbc8fddfcba (diff)
igb: add page recycling support
This patch adds support for page recycling by splitting each receive page into two usable halves and tracking the page reference count, so a half page can be reused instead of allocating a fresh page on every refill.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
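The scheme the diff implements: each receive descriptor maps only half a page, buffer_info->page_offset selects which half, refill flips to the other half rather than allocating, and a page is retained for reuse only while the driver still holds the sole reference to it. Below is a minimal, self-contained sketch of that decision logic, modeled on the patch; rx_half_page_buffer, refill_half_page() and maybe_recycle() are hypothetical simplified stand-ins for struct igb_buffer and the corresponding paths in igb_main.c, not code from this commit.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical, simplified stand-in for struct igb_buffer. */
struct rx_half_page_buffer {
	struct page *page;		/* backing page, shared by two buffers */
	unsigned int page_offset;	/* 0 or PAGE_SIZE / 2: which half we own */
};

/* Refill path: reuse the other half of a retained page, else allocate one. */
static int refill_half_page(struct rx_half_page_buffer *buf)
{
	if (!buf->page) {
		buf->page = alloc_page(GFP_ATOMIC);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = 0;
	} else {
		buf->page_offset ^= PAGE_SIZE / 2;	/* flip to the other half */
	}
	/* caller would now DMA-map [page_offset, page_offset + PAGE_SIZE / 2) */
	return 0;
}

/* Cleanup path, after the half has been attached to an skb fragment: keep the
 * page only if a half page is big enough for the buffer size and the driver
 * still holds the only reference to it. */
static void maybe_recycle(struct rx_half_page_buffer *buf,
			  unsigned int rx_buffer_len)
{
	if ((rx_buffer_len > (PAGE_SIZE / 2)) ||
	    (page_count(buf->page) != 1))
		buf->page = NULL;	/* the skb now owns our reference */
	else
		get_page(buf->page);	/* extra reference; driver keeps the page */
}

This is also why igb_change_mtu() below caps rx_buffer_len at PAGE_SIZE / 2 (bounded by IGB_RXBUFFER_16384): a receive buffer larger than half a page could never be recycled.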
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--	drivers/net/igb/igb.h		4
-rw-r--r--	drivers/net/igb/igb_main.c	138
2 files changed, 63 insertions(+), 79 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index ee08010d2c4f..f41b9996d2ed 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -125,6 +125,7 @@ struct igb_buffer {
 		struct {
 			struct page *page;
 			u64 page_dma;
+			unsigned int page_offset;
 		};
 	};
 };
@@ -163,9 +164,6 @@ struct igb_ring {
 		};
 		/* RX */
 		struct {
-			/* arrays of page information for packet split */
-			struct sk_buff *pending_skb;
-			int pending_skb_page;
 			int no_itr_adjust;
 			struct igb_queue_stats rx_stats;
 			struct napi_struct napi;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 68a4fef3df9a..660a78653287 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1725,7 +1725,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
-	rx_ring->pending_skb = NULL;
 
 	rx_ring->adapter = adapter;
 
@@ -1817,15 +1816,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 			rctl |= E1000_RCTL_SZ_2048;
 			rctl &= ~E1000_RCTL_BSEX;
 			break;
-		case IGB_RXBUFFER_4096:
-			rctl |= E1000_RCTL_SZ_4096;
-			break;
-		case IGB_RXBUFFER_8192:
-			rctl |= E1000_RCTL_SZ_8192;
-			break;
-		case IGB_RXBUFFER_16384:
-			rctl |= E1000_RCTL_SZ_16384;
-			break;
 		}
 	} else {
 		rctl &= ~E1000_RCTL_BSEX;
@@ -1843,10 +1833,8 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	 * so only enable packet split for jumbo frames */
 	if (rctl & E1000_RCTL_LPE) {
 		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
-		srrctl = adapter->rx_ps_hdr_size <<
-			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-		/* buffer size is ALWAYS one page */
-		srrctl |= PAGE_SIZE >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= adapter->rx_ps_hdr_size <<
+			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
 		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
 	} else {
 		adapter->rx_ps_hdr_size = 0;
@@ -2151,20 +2139,17 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 			buffer_info->skb = NULL;
 		}
 		if (buffer_info->page) {
-			pci_unmap_page(pdev, buffer_info->page_dma,
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			if (buffer_info->page_dma)
+				pci_unmap_page(pdev, buffer_info->page_dma,
+					       PAGE_SIZE / 2,
+					       PCI_DMA_FROMDEVICE);
 			put_page(buffer_info->page);
 			buffer_info->page = NULL;
 			buffer_info->page_dma = 0;
+			buffer_info->page_offset = 0;
 		}
 	}
 
-	/* there also may be some cached data from a chained receive */
-	if (rx_ring->pending_skb) {
-		dev_kfree_skb(rx_ring->pending_skb);
-		rx_ring->pending_skb = NULL;
-	}
-
 	size = sizeof(struct igb_buffer) * rx_ring->count;
 	memset(rx_ring->buffer_info, 0, size);
 
@@ -3091,7 +3076,11 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	else if (max_frame <= IGB_RXBUFFER_2048)
 		adapter->rx_buffer_len = IGB_RXBUFFER_2048;
 	else
-		adapter->rx_buffer_len = IGB_RXBUFFER_4096;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+		adapter->rx_buffer_len = IGB_RXBUFFER_16384;
+#else
+		adapter->rx_buffer_len = PAGE_SIZE / 2;
+#endif
 	/* adjust allocation if LPE protects us, and we aren't using SBP */
 	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
 	    (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
@@ -3796,7 +3785,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 	union e1000_adv_rx_desc *rx_desc , *next_rxd;
 	struct igb_buffer *buffer_info , *next_buffer;
 	struct sk_buff *skb;
-	unsigned int i, j;
+	unsigned int i;
 	u32 length, hlen, staterr;
 	bool cleaned = false;
 	int cleaned_count = 0;
@@ -3826,61 +3815,46 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 		cleaned = true;
 		cleaned_count++;
 
-		if (rx_ring->pending_skb != NULL) {
-			skb = rx_ring->pending_skb;
-			rx_ring->pending_skb = NULL;
-			j = rx_ring->pending_skb_page;
-		} else {
-			skb = buffer_info->skb;
-			prefetch(skb->data - NET_IP_ALIGN);
-			buffer_info->skb = NULL;
-			if (hlen) {
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_ps_hdr_size +
-						 NET_IP_ALIGN,
-						 PCI_DMA_FROMDEVICE);
-				skb_put(skb, hlen);
-			} else {
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_buffer_len +
-						 NET_IP_ALIGN,
-						 PCI_DMA_FROMDEVICE);
-				skb_put(skb, length);
-				goto send_up;
-			}
-			j = 0;
+		skb = buffer_info->skb;
+		prefetch(skb->data - NET_IP_ALIGN);
+		buffer_info->skb = NULL;
+		if (!adapter->rx_ps_hdr_size) {
+			pci_unmap_single(pdev, buffer_info->dma,
+					 adapter->rx_buffer_len +
+					 NET_IP_ALIGN,
+					 PCI_DMA_FROMDEVICE);
+			skb_put(skb, length);
+			goto send_up;
+		}
+
+		if (!skb_shinfo(skb)->nr_frags) {
+			pci_unmap_single(pdev, buffer_info->dma,
+					 adapter->rx_ps_hdr_size +
+					 NET_IP_ALIGN,
+					 PCI_DMA_FROMDEVICE);
+			skb_put(skb, hlen);
 		}
 
-		while (length) {
+		if (length) {
 			pci_unmap_page(pdev, buffer_info->page_dma,
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
 			buffer_info->page_dma = 0;
-			skb_fill_page_desc(skb, j, buffer_info->page,
-					   0, length);
-			buffer_info->page = NULL;
+
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+					   buffer_info->page,
+					   buffer_info->page_offset,
+					   length);
+
+			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
+			    (page_count(buffer_info->page) != 1))
+				buffer_info->page = NULL;
+			else
+				get_page(buffer_info->page);
 
 			skb->len += length;
 			skb->data_len += length;
-			skb->truesize += length;
-			rx_desc->wb.upper.status_error = 0;
-			if (staterr & E1000_RXD_STAT_EOP)
-				break;
-
-			j++;
-			cleaned_count++;
-			i++;
-			if (i == rx_ring->count)
-				i = 0;
 
-			buffer_info = &rx_ring->buffer_info[i];
-			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
-			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-			length = le16_to_cpu(rx_desc->wb.upper.length);
-			if (!(staterr & E1000_RXD_STAT_DD)) {
-				rx_ring->pending_skb = skb;
-				rx_ring->pending_skb_page = j;
-				goto out;
-			}
+			skb->truesize += length;
 		}
 send_up:
 		i++;
@@ -3890,6 +3864,12 @@ send_up:
 		prefetch(next_rxd);
 		next_buffer = &rx_ring->buffer_info[i];
 
+		if (!(staterr & E1000_RXD_STAT_EOP)) {
+			buffer_info->skb = xchg(&next_buffer->skb, skb);
+			buffer_info->dma = xchg(&next_buffer->dma, 0);
+			goto next_desc;
+		}
+
 		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
@@ -3922,7 +3902,7 @@ next_desc:
 
 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
-out:
+
 	rx_ring->next_to_clean = i;
 	cleaned_count = IGB_DESC_UNUSED(rx_ring);
 
@@ -3960,16 +3940,22 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
-		if (adapter->rx_ps_hdr_size && !buffer_info->page) {
-			buffer_info->page = alloc_page(GFP_ATOMIC);
-			if (!buffer_info->page) {
-				adapter->alloc_rx_buff_failed++;
-				goto no_buffers;
+		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
+			if (!buffer_info->page) {
+				buffer_info->page = alloc_page(GFP_ATOMIC);
+				if (!buffer_info->page) {
+					adapter->alloc_rx_buff_failed++;
+					goto no_buffers;
+				}
+				buffer_info->page_offset = 0;
+			} else {
+				buffer_info->page_offset ^= PAGE_SIZE / 2;
 			}
 			buffer_info->page_dma =
 				pci_map_page(pdev,
 					     buffer_info->page,
-					     0, PAGE_SIZE,
+					     buffer_info->page_offset,
+					     PAGE_SIZE / 2,
 					     PCI_DMA_FROMDEVICE);
 		}
 