author    Alexander Duyck <alexander.h.duyck@intel.com>  2009-10-27 11:50:48 -0400
committer David S. Miller <davem@davemloft.net>          2009-10-28 04:20:21 -0400
commit    6ec43fe635fb5c96fbc0955b2794b74fee69b723 (patch)
tree      f5b287d27f3eb67791ad1953e1a562eabf7bb54b /drivers/net/igb/igb_main.c
parent    85b430b47736d1f59e8f9efb0e47bc46aeb2b01d (diff)
igb: remove rx_ps_hdr_len
This patch removes rx_ps_hdr_len, which isn't really needed: a rx_buffer_len of
less than 1K now indicates that we are in packet split mode. It is also
redundant because we always use a half page for the data buffers when
receiving, so we always know the size to map/unmap.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
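In other words, after this patch a single field does double duty: a
rx_buffer_len below IGB_RXBUFFER_1024 is the header-buffer size and implies
packet split with half-page data buffers, while anything at or above 1K means
one contiguous buffer per packet. A minimal standalone sketch of that policy,
mirroring the igb_change_mtu() hunk below (pick_rx_buffer_len is a hypothetical
helper; the constant values match the driver's defines of this era but are
restated here only for illustration):

    /* Hypothetical sketch of the post-patch rx_buffer_len policy. */
    #define IGB_RXBUFFER_128            128   /* header buffer, split mode */
    #define IGB_RXBUFFER_1024           1024
    #define MAXIMUM_ETHERNET_VLAN_SIZE  1522

    static unsigned int pick_rx_buffer_len(unsigned int max_frame)
    {
            if (max_frame <= IGB_RXBUFFER_1024)
                    return IGB_RXBUFFER_1024;          /* one buffer */
            else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
                    return MAXIMUM_ETHERNET_VLAN_SIZE; /* one buffer */
            else
                    return IGB_RXBUFFER_128;           /* < 1K: packet split;
                                                        * header here, data in
                                                        * PAGE_SIZE/2 chunks */
    }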
Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--  drivers/net/igb/igb_main.c | 98
1 file changed, 38 insertions(+), 60 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 61ef4c2c4fca..24e502df0889 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1862,7 +1862,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	adapter->rx_ps_hdr_size = 0; /* disable packet split */
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
@@ -2254,12 +2253,8 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	 */
 	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
 
-	/* enable LPE when to prevent packets larger than max_frame_size */
+	/* enable LPE to prevent packets larger than max_frame_size */
 	rctl |= E1000_RCTL_LPE;
-
-	/* Setup buffer sizes */
-	srrctl = ALIGN(adapter->rx_buffer_len, 1024)
-		 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
 
 	/* 82575 and greater support packet-split where the protocol
 	 * header is placed in skb->data and the packet data is
@@ -2270,13 +2265,20 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	 */
 	/* allocations using alloc_page take too long for regular MTU
 	 * so only enable packet split for jumbo frames */
-	if (adapter->netdev->mtu > ETH_DATA_LEN) {
-		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
-		srrctl |= adapter->rx_ps_hdr_size <<
-			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	if (adapter->rx_buffer_len < IGB_RXBUFFER_1024) {
+		srrctl = ALIGN(adapter->rx_buffer_len, 64) <<
+			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+		srrctl |= IGB_RXBUFFER_16384 >>
+			  E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else
+		srrctl |= (PAGE_SIZE / 2) >>
+			  E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif
 		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
 	} else {
-		adapter->rx_ps_hdr_size = 0;
+		srrctl = ALIGN(adapter->rx_buffer_len, 1024) >>
+			 E1000_SRRCTL_BSIZEPKT_SHIFT;
 		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	}
 
@@ -2647,14 +2649,9 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
-			if (adapter->rx_ps_hdr_size)
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_ps_hdr_size,
-						 PCI_DMA_FROMDEVICE);
-			else
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_buffer_len,
-						 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(pdev, buffer_info->dma,
+					 adapter->rx_buffer_len,
+					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
 		}
 
@@ -2662,14 +2659,15 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
+		if (buffer_info->page_dma) {
+			pci_unmap_page(pdev, buffer_info->page_dma,
+				       PAGE_SIZE / 2,
+				       PCI_DMA_FROMDEVICE);
+			buffer_info->page_dma = 0;
+		}
 		if (buffer_info->page) {
-			if (buffer_info->page_dma)
-				pci_unmap_page(pdev, buffer_info->page_dma,
-					       PAGE_SIZE / 2,
-					       PCI_DMA_FROMDEVICE);
 			put_page(buffer_info->page);
 			buffer_info->page = NULL;
-			buffer_info->page_dma = 0;
 			buffer_info->page_offset = 0;
 		}
 	}
@@ -3792,19 +3790,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 
 	if (max_frame <= IGB_RXBUFFER_1024)
 		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
-	else if (max_frame <= IGB_RXBUFFER_2048)
-		adapter->rx_buffer_len = IGB_RXBUFFER_2048;
-	else
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-		adapter->rx_buffer_len = IGB_RXBUFFER_16384;
-#else
-		adapter->rx_buffer_len = PAGE_SIZE / 2;
-#endif
-
-	/* adjust allocation if LPE protects us, and we aren't using SBP */
-	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-	    (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
+	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	else
+		adapter->rx_buffer_len = IGB_RXBUFFER_128;
 
 	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
 		 netdev->mtu, new_mtu);
@@ -4864,8 +4853,8 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
 	 */
 	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
 		   E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-	if (hlen > adapter->rx_ps_hdr_size)
-		hlen = adapter->rx_ps_hdr_size;
+	if (hlen > adapter->rx_buffer_len)
+		hlen = adapter->rx_buffer_len;
 	return hlen;
 }
 
@@ -4913,23 +4902,16 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 		cleaned = true;
 		cleaned_count++;
 
-		/* this is the fast path for the non-packet split case */
-		if (!adapter->rx_ps_hdr_size) {
-			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
-			buffer_info->dma = 0;
-			skb_put(skb, length);
-			goto send_up;
-		}
-
 		if (buffer_info->dma) {
-			u16 hlen = igb_get_hlen(adapter, rx_desc);
 			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_ps_hdr_size,
+					 adapter->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
-			skb_put(skb, hlen);
+			if (adapter->rx_buffer_len >= IGB_RXBUFFER_1024) {
+				skb_put(skb, length);
+				goto send_up;
+			}
+			skb_put(skb, igb_get_hlen(adapter, rx_desc));
 		}
 
 		if (length) {
@@ -4942,8 +4924,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 					   buffer_info->page_offset,
 					   length);
 
-			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
-			    (page_count(buffer_info->page) != 1))
+			if (page_count(buffer_info->page) != 1)
 				buffer_info->page = NULL;
 			else
 				get_page(buffer_info->page);
@@ -5070,15 +5051,12 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
 
-	if (adapter->rx_ps_hdr_size)
-		bufsz = adapter->rx_ps_hdr_size;
-	else
-		bufsz = adapter->rx_buffer_len;
+	bufsz = adapter->rx_buffer_len;
 
 	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
-		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
+		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
 			if (!buffer_info->page) {
 				buffer_info->page = alloc_page(GFP_ATOMIC);
 				if (!buffer_info->page) {
@@ -5110,7 +5088,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
-		if (adapter->rx_ps_hdr_size) {
+		if (bufsz < IGB_RXBUFFER_1024) {
 			rx_desc->read.pkt_addr =
 				cpu_to_le64(buffer_info->page_dma);
 			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
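
For reference, the SRRCTL programming in the igb_setup_rctl() hunk above packs
both buffer sizes into one register: the packet-buffer size field counts 1 KB
units (hence the right shift by E1000_SRRCTL_BSIZEPKT_SHIFT) and the
header-buffer size field counts 64-byte units. A small self-contained sketch of
that arithmetic; the shift values (10 and 2) are assumptions based on the
82575-family register layout, since the diff itself does not show them:

    /* Standalone sketch of the SRRCTL size math from igb_setup_rctl().
     * Assumed: BSIZEPKT is in 1 KB units (shift right by 10); shifting the
     * byte count left by 2 lands the 64-byte-unit header size in its field. */
    #include <stdint.h>
    #include <stdio.h>

    #define E1000_SRRCTL_BSIZEPKT_SHIFT     10 /* assumed: shift right */
    #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT  2 /* assumed: shift left */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define PAGE_SIZE 4096u                    /* assumed 4 KB pages */

    int main(void)
    {
            uint32_t srrctl;

            /* packet-split case: rx_buffer_len = 128 is the header size */
            srrctl  = ALIGN(128u, 64) << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
            srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
            printf("split:  0x%08x (hdr 128 B, data 2 KB)\n", srrctl);

            /* one-buffer case: rx_buffer_len = 1522 rounds up to 2 KB */
            srrctl = ALIGN(1522u, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
            printf("onebuf: 0x%08x (pkt 2 KB)\n", srrctl);
            return 0;
    }

This also shows why rx_buffer_len below 1 KB is safe as a mode flag: any
single-buffer configuration must round up to at least one 1 KB BSIZEPKT unit,
so header-sized values can never collide with it.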