 drivers/net/e1000/e1000_main.c | 108 ++++++++++++++++++++++++-----------------
 1 file changed, 61 insertions(+), 47 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index cf4fc5117032..41f44a3ded9a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1542,6 +1542,8 @@ setup_rx_desc_die:
 
     rxdr->next_to_clean = 0;
     rxdr->next_to_use = 0;
+    rxdr->rx_skb_top = NULL;
+    rxdr->rx_skb_prev = NULL;
 
     return 0;
 }
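The two new ring fields give the driver somewhere to park a jumbo frame that is still being assembled from multiple descriptors, which is why setup must start them out as NULL. A minimal sketch of that caching pattern, assuming the fields hold the head and tail of the in-progress chain; the struct and helper below are hypothetical names, not the driver's code:

#include <linux/skbuff.h>

struct jumbo_cache {
    struct sk_buff *rx_skb_top;   /* head of the frame being assembled */
    struct sk_buff *rx_skb_prev;  /* most recently appended fragment */
};

static void jumbo_cache_drop(struct jumbo_cache *cache)
{
    if (cache->rx_skb_top) {
        /* rx_skb_prev is linked into the chain headed by rx_skb_top,
         * so freeing the head releases both pointers' data */
        dev_kfree_skb(cache->rx_skb_top);
        cache->rx_skb_top = NULL;
        cache->rx_skb_prev = NULL;
    }
}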
@@ -2010,19 +2012,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
 
             dev_kfree_skb(buffer_info->skb);
             buffer_info->skb = NULL;
-
-            for(j = 0; j < adapter->rx_ps_pages; j++) {
-                if(!ps_page->ps_page[j]) break;
-                pci_unmap_single(pdev,
-                                 ps_page_dma->ps_page_dma[j],
-                                 PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                ps_page_dma->ps_page_dma[j] = 0;
-                put_page(ps_page->ps_page[j]);
-                ps_page->ps_page[j] = NULL;
-            }
         }
+        ps_page = &rx_ring->ps_page[i];
+        ps_page_dma = &rx_ring->ps_page_dma[i];
+        for (j = 0; j < adapter->rx_ps_pages; j++) {
+            if (!ps_page->ps_page[j]) break;
+            pci_unmap_page(pdev,
+                           ps_page_dma->ps_page_dma[j],
+                           PAGE_SIZE, PCI_DMA_FROMDEVICE);
+            ps_page_dma->ps_page_dma[j] = 0;
+            put_page(ps_page->ps_page[j]);
+            ps_page->ps_page[j] = NULL;
+        }
+    }
+
+    /* there also may be some cached data in our adapter */
+    if (rx_ring->rx_skb_top) {
+        dev_kfree_skb(rx_ring->rx_skb_top);
+
+        /* rx_skb_prev will be wiped out by rx_skb_top */
+        rx_ring->rx_skb_top = NULL;
+        rx_ring->rx_skb_prev = NULL;
     }
 
+
     size = sizeof(struct e1000_buffer) * rx_ring->count;
     memset(rx_ring->buffer_info, 0, size);
     size = sizeof(struct e1000_ps_page) * rx_ring->count;
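Two things change in the cleanup path above: the packet-split pages are now unmapped with pci_unmap_page(), matching how they were mapped, rather than pci_unmap_single(), and the unmap loop moves outside the if (buffer_info->skb) block so the pages are released even when no skb is attached. A short sketch of the pairing rule the hunk restores, with hypothetical helper names:

#include <linux/pci.h>

/* Buffers mapped with pci_map_page() must be torn down with
 * pci_unmap_page(); mixing in pci_unmap_single() can break on
 * platforms (e.g. with an IOMMU) where the two bookkeeping paths
 * differ.  These wrappers only illustrate the symmetry. */
static dma_addr_t rx_page_map(struct pci_dev *pdev, struct page *page)
{
    return pci_map_page(pdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
}

static void rx_page_unmap(struct pci_dev *pdev, dma_addr_t dma)
{
    pci_unmap_page(pdev, dma, PAGE_SIZE, PCI_DMA_FROMDEVICE);
}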
@@ -2985,50 +2998,51 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
     if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
        (max_frame > MAX_JUMBO_FRAME_SIZE)) {
         DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
-        return -EINVAL;
-    }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
-    /* might want this to be bigger enum check... */
-    /* 82571 controllers limit jumbo frame size to 10500 bytes */
-    if ((adapter->hw.mac_type == e1000_82571 ||
-         adapter->hw.mac_type == e1000_82572) &&
-        max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-        DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
-                            "on 82571 and 82572 controllers.\n");
         return -EINVAL;
     }
 
-    if(adapter->hw.mac_type == e1000_82573 &&
-       max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
-        DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
-                            "on 82573\n");
-        return -EINVAL;
-    }
-
-    if(adapter->hw.mac_type > e1000_82547_rev_2) {
-        adapter->rx_buffer_len = max_frame;
-        E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
-    } else {
-        if(unlikely((adapter->hw.mac_type < e1000_82543) &&
-           (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
-            DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
-                                "on 82542\n");
+    /* Adapter-specific max frame size limits. */
+    switch (adapter->hw.mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82573:
+        if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+            DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
             return -EINVAL;
-
-        } else {
-            if(max_frame <= E1000_RXBUFFER_2048) {
-                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
-            } else if(max_frame <= E1000_RXBUFFER_4096) {
-                adapter->rx_buffer_len = E1000_RXBUFFER_4096;
-            } else if(max_frame <= E1000_RXBUFFER_8192) {
-                adapter->rx_buffer_len = E1000_RXBUFFER_8192;
-            } else if(max_frame <= E1000_RXBUFFER_16384) {
-                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
-            }
         }
+        break;
+    case e1000_82571:
+    case e1000_82572:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+        if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+            DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
+            return -EINVAL;
+        }
+        break;
+    default:
+        /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+        break;
     }
 
+    /* since the driver code now supports splitting a packet across
+     * multiple descriptors, most of the fifo related limitations on
+     * jumbo frame traffic have gone away.
+     * simply use 2k descriptors for everything.
+     *
+     * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+     * means we reserve 2 more, this pushes us to allocate from the next
+     * larger slab size
+     * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+    /* recent hardware supports 1KB granularity */
+    if (adapter->hw.mac_type > e1000_82547_rev_2) {
+        adapter->rx_buffer_len =
+            ((max_frame < E1000_RXBUFFER_2048) ?
+             max_frame : E1000_RXBUFFER_2048);
+        E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
+    } else
+        adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+
     netdev->mtu = new_mtu;
 
     if(netif_running(netdev)) {
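The last hunk drops the old 2K/4K/8K/16K buffer ladder: with packet split, receive buffers stay at 2 KB and larger frames span multiple descriptors, while MACs newer than the 82547 can round the buffer length to 1 KB granularity. A standalone sketch of that sizing logic; ROUNDUP here is an assumed stand-in for the driver's E1000_ROUNDUP macro:

#include <stdio.h>

#define RXBUFFER_2048 2048
/* assumed equivalent of E1000_ROUNDUP: round x up to a multiple of unit */
#define ROUNDUP(x, unit) ((((x) + (unit) - 1) / (unit)) * (unit))

static unsigned int rx_buffer_len(unsigned int max_frame)
{
    /* cap at 2 KB, then round up to the 1 KB hardware granularity */
    unsigned int len =
        max_frame < RXBUFFER_2048 ? max_frame : RXBUFFER_2048;
    return ROUNDUP(len, 1024);
}

int main(void)
{
    /* a 1500-byte MTU (1522-byte frame) gets a 2048-byte buffer; a
     * 9018-byte jumbo frame is also capped at 2048 bytes per buffer
     * and split across descriptors */
    printf("%u %u\n", rx_buffer_len(1522), rx_buffer_len(9018));
    return 0;
}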