Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--  drivers/net/e1000/e1000_main.c  476
1 file changed, 50 insertions, 426 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ad6da7b67e55..fac82152e4c8 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -137,15 +137,9 @@ static int e1000_clean(struct napi_struct *napi, int budget);
 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			       struct e1000_rx_ring *rx_ring,
 			       int *work_done, int work_to_do);
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
-				  struct e1000_rx_ring *rx_ring,
-				  int *work_done, int work_to_do);
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 				   struct e1000_rx_ring *rx_ring,
 				   int cleaned_count);
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-				      struct e1000_rx_ring *rx_ring,
-				      int cleaned_count);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 			   int cmd);
@@ -1053,6 +1047,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	netdev->features |= NETIF_F_LLTX;
 
+	netdev->vlan_features |= NETIF_F_TSO;
+	netdev->vlan_features |= NETIF_F_TSO6;
+	netdev->vlan_features |= NETIF_F_HW_CSUM;
+	netdev->vlan_features |= NETIF_F_SG;
+
 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
 
 	/* initialize eeprom parameters */
@@ -1331,7 +1330,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
 	hw->max_frame_size = netdev->mtu +
 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -1815,26 +1813,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	}
 	memset(rxdr->buffer_info, 0, size);
 
-	rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page),
-				GFP_KERNEL);
-	if (!rxdr->ps_page) {
-		vfree(rxdr->buffer_info);
-		DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for the receive descriptor ring\n");
-		return -ENOMEM;
-	}
-
-	rxdr->ps_page_dma = kcalloc(rxdr->count,
-				    sizeof(struct e1000_ps_page_dma),
-				    GFP_KERNEL);
-	if (!rxdr->ps_page_dma) {
-		vfree(rxdr->buffer_info);
-		kfree(rxdr->ps_page);
-		DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for the receive descriptor ring\n");
-		return -ENOMEM;
-	}
-
 	if (hw->mac_type <= e1000_82547_rev_2)
 		desc_len = sizeof(struct e1000_rx_desc);
 	else
@@ -1852,8 +1830,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 		"Unable to allocate memory for the receive descriptor ring\n");
 setup_rx_desc_die:
 		vfree(rxdr->buffer_info);
-		kfree(rxdr->ps_page);
-		kfree(rxdr->ps_page_dma);
 		return -ENOMEM;
 	}
 
@@ -1932,11 +1908,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 static void e1000_setup_rctl(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl, rfctl;
-	u32 psrctl = 0;
-#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-	u32 pages = 0;
-#endif
+	u32 rctl;
 
 	rctl = er32(RCTL);
 
@@ -1988,55 +1960,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		break;
 	}
 
-#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-	/* 82571 and greater support packet-split where the protocol
-	 * header is placed in skb->data and the packet data is
-	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-	 * In the case of a non-split, skb->data is linearly filled,
-	 * followed by the page buffers.  Therefore, skb->data is
-	 * sized to hold the largest protocol header.
-	 */
-	/* allocations using alloc_page take too long for regular MTU
-	 * so only enable packet split for jumbo frames */
-	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
-	if ((hw->mac_type >= e1000_82571) && (pages <= 3) &&
-	    PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
-		adapter->rx_ps_pages = pages;
-	else
-		adapter->rx_ps_pages = 0;
-#endif
-	if (adapter->rx_ps_pages) {
-		/* Configure extra packet-split registers */
-		rfctl = er32(RFCTL);
-		rfctl |= E1000_RFCTL_EXTEN;
-		/* disable packet split support for IPv6 extension headers,
-		 * because some malformed IPv6 headers can hang the RX */
-		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
-			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
-
-		ew32(RFCTL, rfctl);
-
-		rctl |= E1000_RCTL_DTYP_PS;
-
-		psrctl |= adapter->rx_ps_bsize0 >>
-			  E1000_PSRCTL_BSIZE0_SHIFT;
-
-		switch (adapter->rx_ps_pages) {
-		case 3:
-			psrctl |= PAGE_SIZE <<
-				  E1000_PSRCTL_BSIZE3_SHIFT;
-		case 2:
-			psrctl |= PAGE_SIZE <<
-				  E1000_PSRCTL_BSIZE2_SHIFT;
-		case 1:
-			psrctl |= PAGE_SIZE >>
-				  E1000_PSRCTL_BSIZE1_SHIFT;
-			break;
-		}
-
-		ew32(PSRCTL, psrctl);
-	}
-
 	ew32(RCTL, rctl);
 }
 
@@ -2053,18 +1976,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rdlen, rctl, rxcsum, ctrl_ext;
 
-	if (adapter->rx_ps_pages) {
-		/* this is a 32 byte descriptor */
-		rdlen = adapter->rx_ring[0].count *
-			sizeof(union e1000_rx_desc_packet_split);
-		adapter->clean_rx = e1000_clean_rx_irq_ps;
-		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
-	} else {
-		rdlen = adapter->rx_ring[0].count *
-			sizeof(struct e1000_rx_desc);
-		adapter->clean_rx = e1000_clean_rx_irq;
-		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
-	}
+	rdlen = adapter->rx_ring[0].count *
+		sizeof(struct e1000_rx_desc);
+	adapter->clean_rx = e1000_clean_rx_irq;
+	adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
 
 	/* disable receives while setting up the descriptors */
 	rctl = er32(RCTL);
@@ -2109,28 +2024,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
 	if (hw->mac_type >= e1000_82543) {
 		rxcsum = er32(RXCSUM);
-		if (adapter->rx_csum) {
+		if (adapter->rx_csum)
 			rxcsum |= E1000_RXCSUM_TUOFL;
-
-			/* Enable 82571 IPv4 payload checksum for UDP fragments
-			 * Must be used in conjunction with packet-split. */
-			if ((hw->mac_type >= e1000_82571) &&
-			    (adapter->rx_ps_pages)) {
-				rxcsum |= E1000_RXCSUM_IPPCSE;
-			}
-		} else {
-			rxcsum &= ~E1000_RXCSUM_TUOFL;
+		else
 			/* don't need to clear IPPCSE as it defaults to 0 */
-		}
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
 		ew32(RXCSUM, rxcsum);
 	}
 
-	/* enable early receives on 82573, only takes effect if using > 2048
-	 * byte total frame size.  for example only for jumbo frames */
-#define E1000_ERT_2048 0x100
-	if (hw->mac_type == e1000_82573)
-		ew32(ERT, E1000_ERT_2048);
-
 	/* Enable Receives */
 	ew32(RCTL, rctl);
 }
@@ -2256,10 +2157,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
-	kfree(rx_ring->ps_page);
-	rx_ring->ps_page = NULL;
-	kfree(rx_ring->ps_page_dma);
-	rx_ring->ps_page_dma = NULL;
 
 	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
 
@@ -2292,11 +2189,9 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_buffer *buffer_info;
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
-	unsigned int i, j;
+	unsigned int i;
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
@@ -2310,25 +2205,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
-		ps_page = &rx_ring->ps_page[i];
-		ps_page_dma = &rx_ring->ps_page_dma[i];
-		for (j = 0; j < adapter->rx_ps_pages; j++) {
-			if (!ps_page->ps_page[j]) break;
-			pci_unmap_page(pdev,
-				       ps_page_dma->ps_page_dma[j],
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			ps_page_dma->ps_page_dma[j] = 0;
-			put_page(ps_page->ps_page[j]);
-			ps_page->ps_page[j] = NULL;
-		}
 	}
 
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
 	memset(rx_ring->buffer_info, 0, size);
-	size = sizeof(struct e1000_ps_page) * rx_ring->count;
-	memset(rx_ring->ps_page, 0, size);
-	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
-	memset(rx_ring->ps_page_dma, 0, size);
 
 	/* Zero out the descriptor ring */
 
@@ -2998,32 +2878,49 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
 	struct e1000_buffer *buffer_info;
 	unsigned int i;
 	u8 css;
+	u32 cmd_len = E1000_TXD_CMD_DEXT;
 
-	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-		css = skb_transport_offset(skb);
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return false;
 
-		i = tx_ring->next_to_use;
-		buffer_info = &tx_ring->buffer_info[i];
-		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_IP):
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			cmd_len |= E1000_TXD_CMD_TCP;
+		break;
+	case __constant_htons(ETH_P_IPV6):
+		/* XXX not handling all IPV6 headers */
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+			cmd_len |= E1000_TXD_CMD_TCP;
+		break;
+	default:
+		if (unlikely(net_ratelimit()))
+			DPRINTK(DRV, WARNING,
+				"checksum_partial proto=%x!\n", skb->protocol);
+		break;
+	}
 
-		context_desc->lower_setup.ip_config = 0;
-		context_desc->upper_setup.tcp_fields.tucss = css;
-		context_desc->upper_setup.tcp_fields.tucso =
-			css + skb->csum_offset;
-		context_desc->upper_setup.tcp_fields.tucse = 0;
-		context_desc->tcp_seg_setup.data = 0;
-		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
+	css = skb_transport_offset(skb);
 
-		buffer_info->time_stamp = jiffies;
-		buffer_info->next_to_watch = i;
+	i = tx_ring->next_to_use;
+	buffer_info = &tx_ring->buffer_info[i];
+	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
-		if (unlikely(++i == tx_ring->count)) i = 0;
-		tx_ring->next_to_use = i;
+	context_desc->lower_setup.ip_config = 0;
+	context_desc->upper_setup.tcp_fields.tucss = css;
+	context_desc->upper_setup.tcp_fields.tucso =
+		css + skb->csum_offset;
+	context_desc->upper_setup.tcp_fields.tucse = 0;
+	context_desc->tcp_seg_setup.data = 0;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
 
-		return true;
-	}
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
 
-	return false;
+	if (unlikely(++i == tx_ring->count)) i = 0;
+	tx_ring->next_to_use = i;
+
+	return true;
 }
 
 #define E1000_MAX_TXD_PWR	12
@@ -4235,181 +4132,6 @@ next_desc:
 }
 
 /**
- * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
- * @adapter: board private structure
- **/
-
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
-				  struct e1000_rx_ring *rx_ring,
-				  int *work_done, int work_to_do)
-{
-	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_buffer *buffer_info, *next_buffer;
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb;
-	unsigned int i, j;
-	u32 length, staterr;
-	int cleaned_count = 0;
-	bool cleaned = false;
-	unsigned int total_rx_bytes=0, total_rx_packets=0;
-
-	i = rx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
-	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
-	buffer_info = &rx_ring->buffer_info[i];
-
-	while (staterr & E1000_RXD_STAT_DD) {
-		ps_page = &rx_ring->ps_page[i];
-		ps_page_dma = &rx_ring->ps_page_dma[i];
-
-		if (unlikely(*work_done >= work_to_do))
-			break;
-		(*work_done)++;
-
-		skb = buffer_info->skb;
-
-		/* in the packet split case this is header only */
-		prefetch(skb->data - NET_IP_ALIGN);
-
-		if (++i == rx_ring->count) i = 0;
-		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
-		prefetch(next_rxd);
-
-		next_buffer = &rx_ring->buffer_info[i];
-
-		cleaned = true;
-		cleaned_count++;
-		pci_unmap_single(pdev, buffer_info->dma,
-				 buffer_info->length,
-				 PCI_DMA_FROMDEVICE);
-
-		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
-			E1000_DBG("%s: Packet Split buffers didn't pick up"
-				  " the full packet\n", netdev->name);
-			dev_kfree_skb_irq(skb);
-			goto next_desc;
-		}
-
-		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
-			dev_kfree_skb_irq(skb);
-			goto next_desc;
-		}
-
-		length = le16_to_cpu(rx_desc->wb.middle.length0);
-
-		if (unlikely(!length)) {
-			E1000_DBG("%s: Last part of the packet spanning"
-				  " multiple descriptors\n", netdev->name);
-			dev_kfree_skb_irq(skb);
-			goto next_desc;
-		}
-
-		/* Good Receive */
-		skb_put(skb, length);
-
-		{
-		/* this looks ugly, but it seems compiler issues make it
-		   more efficient than reusing j */
-		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
-
-		/* page alloc/put takes too long and effects small packet
-		 * throughput, so unsplit small packets and save the alloc/put*/
-		if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
-			u8 *vaddr;
-			/* there is no documentation about how to call
-			 * kmap_atomic, so we can't hold the mapping
-			 * very long */
-			pci_dma_sync_single_for_cpu(pdev,
-				ps_page_dma->ps_page_dma[0],
-				PAGE_SIZE,
-				PCI_DMA_FROMDEVICE);
-			vaddr = kmap_atomic(ps_page->ps_page[0],
-					    KM_SKB_DATA_SOFTIRQ);
-			memcpy(skb_tail_pointer(skb), vaddr, l1);
-			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-			pci_dma_sync_single_for_device(pdev,
-				ps_page_dma->ps_page_dma[0],
-				PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			/* remove the CRC */
-			l1 -= 4;
-			skb_put(skb, l1);
-			goto copydone;
-		} /* if */
-		}
-
-		for (j = 0; j < adapter->rx_ps_pages; j++) {
-			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
-			if (!length)
-				break;
-			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			ps_page_dma->ps_page_dma[j] = 0;
-			skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
-					   length);
-			ps_page->ps_page[j] = NULL;
-			skb->len += length;
-			skb->data_len += length;
-			skb->truesize += length;
-		}
-
-		/* strip the ethernet crc, problem is we're using pages now so
-		 * this whole operation can get a little cpu intensive */
-		pskb_trim(skb, skb->len - 4);
-
-copydone:
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		e1000_rx_checksum(adapter, staterr,
-				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
-		skb->protocol = eth_type_trans(skb, netdev);
-
-		if (likely(rx_desc->wb.upper.header_status &
-			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
-			adapter->rx_hdr_split++;
-
-		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
-			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-				le16_to_cpu(rx_desc->wb.middle.vlan));
-		} else {
-			netif_receive_skb(skb);
-		}
-
-		netdev->last_rx = jiffies;
-
-next_desc:
-		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
-		buffer_info->skb = NULL;
-
-		/* return some buffers to hardware, one at a time is too slow */
-		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
-			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
-			cleaned_count = 0;
-		}
-
-		/* use prefetched values */
-		rx_desc = next_rxd;
-		buffer_info = next_buffer;
-
-		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
-	}
-	rx_ring->next_to_clean = i;
-
-	cleaned_count = E1000_DESC_UNUSED(rx_ring);
-	if (cleaned_count)
-		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
-
-	adapter->total_rx_packets += total_rx_packets;
-	adapter->total_rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
-	return cleaned;
-}
-
-/**
  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  * @adapter: address of board private structure
  **/
@@ -4521,104 +4243,6 @@ map_skb:
 }
 
 /**
- * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @adapter: address of board private structure
- **/
-
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-				      struct e1000_rx_ring *rx_ring,
-				      int cleaned_count)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	union e1000_rx_desc_packet_split *rx_desc;
-	struct e1000_buffer *buffer_info;
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb;
-	unsigned int i, j;
-
-	i = rx_ring->next_to_use;
-	buffer_info = &rx_ring->buffer_info[i];
-	ps_page = &rx_ring->ps_page[i];
-	ps_page_dma = &rx_ring->ps_page_dma[i];
-
-	while (cleaned_count--) {
-		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
-
-		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
-			if (j < adapter->rx_ps_pages) {
-				if (likely(!ps_page->ps_page[j])) {
-					ps_page->ps_page[j] =
-						alloc_page(GFP_ATOMIC);
-					if (unlikely(!ps_page->ps_page[j])) {
-						adapter->alloc_rx_buff_failed++;
-						goto no_buffers;
-					}
-					ps_page_dma->ps_page_dma[j] =
-						pci_map_page(pdev,
-							     ps_page->ps_page[j],
-							     0, PAGE_SIZE,
-							     PCI_DMA_FROMDEVICE);
-				}
-				/* Refresh the desc even if buffer_addrs didn't
-				 * change because each write-back erases
-				 * this info.
-				 */
-				rx_desc->read.buffer_addr[j+1] =
-					cpu_to_le64(ps_page_dma->ps_page_dma[j]);
-			} else
-				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
-		}
-
-		skb = netdev_alloc_skb(netdev,
-				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
-
-		if (unlikely(!skb)) {
-			adapter->alloc_rx_buff_failed++;
-			break;
-		}
-
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		buffer_info->skb = skb;
-		buffer_info->length = adapter->rx_ps_bsize0;
-		buffer_info->dma = pci_map_single(pdev, skb->data,
-						  adapter->rx_ps_bsize0,
-						  PCI_DMA_FROMDEVICE);
-
-		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
-
-		if (unlikely(++i == rx_ring->count)) i = 0;
-		buffer_info = &rx_ring->buffer_info[i];
-		ps_page = &rx_ring->ps_page[i];
-		ps_page_dma = &rx_ring->ps_page_dma[i];
-	}
-
-no_buffers:
-	if (likely(rx_ring->next_to_use != i)) {
-		rx_ring->next_to_use = i;
-		if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
-
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.  (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
-		wmb();
-		/* Hardware increments by 16 bytes, but packet split
-		 * descriptors are 32 bytes...so we increment tail
-		 * twice as much.
-		 */
-		writel(i<<1, hw->hw_addr + rx_ring->rdt);
-	}
-}
-
-/**
  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
  * @adapter:
  **/