Diffstat (limited to 'drivers/net/e1000')
-rw-r--r--  drivers/net/e1000/e1000.h        17
-rw-r--r--  drivers/net/e1000/e1000_main.c  416
2 files changed, 9 insertions(+), 424 deletions(-)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 19e317eaf5bc..62f62970f978 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -155,8 +155,6 @@ do { \
 #endif
 
 #define E1000_MNG_VLAN_NONE (-1)
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
 
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
@@ -168,14 +166,6 @@ struct e1000_buffer {
 	u16 next_to_watch;
 };
 
-struct e1000_ps_page {
-	struct page *ps_page[PS_PAGE_BUFFERS];
-};
-
-struct e1000_ps_page_dma {
-	u64 ps_page_dma[PS_PAGE_BUFFERS];
-};
-
 struct e1000_tx_ring {
 	/* pointer to the descriptor ring memory */
 	void *desc;
@@ -213,9 +203,6 @@ struct e1000_rx_ring {
 	unsigned int next_to_clean;
 	/* array of buffer information structs */
 	struct e1000_buffer *buffer_info;
-	/* arrays of page information for packet split */
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
 
 	/* cpu for rx queue */
 	int cpu;
@@ -228,8 +215,6 @@ struct e1000_rx_ring {
 	((((R)->next_to_clean > (R)->next_to_use) \
 	? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
 
-#define E1000_RX_DESC_PS(R, i) \
-	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
 #define E1000_RX_DESC_EXT(R, i) \
 	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
 #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
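
The E1000_DESC_UNUSED macro kept in this hunk is the classic one-slot-reserved circular ring count: one descriptor is deliberately left unused so that next_to_use == next_to_clean always means "empty", never "full". A minimal compilable sketch of the same arithmetic (struct ring and ring_unused() are illustrative names, not driver code):

#include <assert.h>

struct ring {
	unsigned int count;          /* total descriptors in the ring */
	unsigned int next_to_use;    /* next slot software will fill  */
	unsigned int next_to_clean;  /* next slot software will reap  */
};

static unsigned int ring_unused(const struct ring *r)
{
	/* if use has wrapped past clean, add count to un-wrap the math */
	return (r->next_to_clean > r->next_to_use ? 0 : r->count)
	       + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct ring r = { .count = 256, .next_to_use = 10, .next_to_clean = 10 };
	assert(ring_unused(&r) == 255);  /* empty ring: count - 1 free */
	r.next_to_use = 9;               /* use is one behind clean... */
	assert(ring_unused(&r) == 0);    /* ...so the ring is full     */
	return 0;
}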
@@ -311,10 +296,8 @@ struct e1000_adapter {
 	u32 rx_int_delay;
 	u32 rx_abs_int_delay;
 	bool rx_csum;
-	unsigned int rx_ps_pages;
 	u32 gorcl;
 	u64 gorcl_old;
-	u16 rx_ps_bsize0;
 
 	/* OS defined structs */
 	struct net_device *netdev;
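
The two structs dropped from e1000.h were parallel per-descriptor arrays: ps_page[] held the CPU-side struct page pointers and ps_page_dma[] the matching bus addresses, always indexed in lockstep. A userspace mock of that layout and its allocate-both-or-fail pattern, with void*/uint64_t standing in for struct page* and dma_addr_t (all names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdlib.h>

#define PS_PAGE_BUFFERS 3  /* mirrors MAX_PS_BUFFERS - 1 from the removed header */

struct ps_page     { void     *ps_page[PS_PAGE_BUFFERS];     };  /* CPU pointers  */
struct ps_page_dma { uint64_t  ps_page_dma[PS_PAGE_BUFFERS]; };  /* bus addresses */

struct rx_ring_mock {
	struct ps_page     *ps_page;      /* one entry per RX descriptor   */
	struct ps_page_dma *ps_page_dma;  /* kept in lockstep with ps_page */
};

/* allocate both zeroed arrays for 'count' descriptors, failing as a
 * unit the way e1000_setup_rx_resources() did with kcalloc() */
static int alloc_ps_arrays(struct rx_ring_mock *r, size_t count)
{
	r->ps_page = calloc(count, sizeof(*r->ps_page));
	r->ps_page_dma = calloc(count, sizeof(*r->ps_page_dma));
	if (!r->ps_page || !r->ps_page_dma) {
		free(r->ps_page);
		free(r->ps_page_dma);
		return -1;
	}
	return 0;
}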
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ad6da7b67e55..2ab44db29fac 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -137,15 +137,9 @@ static int e1000_clean(struct napi_struct *napi, int budget);
 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			       struct e1000_rx_ring *rx_ring,
 			       int *work_done, int work_to_do);
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
-				  struct e1000_rx_ring *rx_ring,
-				  int *work_done, int work_to_do);
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 				   struct e1000_rx_ring *rx_ring,
 				   int cleaned_count);
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-				      struct e1000_rx_ring *rx_ring,
-				      int cleaned_count);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 			   int cmd);
@@ -1331,7 +1325,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
 	hw->max_frame_size = netdev->mtu +
 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -1815,26 +1808,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	}
 	memset(rxdr->buffer_info, 0, size);
 
-	rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page),
-				GFP_KERNEL);
-	if (!rxdr->ps_page) {
-		vfree(rxdr->buffer_info);
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the receive descriptor ring\n");
-		return -ENOMEM;
-	}
-
-	rxdr->ps_page_dma = kcalloc(rxdr->count,
-				    sizeof(struct e1000_ps_page_dma),
-				    GFP_KERNEL);
-	if (!rxdr->ps_page_dma) {
-		vfree(rxdr->buffer_info);
-		kfree(rxdr->ps_page);
-		DPRINTK(PROBE, ERR,
-			"Unable to allocate memory for the receive descriptor ring\n");
-		return -ENOMEM;
-	}
-
 	if (hw->mac_type <= e1000_82547_rev_2)
 		desc_len = sizeof(struct e1000_rx_desc);
 	else
@@ -1852,8 +1825,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1852 "Unable to allocate memory for the receive descriptor ring\n"); 1825 "Unable to allocate memory for the receive descriptor ring\n");
1853setup_rx_desc_die: 1826setup_rx_desc_die:
1854 vfree(rxdr->buffer_info); 1827 vfree(rxdr->buffer_info);
1855 kfree(rxdr->ps_page);
1856 kfree(rxdr->ps_page_dma);
1857 return -ENOMEM; 1828 return -ENOMEM;
1858 } 1829 }
1859 1830
@@ -1932,11 +1903,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 static void e1000_setup_rctl(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl, rfctl;
-	u32 psrctl = 0;
-#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-	u32 pages = 0;
-#endif
+	u32 rctl;
 
 	rctl = er32(RCTL);
 
@@ -1988,55 +1955,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		break;
 	}
 
-#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-	/* 82571 and greater support packet-split where the protocol
-	 * header is placed in skb->data and the packet data is
-	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-	 * In the case of a non-split, skb->data is linearly filled,
-	 * followed by the page buffers.  Therefore, skb->data is
-	 * sized to hold the largest protocol header.
-	 */
-	/* allocations using alloc_page take too long for regular MTU
-	 * so only enable packet split for jumbo frames */
-	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
-	if ((hw->mac_type >= e1000_82571) && (pages <= 3) &&
-	    PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
-		adapter->rx_ps_pages = pages;
-	else
-		adapter->rx_ps_pages = 0;
-#endif
-	if (adapter->rx_ps_pages) {
-		/* Configure extra packet-split registers */
-		rfctl = er32(RFCTL);
-		rfctl |= E1000_RFCTL_EXTEN;
-		/* disable packet split support for IPv6 extension headers,
-		 * because some malformed IPv6 headers can hang the RX */
-		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
-			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
-
-		ew32(RFCTL, rfctl);
-
-		rctl |= E1000_RCTL_DTYP_PS;
-
-		psrctl |= adapter->rx_ps_bsize0 >>
-			E1000_PSRCTL_BSIZE0_SHIFT;
-
-		switch (adapter->rx_ps_pages) {
-		case 3:
-			psrctl |= PAGE_SIZE <<
-				E1000_PSRCTL_BSIZE3_SHIFT;
-		case 2:
-			psrctl |= PAGE_SIZE <<
-				E1000_PSRCTL_BSIZE2_SHIFT;
-		case 1:
-			psrctl |= PAGE_SIZE >>
-				E1000_PSRCTL_BSIZE1_SHIFT;
-			break;
-		}
-
-		ew32(PSRCTL, psrctl);
-	}
-
 	ew32(RCTL, rctl);
 }
 
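
The switch removed above builds the PSRCTL (packet-split receive control) value with deliberate fall-through: enabling three data pages also programs the sizes for pages two and one. A compilable sketch of that composition; the shift constants mirror the E1000_PSRCTL_BSIZE*_SHIFT definitions as I recall them from the e1000 headers (BSIZE0 and BSIZE1 are right shifts, BSIZE2 and BSIZE3 left shifts), so treat the exact values as assumptions:

#include <stdint.h>

#define PSRCTL_BSIZE0_SHIFT  7   /* header buffer, 128-byte units (right shift) */
#define PSRCTL_BSIZE1_SHIFT  2   /* page 1, 1 KB units (right shift) */
#define PSRCTL_BSIZE2_SHIFT  6   /* page 2 (left shift) */
#define PSRCTL_BSIZE3_SHIFT 14   /* page 3 (left shift) */
#define MOCK_PAGE_SIZE    4096u
#define MOCK_BSIZE0        128u  /* rx_ps_bsize0 default, E1000_RXBUFFER_128 */

static uint32_t build_psrctl(unsigned int rx_ps_pages)
{
	uint32_t psrctl = MOCK_BSIZE0 >> PSRCTL_BSIZE0_SHIFT;

	switch (rx_ps_pages) {
	case 3:
		psrctl |= MOCK_PAGE_SIZE << PSRCTL_BSIZE3_SHIFT;
		/* fall through */
	case 2:
		psrctl |= MOCK_PAGE_SIZE << PSRCTL_BSIZE2_SHIFT;
		/* fall through */
	case 1:
		psrctl |= MOCK_PAGE_SIZE >> PSRCTL_BSIZE1_SHIFT;
		break;
	}
	return psrctl;
}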
@@ -2053,18 +1971,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rdlen, rctl, rxcsum, ctrl_ext;
 
-	if (adapter->rx_ps_pages) {
-		/* this is a 32 byte descriptor */
-		rdlen = adapter->rx_ring[0].count *
-			sizeof(union e1000_rx_desc_packet_split);
-		adapter->clean_rx = e1000_clean_rx_irq_ps;
-		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
-	} else {
-		rdlen = adapter->rx_ring[0].count *
-			sizeof(struct e1000_rx_desc);
-		adapter->clean_rx = e1000_clean_rx_irq;
-		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
-	}
+	rdlen = adapter->rx_ring[0].count *
+		sizeof(struct e1000_rx_desc);
+	adapter->clean_rx = e1000_clean_rx_irq;
+	adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
 
 	/* disable receives while setting up the descriptors */
 	rctl = er32(RCTL);
@@ -2109,28 +2019,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
 	if (hw->mac_type >= e1000_82543) {
 		rxcsum = er32(RXCSUM);
-		if (adapter->rx_csum) {
+		if (adapter->rx_csum)
 			rxcsum |= E1000_RXCSUM_TUOFL;
-
-			/* Enable 82571 IPv4 payload checksum for UDP fragments
-			 * Must be used in conjunction with packet-split. */
-			if ((hw->mac_type >= e1000_82571) &&
-			    (adapter->rx_ps_pages)) {
-				rxcsum |= E1000_RXCSUM_IPPCSE;
-			}
-		} else {
-			rxcsum &= ~E1000_RXCSUM_TUOFL;
+		else
 			/* don't need to clear IPPCSE as it defaults to 0 */
-		}
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
 		ew32(RXCSUM, rxcsum);
 	}
 
-	/* enable early receives on 82573, only takes effect if using > 2048
-	 * byte total frame size.  for example only for jumbo frames */
-#define E1000_ERT_2048 0x100
-	if (hw->mac_type == e1000_82573)
-		ew32(ERT, E1000_ERT_2048);
-
 	/* Enable Receives */
 	ew32(RCTL, rctl);
 }
@@ -2256,10 +2152,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
-	kfree(rx_ring->ps_page);
-	rx_ring->ps_page = NULL;
-	kfree(rx_ring->ps_page_dma);
-	rx_ring->ps_page_dma = NULL;
 
 	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
 
@@ -2292,11 +2184,9 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_buffer *buffer_info;
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
-	unsigned int i, j;
+	unsigned int i;
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
@@ -2310,25 +2200,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
-		ps_page = &rx_ring->ps_page[i];
-		ps_page_dma = &rx_ring->ps_page_dma[i];
-		for (j = 0; j < adapter->rx_ps_pages; j++) {
-			if (!ps_page->ps_page[j]) break;
-			pci_unmap_page(pdev,
-				       ps_page_dma->ps_page_dma[j],
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			ps_page_dma->ps_page_dma[j] = 0;
-			put_page(ps_page->ps_page[j]);
-			ps_page->ps_page[j] = NULL;
-		}
 	}
 
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
 	memset(rx_ring->buffer_info, 0, size);
-	size = sizeof(struct e1000_ps_page) * rx_ring->count;
-	memset(rx_ring->ps_page, 0, size);
-	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
-	memset(rx_ring->ps_page_dma, 0, size);
 
 	/* Zero out the descriptor ring */
 
@@ -4235,181 +4110,6 @@ next_desc:
 }
 
 /**
- * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
- * @adapter: board private structure
- **/
-
-static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
-				  struct e1000_rx_ring *rx_ring,
-				  int *work_done, int work_to_do)
-{
-	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_buffer *buffer_info, *next_buffer;
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb;
-	unsigned int i, j;
-	u32 length, staterr;
-	int cleaned_count = 0;
-	bool cleaned = false;
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-
-	i = rx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
-	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
-	buffer_info = &rx_ring->buffer_info[i];
-
-	while (staterr & E1000_RXD_STAT_DD) {
-		ps_page = &rx_ring->ps_page[i];
-		ps_page_dma = &rx_ring->ps_page_dma[i];
-
-		if (unlikely(*work_done >= work_to_do))
-			break;
-		(*work_done)++;
-
-		skb = buffer_info->skb;
-
-		/* in the packet split case this is header only */
-		prefetch(skb->data - NET_IP_ALIGN);
-
-		if (++i == rx_ring->count) i = 0;
-		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
-		prefetch(next_rxd);
-
-		next_buffer = &rx_ring->buffer_info[i];
-
-		cleaned = true;
-		cleaned_count++;
-		pci_unmap_single(pdev, buffer_info->dma,
-				 buffer_info->length,
-				 PCI_DMA_FROMDEVICE);
-
-		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
-			E1000_DBG("%s: Packet Split buffers didn't pick up"
-				  " the full packet\n", netdev->name);
-			dev_kfree_skb_irq(skb);
-			goto next_desc;
-		}
-
-		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
-			dev_kfree_skb_irq(skb);
-			goto next_desc;
-		}
-
-		length = le16_to_cpu(rx_desc->wb.middle.length0);
-
-		if (unlikely(!length)) {
-			E1000_DBG("%s: Last part of the packet spanning"
-				  " multiple descriptors\n", netdev->name);
-			dev_kfree_skb_irq(skb);
-			goto next_desc;
-		}
-
-		/* Good Receive */
-		skb_put(skb, length);
-
-		{
-		/* this looks ugly, but it seems compiler issues make it
-		   more efficient than reusing j */
-		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
-
-		/* page alloc/put takes too long and effects small packet
-		 * throughput, so unsplit small packets and save the alloc/put */
-		if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
-			u8 *vaddr;
-			/* there is no documentation about how to call
-			 * kmap_atomic, so we can't hold the mapping
-			 * very long */
-			pci_dma_sync_single_for_cpu(pdev,
-				ps_page_dma->ps_page_dma[0],
-				PAGE_SIZE,
-				PCI_DMA_FROMDEVICE);
-			vaddr = kmap_atomic(ps_page->ps_page[0],
-					    KM_SKB_DATA_SOFTIRQ);
-			memcpy(skb_tail_pointer(skb), vaddr, l1);
-			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-			pci_dma_sync_single_for_device(pdev,
-				ps_page_dma->ps_page_dma[0],
-				PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			/* remove the CRC */
-			l1 -= 4;
-			skb_put(skb, l1);
-			goto copydone;
-		} /* if */
-		}
-
-		for (j = 0; j < adapter->rx_ps_pages; j++) {
-			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
-			if (!length)
-				break;
-			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			ps_page_dma->ps_page_dma[j] = 0;
-			skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
-					   length);
-			ps_page->ps_page[j] = NULL;
-			skb->len += length;
-			skb->data_len += length;
-			skb->truesize += length;
-		}
-
-		/* strip the ethernet crc, problem is we're using pages now so
-		 * this whole operation can get a little cpu intensive */
-		pskb_trim(skb, skb->len - 4);
-
-copydone:
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		e1000_rx_checksum(adapter, staterr,
-				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
-		skb->protocol = eth_type_trans(skb, netdev);
-
-		if (likely(rx_desc->wb.upper.header_status &
-			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
-			adapter->rx_hdr_split++;
-
-		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
-			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-				le16_to_cpu(rx_desc->wb.middle.vlan));
-		} else {
-			netif_receive_skb(skb);
-		}
-
-		netdev->last_rx = jiffies;
-
-next_desc:
-		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
-		buffer_info->skb = NULL;
-
-		/* return some buffers to hardware, one at a time is too slow */
-		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
-			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
-			cleaned_count = 0;
-		}
-
-		/* use prefetched values */
-		rx_desc = next_rxd;
-		buffer_info = next_buffer;
-
-		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
-	}
-	rx_ring->next_to_clean = i;
-
-	cleaned_count = E1000_DESC_UNUSED(rx_ring);
-	if (cleaned_count)
-		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
-
-	adapter->total_rx_packets += total_rx_packets;
-	adapter->total_rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_bytes += total_rx_bytes;
-	adapter->net_stats.rx_packets += total_rx_packets;
-	return cleaned;
-}
-
-/**
  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
  * @adapter: address of board private structure
  **/
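
The function removed above carried the "unsplit" fast path: when the first page fragment is at most copybreak bytes and fits into the 128-byte header buffer alongside the header, it is memcpy'd into skb->data instead of being attached as a page frag, saving an alloc_page()/put_page() round trip per small packet. The decision itself reduces to the following check (should_unsplit() is a hypothetical helper name; the parameters map onto the driver's variables as commented):

#include <stdbool.h>
#include <stdint.h>

/* header_len: bytes already in skb->data (length0 from the descriptor)
 * l1:         bytes in the first page buffer
 * copybreak:  module-parameter threshold for copying instead of mapping
 * bsize0:     header buffer size (adapter->rx_ps_bsize0, 128 by default) */
static bool should_unsplit(uint16_t header_len, uint16_t l1,
			   unsigned int copybreak, unsigned int bsize0)
{
	return l1 && l1 <= copybreak &&
	       (unsigned int)header_len + l1 <= bsize0;
}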
@@ -4521,104 +4221,6 @@ map_skb:
 }
 
 /**
- * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @adapter: address of board private structure
- **/
-
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-				      struct e1000_rx_ring *rx_ring,
-				      int cleaned_count)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	union e1000_rx_desc_packet_split *rx_desc;
-	struct e1000_buffer *buffer_info;
-	struct e1000_ps_page *ps_page;
-	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb;
-	unsigned int i, j;
-
-	i = rx_ring->next_to_use;
-	buffer_info = &rx_ring->buffer_info[i];
-	ps_page = &rx_ring->ps_page[i];
-	ps_page_dma = &rx_ring->ps_page_dma[i];
-
-	while (cleaned_count--) {
-		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
-
-		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
-			if (j < adapter->rx_ps_pages) {
-				if (likely(!ps_page->ps_page[j])) {
-					ps_page->ps_page[j] =
-						alloc_page(GFP_ATOMIC);
-					if (unlikely(!ps_page->ps_page[j])) {
-						adapter->alloc_rx_buff_failed++;
-						goto no_buffers;
-					}
-					ps_page_dma->ps_page_dma[j] =
-						pci_map_page(pdev,
-							     ps_page->ps_page[j],
-							     0, PAGE_SIZE,
-							     PCI_DMA_FROMDEVICE);
-				}
-				/* Refresh the desc even if buffer_addrs didn't
-				 * change because each write-back erases
-				 * this info.
-				 */
-				rx_desc->read.buffer_addr[j+1] =
-					cpu_to_le64(ps_page_dma->ps_page_dma[j]);
-			} else
-				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
-		}
-
-		skb = netdev_alloc_skb(netdev,
-				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
-
-		if (unlikely(!skb)) {
-			adapter->alloc_rx_buff_failed++;
-			break;
-		}
-
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		buffer_info->skb = skb;
-		buffer_info->length = adapter->rx_ps_bsize0;
-		buffer_info->dma = pci_map_single(pdev, skb->data,
-						  adapter->rx_ps_bsize0,
-						  PCI_DMA_FROMDEVICE);
-
-		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
-
-		if (unlikely(++i == rx_ring->count)) i = 0;
-		buffer_info = &rx_ring->buffer_info[i];
-		ps_page = &rx_ring->ps_page[i];
-		ps_page_dma = &rx_ring->ps_page_dma[i];
-	}
-
-no_buffers:
-	if (likely(rx_ring->next_to_use != i)) {
-		rx_ring->next_to_use = i;
-		if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
-
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.  (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
-		wmb();
-		/* Hardware increments by 16 bytes, but packet split
-		 * descriptors are 32 bytes...so we increment tail
-		 * twice as much.
-		 */
-		writel(i<<1, hw->hw_addr + rx_ring->rdt);
-	}
-}
-
-/**
  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
  * @adapter:
  **/
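
One detail of the removed allocator worth noting: the RDT tail register advances in 16-byte descriptor units, while packet-split descriptors are 32 bytes, hence the writel(i << 1, ...) at the end, and tail is backed up by one so it points at the last filled slot. A minimal sketch of that publish step under those assumptions (publish_tail() and struct rx_tail_mock are illustrative names; the volatile store stands in for the driver's writel() to hw->hw_addr + rx_ring->rdt):

#include <stdint.h>

struct rx_tail_mock {
	unsigned int count;        /* descriptors in the ring */
	unsigned int next_to_use;  /* first slot not yet given to hw */
	volatile uint32_t *rdt;    /* stand-in for the tail register */
};

/* publish newly filled descriptors up to (but not including) slot i */
static void publish_tail(struct rx_tail_mock *r, unsigned int i)
{
	if (r->next_to_use == i)
		return;                  /* nothing new to publish */
	r->next_to_use = i;
	if (i-- == 0)                    /* back up to the last filled */
		i = r->count - 1;        /* slot, wrapping at zero     */
	/* the driver issues wmb() here so descriptor writes are
	 * visible before the tail write on weakly ordered archs */
	*r->rdt = (uint32_t)i << 1;      /* 32-byte descriptors: double
					  * the 16-byte-unit tail index */
}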