author    Bruce Allan <bruce.w.allan@intel.com>    2008-03-28 12:15:03 -0400
committer Jeff Garzik <jeff@garzik.org>            2008-03-28 22:14:56 -0400
commit    ad68076e07fa01bd0c98278a959d0fd2bb26f1ac (patch)
tree      f0b664ecdb38478f9b995aff10dcb39a09221fb6 /drivers/net/e1000e/netdev.c
parent    652f093fdf14c7ca1e13c052da429ae385e4dc21 (diff)
e1000e: reformat comment blocks, cosmetic changes only
Adjusting the comment blocks here to be code-style compliant. No code
changes. Changed some copyright dates to 2008. Indentation fixes.

Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
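For reference, the target style is the kernel's preferred multi-line comment
format (Documentation/CodingStyle): the opening /* and the closing */ each
stand on a line of their own, and every body line carries an aligned
asterisk. A minimal before/after sketch of the transformation this patch
applies throughout the file (illustrative only, not copied from the driver):

	/* old style: the text starts on the opening line and the
	 * closing marker trails the last line of text */

	/*
	 * new style: the opening and closing markers stand alone
	 * and each body line is prefixed with an aligned asterisk.
	 */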
Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--  drivers/net/e1000e/netdev.c  428
1 file changed, 279 insertions, 149 deletions
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index f501dd5e7b16..88fac392d4e0 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -82,7 +82,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
 }
 
 /**
- * e1000_receive_skb - helper function to handle rx indications
+ * e1000_receive_skb - helper function to handle Rx indications
  * @adapter: board private structure
  * @status: descriptor status field as written by hardware
  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
@@ -138,8 +138,9 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 		/* TCP checksum is good */
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else {
-		/* IP fragment with UDP payload */
-		/* Hardware complements the payload checksum, so we undo it
+		/*
+		 * IP fragment with UDP payload
+		 * Hardware complements the payload checksum, so we undo it
 		 * and then put the value in host order for further stack use.
 		 */
 		__sum16 sum = (__force __sum16)htons(csum);
@@ -182,7 +183,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
+		/*
+		 * Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
 		 * the 14 byte MAC header is removed
 		 */
@@ -213,10 +215,12 @@ map_skb:
 		if (i-- == 0)
 			i = (rx_ring->count - 1);
 
-		/* Force memory writes to complete before letting h/w
+		/*
+		 * Force memory writes to complete before letting h/w
 		 * know there are new descriptors to fetch. (Only
 		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
+		 * such as IA-64).
+		 */
 		wmb();
 		writel(i, adapter->hw.hw_addr + rx_ring->tail);
 	}
@@ -285,7 +289,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
+		/*
+		 * Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
 		 * the 14 byte MAC header is removed
 		 */
@@ -319,12 +324,15 @@ no_buffers:
 		if (!(i--))
 			i = (rx_ring->count - 1);
 
-		/* Force memory writes to complete before letting h/w
+		/*
+		 * Force memory writes to complete before letting h/w
 		 * know there are new descriptors to fetch. (Only
 		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
+		 * such as IA-64).
+		 */
 		wmb();
-		/* Hardware increments by 16 bytes, but packet split
+		/*
+		 * Hardware increments by 16 bytes, but packet split
 		 * descriptors are 32 bytes...so we increment tail
 		 * twice as much.
 		 */
@@ -409,9 +417,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			total_rx_bytes += length;
 			total_rx_packets++;
 
-			/* code added for copybreak, this should improve
+			/*
+			 * code added for copybreak, this should improve
 			 * performance for small packets with large amounts
-			 * of reassembly being done in the stack */
+			 * of reassembly being done in the stack
+			 */
 			if (length < copybreak) {
 				struct sk_buff *new_skb =
 					netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
@@ -581,14 +591,15 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	}
 
 	if (adapter->detect_tx_hung) {
-		/* Detect a transmit hang in hardware, this serializes the
-		 * check with the clearing of time_stamp and movement of i */
+		/*
+		 * Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
 		adapter->detect_tx_hung = 0;
 		if (tx_ring->buffer_info[eop].dma &&
 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
 			       + (adapter->tx_timeout_factor * HZ))
-		    && !(er32(STATUS) &
-			 E1000_STATUS_TXOFF)) {
+		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 			e1000_print_tx_hang(adapter);
 			netif_stop_queue(netdev);
 		}
@@ -677,21 +688,28 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		skb_put(skb, length);
 
 		{
-		/* this looks ugly, but it seems compiler issues make it
-		   more efficient than reusing j */
+		/*
+		 * this looks ugly, but it seems compiler issues make it
+		 * more efficient than reusing j
+		 */
 		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
 
-		/* page alloc/put takes too long and effects small packet
-		 * throughput, so unsplit small packets and save the alloc/put*/
+		/*
+		 * page alloc/put takes too long and effects small packet
+		 * throughput, so unsplit small packets and save the alloc/put
+		 * only valid in softirq (napi) context to call kmap_*
+		 */
 		if (l1 && (l1 <= copybreak) &&
 		    ((length + l1) <= adapter->rx_ps_bsize0)) {
 			u8 *vaddr;
 
 			ps_page = &buffer_info->ps_pages[0];
 
-			/* there is no documentation about how to call
+			/*
+			 * there is no documentation about how to call
 			 * kmap_atomic, so we can't hold the mapping
-			 * very long */
+			 * very long
+			 */
 			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
 						    PAGE_SIZE, PCI_DMA_FROMDEVICE);
 			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
@@ -836,19 +854,25 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = er32(ICR);
 
-	/* read ICR disables interrupts using IAM */
+	/*
+	 * read ICR disables interrupts using IAM
+	 */
 
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
-		/* ICH8 workaround-- Call gig speed drop workaround on cable
-		 * disconnect (LSC) before accessing any PHY registers */
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 		    (!(er32(STATUS) & E1000_STATUS_LU)))
 			e1000e_gig_downshift_workaround_ich8lan(hw);
 
-		/* 80003ES2LAN workaround-- For packet buffer work-around on
+		/*
+		 * 80003ES2LAN workaround-- For packet buffer work-around on
 		 * link down event; disable receives here in the ISR and reset
-		 * adapter in watchdog */
+		 * adapter in watchdog
+		 */
 		if (netif_carrier_ok(netdev) &&
 		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
 			/* disable receives */
@@ -886,23 +910,31 @@ static irqreturn_t e1000_intr(int irq, void *data)
 	if (!icr)
 		return IRQ_NONE;  /* Not our interrupt */
 
-	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
-	 * not set, then the adapter didn't send an interrupt */
+	/*
+	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt
+	 */
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
-	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
-	 * need for the IMC write */
+	/*
+	 * Interrupt Auto-Mask...upon reading ICR,
+	 * interrupts are masked. No need for the
+	 * IMC write
+	 */
 
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
-		/* ICH8 workaround-- Call gig speed drop workaround on cable
-		 * disconnect (LSC) before accessing any PHY registers */
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 		    (!(er32(STATUS) & E1000_STATUS_LU)))
 			e1000e_gig_downshift_workaround_ich8lan(hw);
 
-		/* 80003ES2LAN workaround--
+		/*
+		 * 80003ES2LAN workaround--
 		 * For packet buffer work-around on link down event;
 		 * disable receives here in the ISR and
 		 * reset adapter in watchdog
@@ -1011,8 +1043,7 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
 		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
 		ctrl_ext = er32(CTRL_EXT);
-		ew32(CTRL_EXT,
-		     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
 	}
 }
 
@@ -1038,8 +1069,7 @@ static void e1000_release_hw_control(struct e1000_adapter *adapter)
 		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
 		ctrl_ext = er32(CTRL_EXT);
-		ew32(CTRL_EXT,
-		     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 	}
 }
 
@@ -1341,9 +1371,11 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
 
 set_itr_now:
 	if (new_itr != adapter->itr) {
-		/* this attempts to bias the interrupt rate towards Bulk
+		/*
+		 * this attempts to bias the interrupt rate towards Bulk
 		 * by adding intermediate steps when interrupt rate is
-		 * increasing */
+		 * increasing
+		 */
 		new_itr = new_itr > adapter->itr ?
 			  min(adapter->itr + (new_itr >> 2), new_itr) :
 			  new_itr;
@@ -1354,7 +1386,7 @@ set_itr_now:
 
 /**
  * e1000_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * @napi: struct associated with this polling callback
  * @budget: amount of packets driver is allowed to process this poll
  **/
 static int e1000_clean(struct napi_struct *napi, int budget)
@@ -1366,10 +1398,12 @@ static int e1000_clean(struct napi_struct *napi, int budget)
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
 
-	/* e1000_clean is called per-cpu. This lock protects
+	/*
+	 * e1000_clean is called per-cpu. This lock protects
 	 * tx_ring from being cleaned by multiple cpus
 	 * simultaneously. A failure obtaining the lock means
-	 * tx_ring is currently being cleaned anyway. */
+	 * tx_ring is currently being cleaned anyway.
+	 */
 	if (spin_trylock(&adapter->tx_queue_lock)) {
 		tx_cleaned = e1000_clean_tx_irq(adapter);
 		spin_unlock(&adapter->tx_queue_lock);
@@ -1539,9 +1573,11 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
 
 	manc = er32(MANC);
 
-	/* enable receiving management packets to the host. this will probably
+	/*
+	 * enable receiving management packets to the host. this will probably
 	 * generate destination unreachable messages from the host OS, but
-	 * the packets will be handled on SMBUS */
+	 * the packets will be handled on SMBUS
+	 */
 	manc |= E1000_MANC_EN_MNG2HOST;
 	manc2h = er32(MANC2H);
 #define E1000_MNG2HOST_PORT_623 (1 << 5)
@@ -1591,7 +1627,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 
 	/* Set the Tx Interrupt Delay register */
 	ew32(TIDV, adapter->tx_int_delay);
-	/* tx irq moderation */
+	/* Tx irq moderation */
 	ew32(TADV, adapter->tx_abs_int_delay);
 
 	/* Program the Transmit Control Register */
@@ -1602,8 +1638,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 
 	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
 		tarc = er32(TARC0);
-		/* set the speed mode bit, we'll clear it if we're not at
-		 * gigabit link later */
+		/*
+		 * set the speed mode bit, we'll clear it if we're not at
+		 * gigabit link later
+		 */
 #define SPEED_MODE_BIT (1 << 21)
 		tarc |= SPEED_MODE_BIT;
 		ew32(TARC0, tarc);
@@ -1724,8 +1762,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		/* Configure extra packet-split registers */
 		rfctl = er32(RFCTL);
 		rfctl |= E1000_RFCTL_EXTEN;
-		/* disable packet split support for IPv6 extension headers,
-		 * because some malformed IPv6 headers can hang the RX */
+		/*
+		 * disable packet split support for IPv6 extension headers,
+		 * because some malformed IPv6 headers can hang the Rx
+		 */
 		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
 			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
 
@@ -1794,8 +1834,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	/* irq moderation */
 	ew32(RADV, adapter->rx_abs_int_delay);
 	if (adapter->itr_setting != 0)
-		ew32(ITR,
-		     1000000000 / (adapter->itr * 256));
+		ew32(ITR, 1000000000 / (adapter->itr * 256));
 
 	ctrl_ext = er32(CTRL_EXT);
 	/* Reset delay timers after every interrupt */
@@ -1806,8 +1845,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	ew32(CTRL_EXT, ctrl_ext);
 	e1e_flush();
 
-	/* Setup the HW Rx Head and Tail Descriptor Pointers and
-	 * the Base and Length of the Rx Descriptor Ring */
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
 	rdba = rx_ring->dma;
 	ew32(RDBAL, (rdba & DMA_32BIT_MASK));
 	ew32(RDBAH, (rdba >> 32));
@@ -1822,8 +1863,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
 		rxcsum |= E1000_RXCSUM_TUOFL;
 
-		/* IPv4 payload checksum for UDP fragments must be
-		 * used in conjunction with packet-split. */
+		/*
+		 * IPv4 payload checksum for UDP fragments must be
+		 * used in conjunction with packet-split.
+		 */
 		if (adapter->rx_ps_pages)
 			rxcsum |= E1000_RXCSUM_IPPCSE;
 	} else {
@@ -1832,9 +1875,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	}
 	ew32(RXCSUM, rxcsum);
 
-	/* Enable early receives on supported devices, only takes effect when
+	/*
+	 * Enable early receives on supported devices, only takes effect when
 	 * packet size is equal or larger than the specified value (in 8 byte
-	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */
+	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
+	 */
 	if ((adapter->flags & FLAG_HAS_ERT) &&
 	    (adapter->netdev->mtu > ETH_DATA_LEN))
 		ew32(ERT, E1000_ERT_2048);
@@ -1930,7 +1975,7 @@ static void e1000_set_multi(struct net_device *netdev)
 }
 
 /**
- * e1000_configure - configure the hardware for RX and TX
+ * e1000_configure - configure the hardware for Rx and Tx
  * @adapter: private board structure
  **/
 static void e1000_configure(struct e1000_adapter *adapter)
@@ -1943,8 +1988,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
 	e1000_configure_rx(adapter);
-	adapter->alloc_rx_buf(adapter,
-			      e1000_desc_unused(adapter->rx_ring));
+	adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
 }
 
 /**
@@ -1961,8 +2005,10 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
 
 	/* Just clear the power down bit to wake the phy back up */
 	if (adapter->hw.media_type == e1000_media_type_copper) {
-		/* according to the manual, the phy will retain its
-		 * settings across a power-down/up cycle */
+		/*
+		 * According to the manual, the phy will retain its
+		 * settings across a power-down/up cycle
+		 */
 		e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
 		mii_reg &= ~MII_CR_POWER_DOWN;
 		e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
@@ -1991,8 +2037,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
 		return;
 
 	/* reset is blocked because of a SoL/IDER session */
-	if (e1000e_check_mng_mode(hw) ||
-	    e1000_check_reset_block(hw))
+	if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
 		return;
 
 	/* manageability (AMT) is enabled */
@@ -2012,7 +2057,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
  * This function boots the hardware and enables some settings that
  * require a configuration cycle of the hardware - those cannot be
  * set/changed during runtime. After reset the device needs to be
- * properly configured for rx, tx etc.
+ * properly configured for Rx, Tx etc.
  */
 void e1000e_reset(struct e1000_adapter *adapter)
 {
@@ -2022,23 +2067,27 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	u32 pba;
 	u16 hwm;
 
+	/* reset Packet Buffer Allocation to default */
 	ew32(PBA, adapter->pba);
 
 	if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) {
-		/* To maintain wire speed transmits, the Tx FIFO should be
+		/*
+		 * To maintain wire speed transmits, the Tx FIFO should be
 		 * large enough to accommodate two full transmit packets,
 		 * rounded up to the next 1KB and expressed in KB. Likewise,
 		 * the Rx FIFO should be large enough to accommodate at least
 		 * one full receive packet and is similarly rounded up and
-		 * expressed in KB. */
+		 * expressed in KB.
+		 */
 		pba = er32(PBA);
 		/* upper 16 bits has Tx packet buffer allocation size in KB */
 		tx_space = pba >> 16;
 		/* lower 16 bits has Rx packet buffer allocation size in KB */
 		pba &= 0xffff;
-		/* the tx fifo also stores 16 bytes of information about the tx
-		 * but don't include ethernet FCS because hardware appends it */
-		min_tx_space = (mac->max_frame_size +
+		/*
+		 * the Tx fifo also stores 16 bytes of information about the tx
+		 * but don't include ethernet FCS because hardware appends it
+		 */ min_tx_space = (mac->max_frame_size +
 				sizeof(struct e1000_tx_desc) -
 				ETH_FCS_LEN) * 2;
 		min_tx_space = ALIGN(min_tx_space, 1024);
@@ -2048,15 +2097,19 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		min_rx_space = ALIGN(min_rx_space, 1024);
 		min_rx_space >>= 10;
 
-		/* If current Tx allocation is less than the min Tx FIFO size,
+		/*
+		 * If current Tx allocation is less than the min Tx FIFO size,
 		 * and the min Tx FIFO size is less than the current Rx FIFO
-		 * allocation, take space away from current Rx allocation */
+		 * allocation, take space away from current Rx allocation
+		 */
 		if ((tx_space < min_tx_space) &&
 		    ((min_tx_space - tx_space) < pba)) {
 			pba -= min_tx_space - tx_space;
 
-			/* if short on rx space, rx wins and must trump tx
-			 * adjustment or use Early Receive if available */
+			/*
+			 * if short on Rx space, Rx wins and must trump tx
+			 * adjustment or use Early Receive if available
+			 */
 			if ((pba < min_rx_space) &&
 			    (!(adapter->flags & FLAG_HAS_ERT)))
 				/* ERT enabled in e1000_configure_rx */
@@ -2067,14 +2120,17 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	}
 
 
-	/* flow control settings */
-	/* The high water mark must be low enough to fit one full frame
+	/*
+	 * flow control settings
+	 *
+	 * The high water mark must be low enough to fit one full frame
 	 * (or the size used for early receive) above it in the Rx FIFO.
 	 * Set it to the lower of:
 	 * - 90% of the Rx FIFO size, and
 	 * - the full Rx FIFO size minus the early receive size (for parts
 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
-	 * - the full Rx FIFO size minus one full frame */
+	 * - the full Rx FIFO size minus one full frame
+	 */
 	if (adapter->flags & FLAG_HAS_ERT)
 		hwm = min(((adapter->pba << 10) * 9 / 10),
 			  ((adapter->pba << 10) - (E1000_ERT_2048 << 3)));
@@ -2108,9 +2164,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
 	if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
 		u16 phy_data = 0;
-		/* speed up time to link by disabling smart power down, ignore
+		/*
+		 * speed up time to link by disabling smart power down, ignore
 		 * the return value of this function because there is nothing
-		 * different we would do if it failed */
+		 * different we would do if it failed
+		 */
 		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
 		phy_data &= ~IGP02E1000_PM_SPD;
 		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
@@ -2140,8 +2198,10 @@ void e1000e_down(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl, rctl;
 
-	/* signal that we're down so the interrupt handler does not
-	 * reschedule our watchdog timer */
+	/*
+	 * signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer
+	 */
 	set_bit(__E1000_DOWN, &adapter->state);
 
 	/* disable receives in the hardware */
@@ -2272,16 +2332,20 @@ static int e1000_open(struct net_device *netdev)
 	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
 		e1000_update_mng_vlan(adapter);
 
-	/* If AMT is enabled, let the firmware know that the network
-	 * interface is now open */
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now open
+	 */
 	if ((adapter->flags & FLAG_HAS_AMT) &&
 	    e1000e_check_mng_mode(&adapter->hw))
 		e1000_get_hw_control(adapter);
 
-	/* before we allocate an interrupt, we must be ready to handle it.
+	/*
+	 * before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 	 * as soon as we call pci_request_irq, so we have to setup our
-	 * clean_rx handler before we do so. */
+	 * clean_rx handler before we do so.
+	 */
 	e1000_configure(adapter);
 
 	err = e1000_request_irq(adapter);
@@ -2335,16 +2399,20 @@ static int e1000_close(struct net_device *netdev)
 	e1000e_free_tx_resources(adapter);
 	e1000e_free_rx_resources(adapter);
 
-	/* kill manageability vlan ID if supported, but not if a vlan with
-	 * the same ID is registered on the host OS (let 8021q kill it) */
+	/*
+	 * kill manageability vlan ID if supported, but not if a vlan with
+	 * the same ID is registered on the host OS (let 8021q kill it)
+	 */
 	if ((adapter->hw.mng_cookie.status &
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    !(adapter->vlgrp &&
 	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 
-	/* If AMT is enabled, let the firmware know that the network
-	 * interface is now closed */
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now closed
+	 */
 	if ((adapter->flags & FLAG_HAS_AMT) &&
 	    e1000e_check_mng_mode(&adapter->hw))
 		e1000_release_hw_control(adapter);
@@ -2375,12 +2443,14 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 		/* activate the work around */
 		e1000e_set_laa_state_82571(&adapter->hw, 1);
 
-		/* Hold a copy of the LAA in RAR[14] This is done so that
+		/*
+		 * Hold a copy of the LAA in RAR[14] This is done so that
 		 * between the time RAR[0] gets clobbered and the time it
 		 * gets fixed (in e1000_watchdog), the actual LAA is in one
 		 * of the RARs and no incoming packets directed to this port
 		 * are dropped. Eventually the LAA will be in RAR[0] and
-		 * RAR[14] */
+		 * RAR[14]
+		 */
 		e1000e_rar_set(&adapter->hw,
 			       adapter->hw.mac.addr,
 			       adapter->hw.mac.rar_entry_count - 1);
@@ -2389,8 +2459,10 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 	return 0;
 }
 
-/* Need to wait a few seconds after link up to get diagnostic information from
- * the phy */
+/*
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
@@ -2421,7 +2493,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
 	spin_lock_irqsave(&adapter->stats_lock, irq_flags);
 
-	/* these counters are modified from e1000_adjust_tbi_stats,
+	/*
+	 * these counters are modified from e1000_adjust_tbi_stats,
 	 * called from the interrupt context, so they must only
 	 * be written while holding adapter->stats_lock
 	 */
@@ -2515,8 +2588,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
 	/* Rx Errors */
 
-	/* RLEC on some newer hardware can be incorrect so build
-	 * our own version based on RUC and ROC */
+	/*
+	 * RLEC on some newer hardware can be incorrect so build
+	 * our own version based on RUC and ROC
+	 */
 	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
@@ -2628,8 +2703,10 @@ static void e1000_watchdog_task(struct work_struct *work)
 						   &adapter->link_speed,
 						   &adapter->link_duplex);
 			e1000_print_link_info(adapter);
-			/* tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor */
+			/*
+			 * tweak tx_queue_len according to speed/duplex
+			 * and adjust the timeout factor
+			 */
 			netdev->tx_queue_len = adapter->tx_queue_len;
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
@@ -2645,8 +2722,10 @@ static void e1000_watchdog_task(struct work_struct *work)
 				break;
 			}
 
-			/* workaround: re-program speed mode bit after
-			 * link-up event */
+			/*
+			 * workaround: re-program speed mode bit after
+			 * link-up event
+			 */
 			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
 			    !txb2b) {
 				u32 tarc0;
@@ -2655,8 +2734,10 @@ static void e1000_watchdog_task(struct work_struct *work)
 				ew32(TARC0, tarc0);
 			}
 
-			/* disable TSO for pcie and 10/100 speeds, to avoid
-			 * some hardware issues */
+			/*
+			 * disable TSO for pcie and 10/100 speeds, to avoid
+			 * some hardware issues
+			 */
 			if (!(adapter->flags & FLAG_TSO_FORCE)) {
 				switch (adapter->link_speed) {
 				case SPEED_10:
@@ -2676,8 +2757,10 @@ static void e1000_watchdog_task(struct work_struct *work)
 				}
 			}
 
-			/* enable transmits in the hardware, need to do this
-			 * after setting TARC0 */
+			/*
+			 * enable transmits in the hardware, need to do this
+			 * after setting TARC(0)
+			 */
 			tctl = er32(TCTL);
 			tctl |= E1000_TCTL_EN;
 			ew32(TCTL, tctl);
@@ -2731,23 +2814,27 @@ link_up:
 		tx_pending = (e1000_desc_unused(tx_ring) + 1 <
 			      tx_ring->count);
 		if (tx_pending) {
-			/* We've lost link, so the controller stops DMA,
+			/*
+			 * We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
 			 * to get done, so reset controller to flush Tx.
-			 * (Do the reset outside of interrupt context). */
+			 * (Do the reset outside of interrupt context).
+			 */
 			adapter->tx_timeout_count++;
 			schedule_work(&adapter->reset_task);
 		}
 	}
 
-	/* Cause software interrupt to ensure rx ring is cleaned */
+	/* Cause software interrupt to ensure Rx ring is cleaned */
 	ew32(ICS, E1000_ICS_RXDMT0);
 
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
 
-	/* With 82571 controllers, LAA may be overwritten due to controller
-	 * reset from the other port. Set the appropriate LAA in RAR[0] */
+	/*
+	 * With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0]
+	 */
 	if (e1000e_get_laa_state_82571(hw))
 		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
 
@@ -3023,16 +3110,20 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 
 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
 
-	/* Force memory writes to complete before letting h/w
+	/*
+	 * Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch. (Only
 	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64). */
+	 * such as IA-64).
+	 */
 	wmb();
 
 	tx_ring->next_to_use = i;
 	writel(i, adapter->hw.hw_addr + tx_ring->tail);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems */
+	/*
+	 * we need this if more than one processor can write to our tail
+	 * at a time, it synchronizes IO on IA64/Altix systems
+	 */
 	mmiowb();
 }
 
3038 3129
@@ -3080,13 +3171,17 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3080 struct e1000_adapter *adapter = netdev_priv(netdev); 3171 struct e1000_adapter *adapter = netdev_priv(netdev);
3081 3172
3082 netif_stop_queue(netdev); 3173 netif_stop_queue(netdev);
3083 /* Herbert's original patch had: 3174 /*
3175 * Herbert's original patch had:
3084 * smp_mb__after_netif_stop_queue(); 3176 * smp_mb__after_netif_stop_queue();
3085 * but since that doesn't exist yet, just open code it. */ 3177 * but since that doesn't exist yet, just open code it.
3178 */
3086 smp_mb(); 3179 smp_mb();
3087 3180
3088 /* We need to check again in a case another CPU has just 3181 /*
3089 * made room available. */ 3182 * We need to check again in a case another CPU has just
3183 * made room available.
3184 */
3090 if (e1000_desc_unused(adapter->tx_ring) < size) 3185 if (e1000_desc_unused(adapter->tx_ring) < size)
3091 return -EBUSY; 3186 return -EBUSY;
3092 3187
@@ -3133,21 +3228,29 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	mss = skb_shinfo(skb)->gso_size;
-	/* The controller does a simple calculation to
+	/*
+	 * The controller does a simple calculation to
 	 * make sure there is enough room in the FIFO before
 	 * initiating the DMA for each buffer. The calc is:
 	 * 4 = ceil(buffer len/mss). To make sure we don't
 	 * overrun the FIFO, adjust the max buffer len if mss
-	 * drops. */
+	 * drops.
+	 */
 	if (mss) {
 		u8 hdr_len;
 		max_per_txd = min(mss << 2, max_per_txd);
 		max_txd_pwr = fls(max_per_txd) - 1;
 
-		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+		/*
+		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
 		 * points to just header, pull a few bytes of payload from
-		 * frags into skb->data */
+		 * frags into skb->data
+		 */
 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		/*
+		 * we do this workaround for ES2LAN, but it is un-necessary,
+		 * avoiding it could save a lot of cycles
+		 */
 		if (skb->data_len && (hdr_len == len)) {
 			unsigned int pull_size;
 
@@ -3181,8 +3284,10 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		/* Collision - tell upper layer to requeue */
 		return NETDEV_TX_LOCKED;
 
-	/* need: count + 2 desc gap to keep tail from touching
-	 * head, otherwise try next time */
+	/*
+	 * need: count + 2 desc gap to keep tail from touching
+	 * head, otherwise try next time
+	 */
 	if (e1000_maybe_stop_tx(netdev, count + 2)) {
 		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
 		return NETDEV_TX_BUSY;
@@ -3207,9 +3312,11 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else if (e1000_tx_csum(adapter, skb))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
 
-	/* Old method was to assume IPv4 packet by default if TSO was enabled.
+	/*
+	 * Old method was to assume IPv4 packet by default if TSO was enabled.
 	 * 82571 hardware supports TSO capabilities for IPv6 as well...
-	 * no longer assume, we must. */
+	 * no longer assume, we must.
+	 */
 	if (skb->protocol == htons(ETH_P_IP))
 		tx_flags |= E1000_TX_FLAGS_IPV4;
 
@@ -3311,10 +3418,12 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	if (netif_running(netdev))
 		e1000e_down(adapter);
 
-	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	/*
+	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
 	 * larger slab size.
-	 * i.e. RXBUFFER_2048 --> size-4096 slab */
+	 * i.e. RXBUFFER_2048 --> size-4096 slab
+	 */
 
 	if (max_frame <= 256)
 		adapter->rx_buffer_len = 256;
@@ -3331,7 +3440,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
 	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
 		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
-					 + ETH_FCS_LEN ;
+					 + ETH_FCS_LEN;
 
 	ndev_info(netdev, "changing MTU from %d to %d\n",
 		  netdev->mtu, new_mtu);
@@ -3467,8 +3576,10 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 	if (adapter->hw.phy.type == e1000_phy_igp_3)
 		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
 
-	/* Release control of h/w to f/w. If f/w is AMT enabled, this
-	 * would have already happened in close and is redundant. */
+	/*
+	 * Release control of h/w to f/w. If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
 	e1000_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
@@ -3543,9 +3654,11 @@ static int e1000_resume(struct pci_dev *pdev)
 
 	netif_device_attach(netdev);
 
-	/* If the controller has AMT, do not set DRV_LOAD until the interface
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
 	 * is up. For all other cases, let the f/w know that the h/w is now
-	 * under the control of the driver. */
+	 * under the control of the driver.
+	 */
 	if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))
 		e1000_get_hw_control(adapter);
 
@@ -3656,9 +3769,11 @@ static void e1000_io_resume(struct pci_dev *pdev)
 
 	netif_device_attach(netdev);
 
-	/* If the controller has AMT, do not set DRV_LOAD until the interface
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
 	 * is up. For all other cases, let the f/w know that the h/w is now
-	 * under the control of the driver. */
+	 * under the control of the driver.
+	 */
 	if (!(adapter->flags & FLAG_HAS_AMT) ||
 	    !e1000e_check_mng_mode(&adapter->hw))
 		e1000_get_hw_control(adapter);
@@ -3852,15 +3967,19 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	/* We should not be using LLTX anymore, but we are still TX faster with
-	 * it. */
+	/*
+	 * We should not be using LLTX anymore, but we are still Tx faster with
+	 * it.
+	 */
 	netdev->features |= NETIF_F_LLTX;
 
 	if (e1000e_enable_mng_pass_thru(&adapter->hw))
 		adapter->flags |= FLAG_MNG_PT_ENABLED;
 
-	/* before reading the NVM, reset the controller to
-	 * put the device in a known good starting state */
+	/*
+	 * before reading the NVM, reset the controller to
+	 * put the device in a known good starting state
+	 */
 	adapter->hw.mac.ops.reset_hw(&adapter->hw);
 
 	/*
@@ -3954,9 +4073,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	/* reset the hardware with the new settings */
 	e1000e_reset(adapter);
 
-	/* If the controller has AMT, do not set DRV_LOAD until the interface
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
 	 * is up. For all other cases, let the f/w know that the h/w is now
-	 * under the control of the driver. */
+	 * under the control of the driver.
+	 */
 	if (!(adapter->flags & FLAG_HAS_AMT) ||
 	    !e1000e_check_mng_mode(&adapter->hw))
 		e1000_get_hw_control(adapter);
@@ -4013,16 +4134,20 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	/* flush_scheduled work may reschedule our watchdog task, so
-	 * explicitly disable watchdog tasks from being rescheduled */
+	/*
+	 * flush_scheduled work may reschedule our watchdog task, so
+	 * explicitly disable watchdog tasks from being rescheduled
+	 */
 	set_bit(__E1000_DOWN, &adapter->state);
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
 	flush_scheduled_work();
 
-	/* Release control of h/w to f/w. If f/w is AMT enabled, this
-	 * would have already happened in close and is redundant. */
+	/*
+	 * Release control of h/w to f/w. If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
 	e1000_release_hw_control(adapter);
 
 	unregister_netdev(netdev);
@@ -4060,13 +4185,16 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
 	  board_80003es2lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
@@ -4075,6 +4203,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	  board_80003es2lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
 	  board_80003es2lan },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
@@ -4082,6 +4211,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
@@ -4099,7 +4229,7 @@ static struct pci_driver e1000_driver = {
 	.probe = e1000_probe,
 	.remove = __devexit_p(e1000_remove),
 #ifdef CONFIG_PM
-	/* Power Managment Hooks */
+	/* Power Management Hooks */
 	.suspend = e1000_suspend,
 	.resume = e1000_resume,
 #endif
@@ -4118,7 +4248,7 @@ static int __init e1000_init_module(void)
 	int ret;
 	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
 	       e1000e_driver_name, e1000e_driver_version);
-	printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n",
+	printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
 	       e1000e_driver_name);
 	ret = pci_register_driver(&e1000_driver);
 