author		Bruce Allan <bruce.w.allan@intel.com>	2012-11-28 04:28:37 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2012-12-01 06:37:22 -0500
commit		e921eb1ac411a32b98fa1a9ccbba1b24fae8de2d (patch)
tree		9a3e5a63a4fec7eef24fd3e4929cf2b97ed1a9ed /drivers/net/ethernet/intel/e1000e/netdev.c
parent		daf56e406a94675d454f996cab56c3d0b0a0d8a6 (diff)
e1000e: cosmetic cleanup of comments
Update comments to conform to the preferred style for networking code as
described in ./Documentation/CodingStyle and checked for in the recently
added checkpatch NETWORKING_BLOCK_COMMENT_STYLE test.
Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
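
For reference, the comment style that checkpatch's NETWORKING_BLOCK_COMMENT_STYLE test enforces (and that every hunk below converts to) looks like this sketch:

/* The preferred networking block comment starts its text on the
 * same line as the opening marker; there is no lone opening line,
 * and the closing marker sits on its own line.
 */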
Diffstat (limited to 'drivers/net/ethernet/intel/e1000e/netdev.c')
-rw-r--r--	drivers/net/ethernet/intel/e1000e/netdev.c	313
1 file changed, 118 insertions(+), 195 deletions(-)

diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index dadb13be479a..6d06ed4e34b1 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -146,9 +146,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 	{0, NULL}
 };
 
-/*
+/**
  * e1000_regdump - register printout routine
- */
+ * @hw: pointer to the HW structure
+ * @reginfo: pointer to the register info table
+ **/
 static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
 {
 	int n = 0;
@@ -196,9 +198,10 @@ static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
 	}
 }
 
-/*
+/**
  * e1000e_dump - Print registers, Tx-ring and Rx-ring
- */
+ * @adapter: board private structure
+ **/
 static void e1000e_dump(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -623,8 +626,7 @@ map_skb:
 		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 
 		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
-			/*
-			 * Force memory writes to complete before letting h/w
+			/* Force memory writes to complete before letting h/w
 			 * know there are new descriptors to fetch. (Only
 			 * applicable for weak-ordered memory model archs,
 			 * such as IA-64).
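
The barrier this comment (and its several repetitions below) guards is the standard descriptor-producer sequence; a minimal sketch in driver context, using names from the hunk above, illustrative only:

	rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
	wmb();			/* descriptor contents visible before the doorbell */
	writel(i, rx_ring->tail);	/* h/w may fetch from this point on */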
@@ -692,8 +694,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
 					goto no_buffers;
 				}
 			}
-			/*
-			 * Refresh the desc even if buffer_addrs
+			/* Refresh the desc even if buffer_addrs
 			 * didn't change because each write-back
 			 * erases this info.
 			 */
@@ -726,8 +727,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
 		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
 		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
-			/*
-			 * Force memory writes to complete before letting h/w
+			/* Force memory writes to complete before letting h/w
 			 * know there are new descriptors to fetch. (Only
 			 * applicable for weak-ordered memory model archs,
 			 * such as IA-64).
@@ -817,7 +817,8 @@ check_page:
 		/* Force memory writes to complete before letting h/w
 		 * know there are new descriptors to fetch. (Only
 		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
+		 * such as IA-64).
+		 */
 		wmb();
 		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 			e1000e_update_rdt_wa(rx_ring, i);
@@ -891,8 +892,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 
 		length = le16_to_cpu(rx_desc->wb.upper.length);
 
-		/*
-		 * !EOP means multiple descriptors were used to store a single
+		/* !EOP means multiple descriptors were used to store a single
 		 * packet, if that's the case we need to toss it. In fact, we
 		 * need to toss every packet with the EOP bit clear and the
 		 * next frame that _does_ have the EOP bit set, as it is by
@@ -933,8 +933,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		total_rx_bytes += length;
 		total_rx_packets++;
 
-		/*
-		 * code added for copybreak, this should improve
+		/* code added for copybreak, this should improve
 		 * performance for small packets with large amounts
 		 * of reassembly being done in the stack
 		 */
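
Copybreak trades one small memcpy for recycling the large receive buffer; a sketch of the idea, assuming the e1000-style buffer_info bookkeeping (illustrative, not lines from this commit):

	if (length < copybreak) {
		struct sk_buff *new_skb =
			netdev_alloc_skb_ip_align(netdev, length);
		if (new_skb) {
			skb_copy_to_linear_data(new_skb, skb->data, length);
			buffer_info->skb = skb;	/* recycle the big buffer in place */
			skb = new_skb;		/* pass the small copy up the stack */
		}
	}
	skb_put(skb, length);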
@@ -1032,15 +1031,13 @@ static void e1000_print_hw_hang(struct work_struct *work)
 
 	if (!adapter->tx_hang_recheck &&
 	    (adapter->flags2 & FLAG2_DMA_BURST)) {
-		/*
-		 * May be block on write-back, flush and detect again
+		/* May be block on write-back, flush and detect again
 		 * flush pending descriptor writebacks to memory
 		 */
 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
 		/* execute the writes immediately */
 		e1e_flush();
-		/*
-		 * Due to rare timing issues, write to TIDV again to ensure
+		/* Due to rare timing issues, write to TIDV again to ensure
 		 * the write is successful
 		 */
 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -1169,8 +1166,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
 	}
 
 	if (adapter->detect_tx_hung) {
-		/*
-		 * Detect a transmit hang in hardware, this serializes the
+		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i
 		 */
 		adapter->detect_tx_hung = false;
@@ -1270,14 +1266,12 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
 		skb_put(skb, length);
 
 		{
-			/*
-			 * this looks ugly, but it seems compiler issues make
+			/* this looks ugly, but it seems compiler issues make
 			 * it more efficient than reusing j
 			 */
 			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
 
-			/*
-			 * page alloc/put takes too long and effects small
+			/* page alloc/put takes too long and effects small
 			 * packet throughput, so unsplit small packets and
 			 * save the alloc/put only valid in softirq (napi)
 			 * context to call kmap_*
@@ -1288,8 +1282,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
 
 			ps_page = &buffer_info->ps_pages[0];
 
-			/*
-			 * there is no documentation about how to call
+			/* there is no documentation about how to call
 			 * kmap_atomic, so we can't hold the mapping
 			 * very long
 			 */
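
kmap_atomic() mappings disable preemption, so the driver maps, copies, and unmaps immediately; a sketch of that discipline as a fragment, using names from the surrounding function:

	vaddr = kmap_atomic(ps_page->page);
	memcpy(skb_tail_pointer(skb), vaddr, l1);	/* grab the small payload */
	kunmap_atomic(vaddr);			/* release before doing anything slow */
	skb_put(skb, l1);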
@@ -1486,14 +1479,16 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 						   skb_shinfo(rxtop)->nr_frags,
 						   buffer_info->page, 0, length);
 				/* re-use the current skb, we only consumed the
-				 * page */
+				 * page
+				 */
 				buffer_info->skb = skb;
 				skb = rxtop;
 				rxtop = NULL;
 				e1000_consume_page(buffer_info, skb, length);
 			} else {
 				/* no chain, got EOP, this buf is the packet
-				 * copybreak to save the put_page/alloc_page */
+				 * copybreak to save the put_page/alloc_page
+				 */
 				if (length <= copybreak &&
 				    skb_tailroom(skb) >= length) {
 					u8 *vaddr;
@@ -1502,7 +1497,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 							   length);
 					kunmap_atomic(vaddr);
 					/* re-use the page, so don't erase
-					 * buffer_info->page */
+					 * buffer_info->page
+					 */
 					skb_put(skb, length);
 				} else {
 					skb_fill_page_desc(skb, 0,
@@ -1656,22 +1652,17 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = er32(ICR);
 
-	/*
-	 * read ICR disables interrupts using IAM
-	 */
-
+	/* read ICR disables interrupts using IAM */
 	if (icr & E1000_ICR_LSC) {
 		hw->mac.get_link_status = true;
-		/*
-		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		/* ICH8 workaround-- Call gig speed drop workaround on cable
 		 * disconnect (LSC) before accessing any PHY registers
 		 */
 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 		    (!(er32(STATUS) & E1000_STATUS_LU)))
 			schedule_work(&adapter->downshift_task);
 
-		/*
-		 * 80003ES2LAN workaround-- For packet buffer work-around on
+		/* 80003ES2LAN workaround-- For packet buffer work-around on
 		 * link down event; disable receives here in the ISR and reset
 		 * adapter in watchdog
 		 */
@@ -1713,31 +1704,27 @@ static irqreturn_t e1000_intr(int irq, void *data)
 	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
 		return IRQ_NONE;  /* Not our interrupt */
 
-	/*
-	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
 	 * not set, then the adapter didn't send an interrupt
 	 */
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
-	/*
-	 * Interrupt Auto-Mask...upon reading ICR,
+	/* Interrupt Auto-Mask...upon reading ICR,
 	 * interrupts are masked. No need for the
 	 * IMC write
 	 */
 
 	if (icr & E1000_ICR_LSC) {
 		hw->mac.get_link_status = true;
-		/*
-		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		/* ICH8 workaround-- Call gig speed drop workaround on cable
 		 * disconnect (LSC) before accessing any PHY registers
 		 */
 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 		    (!(er32(STATUS) & E1000_STATUS_LU)))
 			schedule_work(&adapter->downshift_task);
 
-		/*
-		 * 80003ES2LAN workaround--
+		/* 80003ES2LAN workaround--
 		 * For packet buffer work-around on link down event;
 		 * disable receives here in the ISR and
 		 * reset adapter in watchdog
@@ -2469,8 +2456,7 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
 
 set_itr_now:
 	if (new_itr != adapter->itr) {
-		/*
-		 * this attempts to bias the interrupt rate towards Bulk
+		/* this attempts to bias the interrupt rate towards Bulk
 		 * by adding intermediate steps when interrupt rate is
 		 * increasing
 		 */
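
The "intermediate steps" amount to climbing toward a higher interrupt rate gradually while dropping in one jump; a sketch of that damping, assuming the new_itr/adapter->itr variables from this function:

	/* raise the rate in quarter steps, lower it immediately */
	new_itr = new_itr > adapter->itr ?
		  min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
	adapter->itr = new_itr;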
@@ -2740,8 +2726,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
 
 	manc = er32(MANC);
 
-	/*
-	 * enable receiving management packets to the host. this will probably
+	/* enable receiving management packets to the host. this will probably
 	 * generate destination unreachable messages from the host OS, but
 	 * the packets will be handled on SMBUS
 	 */
@@ -2754,8 +2739,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
 		break;
 	case e1000_82574:
 	case e1000_82583:
-		/*
-		 * Check if IPMI pass-through decision filter already exists;
+		/* Check if IPMI pass-through decision filter already exists;
 		 * if so, enable it.
 		 */
 		for (i = 0, j = 0; i < 8; i++) {
@@ -2827,8 +2811,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 		u32 txdctl = er32(TXDCTL(0));
 		txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
 			    E1000_TXDCTL_WTHRESH);
-		/*
-		 * set up some performance related parameters to encourage the
+		/* set up some performance related parameters to encourage the
 		 * hardware to use the bus more efficiently in bursts, depends
 		 * on the tx_int_delay to be enabled,
 		 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
@@ -2845,8 +2828,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 
 	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
 		tarc = er32(TARC(0));
-		/*
-		 * set the speed mode bit, we'll clear it if we're not at
+		/* set the speed mode bit, we'll clear it if we're not at
 		 * gigabit link later
 		 */
 #define SPEED_MODE_BIT (1 << 21)
@@ -2967,8 +2949,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 	rfctl |= E1000_RFCTL_EXTEN;
 	ew32(RFCTL, rfctl);
 
-	/*
-	 * 82571 and greater support packet-split where the protocol
+	/* 82571 and greater support packet-split where the protocol
 	 * header is placed in skb->data and the packet data is
 	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
 	 * In the case of a non-split, skb->data is linearly filled,
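
In the split case described here, the stack sees a linear header plus paged payload; a sketch of how such an skb is assembled (hdr_len, page and data_len are illustrative):

	skb_put(skb, hdr_len);			/* protocol header is linear */
	skb_fill_page_desc(skb, 0, page, 0, data_len);
	skb->len += data_len;			/* payload stays in the page */
	skb->data_len += data_len;
	skb->truesize += PAGE_SIZE;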
@@ -3016,7 +2997,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 	/* This is useful for sniffing bad packets. */
 	if (adapter->netdev->features & NETIF_F_RXALL) {
 		/* UPE and MPE will be handled by normal PROMISC logic
-		 * in e1000e_set_rx_mode */
+		 * in e1000e_set_rx_mode
+		 */
 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -3071,8 +3053,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	usleep_range(10000, 20000);
 
 	if (adapter->flags2 & FLAG2_DMA_BURST) {
-		/*
-		 * set the writeback threshold (only takes effect if the RDTR
+		/* set the writeback threshold (only takes effect if the RDTR
 		 * is set). set GRAN=1 and write back up to 0x4 worth, and
 		 * enable prefetching of 0x20 Rx descriptors
 		 * granularity = 01
@@ -3083,8 +3064,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 		ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
 		ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
 
-		/*
-		 * override the delay timers for enabling bursting, only if
+		/* override the delay timers for enabling bursting, only if
 		 * the value was not set by the user via module options
 		 */
 		if (adapter->rx_int_delay == DEFAULT_RDTR)
@@ -3108,8 +3088,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	ew32(CTRL_EXT, ctrl_ext);
 	e1e_flush();
 
-	/*
-	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
 	rdba = rx_ring->dma;
@@ -3130,8 +3109,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 	ew32(RXCSUM, rxcsum);
 
 	if (adapter->hw.mac.type == e1000_pch2lan) {
-		/*
-		 * With jumbo frames, excessive C-state transition
+		/* With jumbo frames, excessive C-state transition
 		 * latencies result in dropped transactions.
 		 */
 		if (adapter->netdev->mtu > ETH_DATA_LEN) {
@@ -3216,8 +3194,7 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
 	if (!netdev_uc_empty(netdev) && rar_entries) {
 		struct netdev_hw_addr *ha;
 
-		/*
-		 * write the addresses in reverse order to avoid write
+		/* write the addresses in reverse order to avoid write
 		 * combining
 		 */
 		netdev_for_each_uc_addr(ha, netdev) {
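
Descending RAR indices mean successive MMIO writes never land on adjacent ascending addresses, which is what write combining would need; a sketch of the loop, assuming the rar_entries counter from this function and an e1000e-style rar_set helper:

	netdev_for_each_uc_addr(ha, netdev) {
		if (!rar_entries)
			break;
		e1000e_rar_set(hw, ha->addr, rar_entries--);	/* top slot first */
	}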
@@ -3269,8 +3246,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
 		if (netdev->flags & IFF_ALLMULTI) {
 			rctl |= E1000_RCTL_MPE;
 		} else {
-			/*
-			 * Write addresses to the MTA, if the attempt fails
+			/* Write addresses to the MTA, if the attempt fails
 			 * then we should just turn on promiscuous mode so
 			 * that we can at least receive multicast traffic
 			 */
@@ -3279,8 +3255,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
 				rctl |= E1000_RCTL_MPE;
 		}
 		e1000e_vlan_filter_enable(adapter);
-		/*
-		 * Write addresses to available RAR registers, if there is not
+		/* Write addresses to available RAR registers, if there is not
 		 * sufficient space to store all the addresses then enable
 		 * unicast promiscuous mode
 		 */
@@ -3315,8 +3290,7 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
 	for (i = 0; i < 32; i++)
 		ew32(RETA(i), 0);
 
-	/*
-	 * Disable raw packet checksumming so that RSS hash is placed in
+	/* Disable raw packet checksumming so that RSS hash is placed in
 	 * descriptor on writeback.
 	 */
 	rxcsum = er32(RXCSUM);
@@ -3408,8 +3382,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		ew32(PBA, pba);
 
 	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
-		/*
-		 * To maintain wire speed transmits, the Tx FIFO should be
+		/* To maintain wire speed transmits, the Tx FIFO should be
 		 * large enough to accommodate two full transmit packets,
 		 * rounded up to the next 1KB and expressed in KB. Likewise,
 		 * the Rx FIFO should be large enough to accommodate at least
@@ -3421,8 +3394,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		tx_space = pba >> 16;
 		/* lower 16 bits has Rx packet buffer allocation size in KB */
 		pba &= 0xffff;
-		/*
-		 * the Tx fifo also stores 16 bytes of information about the Tx
+		/* the Tx fifo also stores 16 bytes of information about the Tx
 		 * but don't include ethernet FCS because hardware appends it
 		 */
 		min_tx_space = (adapter->max_frame_size +
@@ -3435,8 +3407,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		min_rx_space = ALIGN(min_rx_space, 1024);
 		min_rx_space >>= 10;
 
-		/*
-		 * If current Tx allocation is less than the min Tx FIFO size,
+		/* If current Tx allocation is less than the min Tx FIFO size,
 		 * and the min Tx FIFO size is less than the current Rx FIFO
 		 * allocation, take space away from current Rx allocation
 		 */
@@ -3444,8 +3415,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		    ((min_tx_space - tx_space) < pba)) {
 			pba -= min_tx_space - tx_space;
 
-			/*
-			 * if short on Rx space, Rx wins and must trump Tx
+			/* if short on Rx space, Rx wins and must trump Tx
 			 * adjustment
 			 */
 			if (pba < min_rx_space)
@@ -3455,8 +3425,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		ew32(PBA, pba);
 	}
 
-	/*
-	 * flow control settings
+	/* flow control settings
 	 *
 	 * The high water mark must be low enough to fit one full frame
 	 * (or the size used for early receive) above it in the Rx FIFO.
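
A sketch of the water-mark arithmetic this comment introduces, assuming pba counts KB and max_frame_size counts bytes (constants illustrative):

	/* leave at least one max frame of headroom above the high water mark */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - adapter->max_frame_size));
	fc->high_water = hwm & E1000_FCRTH_RTH;	/* 8-byte granularity */
	fc->low_water = fc->high_water - 8;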
@@ -3490,8 +3459,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		fc->low_water = fc->high_water - 8;
 		break;
 	case e1000_pchlan:
-		/*
-		 * Workaround PCH LOM adapter hangs with certain network
+		/* Workaround PCH LOM adapter hangs with certain network
 		 * loads. If hangs persist, try disabling Tx flow control.
 		 */
 		if (adapter->netdev->mtu > ETH_DATA_LEN) {
@@ -3516,8 +3484,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		break;
 	}
 
-	/*
-	 * Alignment of Tx data is on an arbitrary byte boundary with the
+	/* Alignment of Tx data is on an arbitrary byte boundary with the
 	 * maximum size per Tx descriptor limited only to the transmit
 	 * allocation of the packet buffer minus 96 bytes with an upper
 	 * limit of 24KB due to receive synchronization limitations.
@@ -3525,8 +3492,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
 				       24 << 10);
 
-	/*
-	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+	/* Disable Adaptive Interrupt Moderation if 2 full packets cannot
 	 * fit in receive buffer.
 	 */
 	if (adapter->itr_setting & 0x3) {
@@ -3549,8 +3515,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	/* Allow time for pending master requests to run */
 	mac->ops.reset_hw(hw);
 
-	/*
-	 * For parts with AMT enabled, let the firmware know
+	/* For parts with AMT enabled, let the firmware know
 	 * that the network interface is in control
 	 */
 	if (adapter->flags & FLAG_HAS_AMT)
@@ -3579,8 +3544,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
 	    !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
 		u16 phy_data = 0;
-		/*
-		 * speed up time to link by disabling smart power down, ignore
+		/* speed up time to link by disabling smart power down, ignore
 		 * the return value of this function because there is nothing
 		 * different we would do if it failed
 		 */
@@ -3628,8 +3592,7 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
 	/* execute the writes immediately */
 	e1e_flush();
 
-	/*
-	 * due to rare timing issues, write to TIDV/RDTR again to ensure the
+	/* due to rare timing issues, write to TIDV/RDTR again to ensure the
 	 * write is successful
 	 */
 	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -3647,8 +3610,7 @@ void e1000e_down(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl, rctl;
 
-	/*
-	 * signal that we're down so the interrupt handler does not
+	/* signal that we're down so the interrupt handler does not
 	 * reschedule our watchdog timer
 	 */
 	set_bit(__E1000_DOWN, &adapter->state);
@@ -3691,8 +3653,7 @@ void e1000e_down(struct e1000_adapter *adapter)
 	if (!pci_channel_offline(adapter->pdev))
 		e1000e_reset(adapter);
 
-	/*
-	 * TODO: for power management, we could drop the link and
+	/* TODO: for power management, we could drop the link and
 	 * pci_disable_device here.
 	 */
 }
@@ -3755,8 +3716,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
 	e_dbg("icr is %08X\n", icr);
 	if (icr & E1000_ICR_RXSEQ) {
 		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
-		/*
-		 * Force memory writes to complete before acknowledging the
+		/* Force memory writes to complete before acknowledging the
 		 * interrupt is handled.
 		 */
 		wmb();
@@ -3786,7 +3746,8 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 	e1000e_reset_interrupt_capability(adapter);
 
 	/* Assume that the test fails, if it succeeds then the test
-	 * MSI irq handler will unset this flag */
+	 * MSI irq handler will unset this flag
+	 */
 	adapter->flags |= FLAG_MSI_TEST_FAILED;
 
 	err = pci_enable_msi(adapter->pdev);
@@ -3800,8 +3761,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 		goto msi_test_failed;
 	}
 
-	/*
-	 * Force memory writes to complete before enabling and firing an
+	/* Force memory writes to complete before enabling and firing an
 	 * interrupt.
 	 */
 	wmb();
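
The test then fires a software-triggered interrupt and checks whether the MSI handler cleared the failure flag; a sketch of that sequence, assuming the ICS-triggered RXSEQ event this driver uses:

	ew32(ICS, E1000_ICS_RXSEQ);	/* request the test interrupt */
	e1e_flush();
	msleep(100);			/* allow the IRQ to be delivered */
	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
		e_info("MSI interrupt test failed, using legacy interrupt.\n");
	}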
@@ -3901,8 +3861,7 @@ static int e1000_open(struct net_device *netdev)
 	if (err)
 		goto err_setup_rx;
 
-	/*
-	 * If AMT is enabled, let the firmware know that the network
+	/* If AMT is enabled, let the firmware know that the network
 	 * interface is now open and reset the part to a known state.
 	 */
 	if (adapter->flags & FLAG_HAS_AMT) {
@@ -3923,8 +3882,7 @@ static int e1000_open(struct net_device *netdev)
 			   PM_QOS_CPU_DMA_LATENCY,
 			   PM_QOS_DEFAULT_VALUE);
 
-	/*
-	 * before we allocate an interrupt, we must be ready to handle it.
+	/* before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 	 * as soon as we call pci_request_irq, so we have to setup our
 	 * clean_rx handler before we do so.
@@ -3935,8 +3893,7 @@ static int e1000_open(struct net_device *netdev)
 	if (err)
 		goto err_req_irq;
 
-	/*
-	 * Work around PCIe errata with MSI interrupts causing some chipsets to
+	/* Work around PCIe errata with MSI interrupts causing some chipsets to
 	 * ignore e1000e MSI messages, which means we need to test our MSI
 	 * interrupt now
 	 */
@@ -4017,16 +3974,14 @@ static int e1000_close(struct net_device *netdev)
 	e1000e_free_tx_resources(adapter->tx_ring);
 	e1000e_free_rx_resources(adapter->rx_ring);
 
-	/*
-	 * kill manageability vlan ID if supported, but not if a vlan with
+	/* kill manageability vlan ID if supported, but not if a vlan with
 	 * the same ID is registered on the host OS (let 8021q kill it)
 	 */
 	if (adapter->hw.mng_cookie.status &
 	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 
-	/*
-	 * If AMT is enabled, let the firmware know that the network
+	/* If AMT is enabled, let the firmware know that the network
 	 * interface is now closed
 	 */
 	if ((adapter->flags & FLAG_HAS_AMT) &&
@@ -4065,8 +4020,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 		/* activate the work around */
 		e1000e_set_laa_state_82571(&adapter->hw, 1);
 
-		/*
-		 * Hold a copy of the LAA in RAR[14] This is done so that
+		/* Hold a copy of the LAA in RAR[14] This is done so that
 		 * between the time RAR[0] gets clobbered and the time it
 		 * gets fixed (in e1000_watchdog), the actual LAA is in one
 		 * of the RARs and no incoming packets directed to this port
@@ -4099,10 +4053,13 @@ static void e1000e_update_phy_task(struct work_struct *work)
 	e1000_get_phy_info(&adapter->hw);
 }
 
-/*
+/**
+ * e1000_update_phy_info - timer call-back to update PHY info
+ * @data: pointer to adapter cast into an unsigned long
+ *
  * Need to wait a few seconds after link up to get diagnostic information from
  * the phy
- */
+ **/
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
@@ -4129,8 +4086,7 @@ static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
 	if (ret_val)
 		return;
 
-	/*
-	 * A page set is expensive so check if already on desired page.
+	/* A page set is expensive so check if already on desired page.
 	 * If not, set to the page with the PHY status registers.
 	 */
 	hw->phy.addr = 1;
@@ -4201,8 +4157,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 
-	/*
-	 * Prevent stats update while adapter is being reset, or if the pci
+	/* Prevent stats update while adapter is being reset, or if the pci
 	 * connection is down.
 	 */
 	if (adapter->link_speed == 0)
@@ -4270,8 +4225,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
 
 	/* Rx Errors */
 
-	/*
-	 * RLEC on some newer hardware can be incorrect so build
+	/* RLEC on some newer hardware can be incorrect so build
 	 * our own version based on RUC and ROC
 	 */
 	netdev->stats.rx_errors = adapter->stats.rxerrc +
@@ -4323,8 +4277,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
 		if (ret_val)
 			e_warn("Error reading PHY register\n");
 	} else {
-		/*
-		 * Do not read PHY registers if link is not up
+		/* Do not read PHY registers if link is not up
 		 * Set values to typical power-on defaults
 		 */
 		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
@@ -4362,8 +4315,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
 	bool link_active = false;
 	s32 ret_val = 0;
 
-	/*
-	 * get_link_status is set on LSC (link status) interrupt or
+	/* get_link_status is set on LSC (link status) interrupt or
 	 * Rx sequence error interrupt. get_link_status will stay
 	 * false until the check_for_link establishes link
 	 * for copper adapters ONLY
@@ -4415,8 +4367,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 
-	/*
-	 * With 82574 controllers, PHY needs to be checked periodically
+	/* With 82574 controllers, PHY needs to be checked periodically
 	 * for hung state and reset, if two calls return true
 	 */
 	if (e1000_check_phy_82574(hw))
@@ -4484,8 +4435,7 @@ static void e1000_watchdog_task(struct work_struct *work)
 						   &adapter->link_speed,
 						   &adapter->link_duplex);
 			e1000_print_link_info(adapter);
-			/*
-			 * On supported PHYs, check for duplex mismatch only
+			/* On supported PHYs, check for duplex mismatch only
 			 * if link has autonegotiated at 10/100 half
 			 */
 			if ((hw->phy.type == e1000_phy_igp_3 ||
@@ -4515,8 +4465,7 @@ static void e1000_watchdog_task(struct work_struct *work)
 				break;
 			}
 
-			/*
-			 * workaround: re-program speed mode bit after
+			/* workaround: re-program speed mode bit after
 			 * link-up event
 			 */
 			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
@@ -4527,8 +4476,7 @@ static void e1000_watchdog_task(struct work_struct *work)
 				ew32(TARC(0), tarc0);
 			}
 
-			/*
-			 * disable TSO for pcie and 10/100 speeds, to avoid
+			/* disable TSO for pcie and 10/100 speeds, to avoid
 			 * some hardware issues
 			 */
 			if (!(adapter->flags & FLAG_TSO_FORCE)) {
@@ -4549,16 +4497,14 @@ static void e1000_watchdog_task(struct work_struct *work)
 				}
 			}
 
-			/*
-			 * enable transmits in the hardware, need to do this
+			/* enable transmits in the hardware, need to do this
 			 * after setting TARC(0)
 			 */
 			tctl = er32(TCTL);
 			tctl |= E1000_TCTL_EN;
 			ew32(TCTL, tctl);
 
-			/*
-			 * Perform any post-link-up configuration before
+			/* Perform any post-link-up configuration before
 			 * reporting link up.
 			 */
 			if (phy->ops.cfg_on_link_up)
@@ -4609,8 +4555,7 @@ link_up:
 
 	if (!netif_carrier_ok(netdev) &&
 	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
-		/*
-		 * We've lost link, so the controller stops DMA,
+		/* We've lost link, so the controller stops DMA,
 		 * but we've got queued Tx work that's never going
 		 * to get done, so reset controller to flush Tx.
 		 * (Do the reset outside of interrupt context).
@@ -4622,8 +4567,7 @@ link_up:
 
 	/* Simple mode for Interrupt Throttle Rate (ITR) */
 	if (adapter->itr_setting == 4) {
-		/*
-		 * Symmetric Tx/Rx gets a reduced ITR=2000;
+		/* Symmetric Tx/Rx gets a reduced ITR=2000;
 		 * Total asymmetrical Tx or Rx gets ITR=8000;
 		 * everyone else is between 2000-8000.
 		 */
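
A sketch of how the 2000-8000 band could be derived from how symmetric the Tx/Rx byte counts are (formula illustrative, not quoted from this commit):

	u32 goc = (adapter->gotc + adapter->gorc) / 10000;
	u32 dif = (adapter->gotc > adapter->gorc ?
		   adapter->gotc - adapter->gorc :
		   adapter->gorc - adapter->gotc) / 10000;
	u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
	ew32(ITR, 1000000000 / (itr * 256));	/* ITR register counts 256ns units */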
@@ -4648,8 +4592,7 @@ link_up:
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = true;
 
-	/*
-	 * With 82571 controllers, LAA may be overwritten due to controller
+	/* With 82571 controllers, LAA may be overwritten due to controller
 	 * reset from the other port. Set the appropriate LAA in RAR[0]
 	 */
 	if (e1000e_get_laa_state_82571(hw))
@@ -4948,8 +4891,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
 
-	/*
-	 * Force memory writes to complete before letting h/w
+	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch. (Only
 	 * applicable for weak-ordered memory model archs,
 	 * such as IA-64).
@@ -4963,8 +4905,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
 	else
 		writel(i, tx_ring->tail);
 
-	/*
-	 * we need this if more than one processor can write to our tail
+	/* we need this if more than one processor can write to our tail
 	 * at a time, it synchronizes IO on IA64/Altix systems
 	 */
 	mmiowb();
@@ -5014,15 +4955,13 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 	struct e1000_adapter *adapter = tx_ring->adapter;
 
 	netif_stop_queue(adapter->netdev);
-	/*
-	 * Herbert's original patch had:
+	/* Herbert's original patch had:
 	 *  smp_mb__after_netif_stop_queue();
 	 * but since that doesn't exist yet, just open code it.
 	 */
 	smp_mb();
 
-	/*
-	 * We need to check again in a case another CPU has just
+	/* We need to check again in a case another CPU has just
 	 * made room available.
 	 */
 	if (e1000_desc_unused(tx_ring) < size)
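
The smp_mb() here pairs with the consumer that frees descriptors; a sketch of the wake side of that handshake (fragment, simplified from the clean-Tx path, with TX_WAKE_THRESHOLD as this driver defines it):

	if (netif_queue_stopped(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		smp_mb();		/* order the frees against the stopped test */
		netif_wake_queue(netdev);
	}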
@@ -5067,8 +5006,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	/*
-	 * The minimum packet size with TCTL.PSP set is 17 bytes so
+	/* The minimum packet size with TCTL.PSP set is 17 bytes so
 	 * pad skb in order to meet this minimum size requirement
 	 */
 	if (unlikely(skb->len < 17)) {
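
A sketch of the padding fix-up the comment calls for, using the generic skb helpers (fragment):

	if (unlikely(skb->len < 17)) {
		if (skb_pad(skb, 17 - skb->len))
			return NETDEV_TX_OK;	/* pad allocation failed, drop */
		skb->len = 17;
		skb_set_tail_pointer(skb, 17);
	}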
@@ -5082,14 +5020,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	if (mss) {
 		u8 hdr_len;
 
-		/*
-		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
+		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
 		 * points to just header, pull a few bytes of payload from
 		 * frags into skb->data
 		 */
 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		/*
-		 * we do this workaround for ES2LAN, but it is un-necessary,
+		/* we do this workaround for ES2LAN, but it is un-necessary,
 		 * avoiding it could save a lot of cycles
 		 */
 		if (skb->data_len && (hdr_len == len)) {
@@ -5120,8 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	if (adapter->hw.mac.tx_pkt_filtering)
 		e1000_transfer_dhcp_info(adapter, skb);
 
-	/*
-	 * need: count + 2 desc gap to keep tail from touching
+	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time
 	 */
 	if (e1000_maybe_stop_tx(tx_ring, count + 2))
@@ -5145,8 +5080,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	else if (e1000_tx_csum(tx_ring, skb))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
 
-	/*
-	 * Old method was to assume IPv4 packet by default if TSO was enabled.
+	/* Old method was to assume IPv4 packet by default if TSO was enabled.
 	 * 82571 hardware supports TSO capabilities for IPv6 as well...
 	 * no longer assume, we must.
 	 */
@@ -5233,8 +5167,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
 
 	/* Rx Errors */
 
-	/*
-	 * RLEC on some newer hardware can be incorrect so build
+	/* RLEC on some newer hardware can be incorrect so build
 	 * our own version based on RUC and ROC
 	 */
 	stats->rx_errors = adapter->stats.rxerrc +
@@ -5303,8 +5236,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	if (netif_running(netdev))
 		e1000e_down(adapter);
 
-	/*
-	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
 	 * larger slab size.
 	 * i.e. RXBUFFER_2048 --> size-4096 slab
@@ -5566,8 +5498,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	if (adapter->hw.phy.type == e1000_phy_igp_3)
 		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
 
-	/*
-	 * Release control of h/w to f/w. If f/w is AMT enabled, this
+	/* Release control of h/w to f/w. If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
 	e1000e_release_hw_control(adapter);
@@ -5594,8 +5525,7 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, | |||
5594 | struct net_device *netdev = pci_get_drvdata(pdev); | 5525 | struct net_device *netdev = pci_get_drvdata(pdev); |
5595 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5526 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5596 | 5527 | ||
5597 | /* | 5528 | /* The pci-e switch on some quad port adapters will report a |
5598 | * The pci-e switch on some quad port adapters will report a | ||
5599 | * correctable error when the MAC transitions from D0 to D3. To | 5529 | * correctable error when the MAC transitions from D0 to D3. To |
5600 | * prevent this we need to mask off the correctable errors on the | 5530 | * prevent this we need to mask off the correctable errors on the |
5601 | * downstream port of the pci-e switch. | 5531 | * downstream port of the pci-e switch. |
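[Annotation] The masking amounts to clearing the correctable-error-reporting enable bit on the bridge above the function for the duration of the transition; a hedged sketch (the real code restores the saved value after entering D3):

    struct pci_dev *us_dev = pdev->bus->self; /* port above this function */
    u16 devctl;

    pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
    pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
                               devctl & ~PCI_EXP_DEVCTL_CERE);
    /* ... transition the MAC to D3 ... */
    pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);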
@@ -5624,8 +5554,7 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | |||
5624 | #else | 5554 | #else |
5625 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | 5555 | static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) |
5626 | { | 5556 | { |
5627 | /* | 5557 | /* Both device and parent should have the same ASPM setting. |
5628 | * Both device and parent should have the same ASPM setting. | ||
5629 | * Disable ASPM in downstream component first and then upstream. | 5558 | * Disable ASPM in downstream component first and then upstream. |
5630 | */ | 5559 | */ |
5631 | pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state); | 5560 | pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state); |
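[Annotation] Ordering matters because both ends share the link; a sketch of the two-step clear (`state` carries the LNKCTL ASPM bits to remove):

    struct pci_dev *parent = pdev->bus->self;

    /* Disable ASPM on the endpoint (downstream component) first ... */
    pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
    /* ... then on the upstream component, so both ends agree. */
    if (parent)
            pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, state);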
@@ -5719,8 +5648,7 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
5719 | 5648 | ||
5720 | netif_device_attach(netdev); | 5649 | netif_device_attach(netdev); |
5721 | 5650 | ||
5722 | /* | 5651 | /* If the controller has AMT, do not set DRV_LOAD until the interface |
5723 | * If the controller has AMT, do not set DRV_LOAD until the interface | ||
5724 | * is up. For all other cases, let the f/w know that the h/w is now | 5652 | * is up. For all other cases, let the f/w know that the h/w is now |
5725 | * under the control of the driver. | 5653 | * under the control of the driver. |
5726 | */ | 5654 | */ |
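[Annotation] The counterpart of the release above: DRV_LOAD is claimed immediately only when no AMT firmware could be mid-session; roughly (flag and helper names from this driver):

    /* With AMT, taking DRV_LOAD is deferred until the interface is
     * brought up; otherwise claim the hardware for the driver now. */
    if (!(adapter->flags & FLAG_HAS_AMT))
            e1000e_get_hw_control(adapter);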
@@ -5848,7 +5776,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data) | |||
5848 | return IRQ_HANDLED; | 5776 | return IRQ_HANDLED; |
5849 | } | 5777 | } |
5850 | 5778 | ||
5851 | /* | 5779 | /** |
5780 | * e1000_netpoll | ||
5781 | * @netdev: network interface device structure | ||
5782 | * | ||
5852 | * Polling 'interrupt' - used by things like netconsole to send skbs | 5783 | * Polling 'interrupt' - used by things like netconsole to send skbs |
5853 | * without having to re-enable interrupts. It's not called while | 5784 | * without having to re-enable interrupts. It's not called while |
5854 | * the interrupt routine is executing. | 5785 | * the interrupt routine is executing. |
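[Annotation] For the legacy-interrupt case the handler reduces to running the ISR by hand with the line masked; a minimal sketch (the driver's real version also dispatches MSI/MSI-X variants):

    static void e1000_netpoll(struct net_device *netdev)
    {
            struct e1000_adapter *adapter = netdev_priv(netdev);

            /* Mask the line, invoke the interrupt routine, unmask. */
            disable_irq(adapter->pdev->irq);
            e1000_intr(adapter->pdev->irq, netdev);
            enable_irq(adapter->pdev->irq);
    }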
@@ -5973,8 +5904,7 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
5973 | 5904 | ||
5974 | netif_device_attach(netdev); | 5905 | netif_device_attach(netdev); |
5975 | 5906 | ||
5976 | /* | 5907 | /* If the controller has AMT, do not set DRV_LOAD until the interface |
5977 | * If the controller has AMT, do not set DRV_LOAD until the interface | ||
5978 | * is up. For all other cases, let the f/w know that the h/w is now | 5908 | * is up. For all other cases, let the f/w know that the h/w is now |
5979 | * under the control of the driver. | 5909 | * under the control of the driver. |
5980 | */ | 5910 | */ |
@@ -6273,14 +6203,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
6273 | if (e1000e_enable_mng_pass_thru(&adapter->hw)) | 6203 | if (e1000e_enable_mng_pass_thru(&adapter->hw)) |
6274 | adapter->flags |= FLAG_MNG_PT_ENABLED; | 6204 | adapter->flags |= FLAG_MNG_PT_ENABLED; |
6275 | 6205 | ||
6276 | /* | 6206 | /* before reading the NVM, reset the controller to |
6277 | * before reading the NVM, reset the controller to | ||
6278 | * put the device in a known good starting state | 6207 | * put the device in a known good starting state |
6279 | */ | 6208 | */ |
6280 | adapter->hw.mac.ops.reset_hw(&adapter->hw); | 6209 | adapter->hw.mac.ops.reset_hw(&adapter->hw); |
6281 | 6210 | ||
6282 | /* | 6211 | /* systems with ASPM and others may see the checksum fail on the first |
6283 | * systems with ASPM and others may see the checksum fail on the first | ||
6284 | * attempt. Let's give it a few tries | 6212 | * attempt. Let's give it a few tries |
6285 | */ | 6213 | */ |
6286 | for (i = 0;; i++) { | 6214 | for (i = 0;; i++) { |
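[Annotation] The open-ended loop gives the NVM a couple of extra chances before probe bails out; roughly (validation helper name from this driver, error path simplified):

    for (i = 0;; i++) {
            if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
                    break;          /* checksum good */
            if (i == 2) {           /* three failed attempts: give up */
                    dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
                    err = -EIO;
                    goto err_eeprom;
            }
    }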
@@ -6335,8 +6263,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
6335 | adapter->rx_ring->count = E1000_DEFAULT_RXD; | 6263 | adapter->rx_ring->count = E1000_DEFAULT_RXD; |
6336 | adapter->tx_ring->count = E1000_DEFAULT_TXD; | 6264 | adapter->tx_ring->count = E1000_DEFAULT_TXD; |
6337 | 6265 | ||
6338 | /* | 6266 | /* Initial Wake on LAN setting - If APM wake is enabled in |
6339 | * Initial Wake on LAN setting - If APM wake is enabled in | ||
6340 | * the EEPROM, enable the ACPI Magic Packet filter | 6267 | * the EEPROM, enable the ACPI Magic Packet filter |
6341 | */ | 6268 | */ |
6342 | if (adapter->flags & FLAG_APME_IN_WUC) { | 6269 | if (adapter->flags & FLAG_APME_IN_WUC) { |
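[Annotation] On parts where the APM enable bit is mirrored into the WUC register, the initial WoL word is seeded from that register instead of an EEPROM word; a sketch (register and bit names from this driver):

    if (adapter->flags & FLAG_APME_IN_WUC) {
            /* APME bit from the EEPROM is mapped into WUC */
            eeprom_data = er32(WUC);
            eeprom_apme_mask = E1000_WUC_APME;
    }
    if (eeprom_data & eeprom_apme_mask)
            adapter->eeprom_wol |= E1000_WUFC_MAG; /* Magic Packet wake */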
@@ -6360,8 +6287,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
6360 | if (eeprom_data & eeprom_apme_mask) | 6287 | if (eeprom_data & eeprom_apme_mask) |
6361 | adapter->eeprom_wol |= E1000_WUFC_MAG; | 6288 | adapter->eeprom_wol |= E1000_WUFC_MAG; |
6362 | 6289 | ||
6363 | /* | 6290 | /* now that we have the eeprom settings, apply the special cases |
6364 | * now that we have the eeprom settings, apply the special cases | ||
6365 | * where the eeprom may be wrong or the board simply won't support | 6291 | * where the eeprom may be wrong or the board simply won't support |
6366 | * wake on lan on a particular port | 6292 | * wake on lan on a particular port |
6367 | */ | 6293 | */ |
@@ -6378,8 +6304,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
6378 | /* reset the hardware with the new settings */ | 6304 | /* reset the hardware with the new settings */ |
6379 | e1000e_reset(adapter); | 6305 | e1000e_reset(adapter); |
6380 | 6306 | ||
6381 | /* | 6307 | /* If the controller has AMT, do not set DRV_LOAD until the interface |
6382 | * If the controller has AMT, do not set DRV_LOAD until the interface | ||
6383 | * is up. For all other cases, let the f/w know that the h/w is now | 6308 | * is up. For all other cases, let the f/w know that the h/w is now |
6384 | * under the control of the driver. | 6309 | * under the control of the driver. |
6385 | */ | 6310 | */ |
@@ -6442,8 +6367,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
6442 | struct e1000_adapter *adapter = netdev_priv(netdev); | 6367 | struct e1000_adapter *adapter = netdev_priv(netdev); |
6443 | bool down = test_bit(__E1000_DOWN, &adapter->state); | 6368 | bool down = test_bit(__E1000_DOWN, &adapter->state); |
6444 | 6369 | ||
6445 | /* | 6370 | /* The timers may be rescheduled, so explicitly disable them |
6446 | * The timers may be rescheduled, so explicitly disable them | ||
6447 | * from being rescheduled. | 6371 | * from being rescheduled. |
6448 | */ | 6372 | */ |
6449 | if (!down) | 6373 | if (!down) |
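[Annotation] Marking the adapter down first closes the re-arm race; the teardown then looks roughly like this (timer names from this driver):

    /* __E1000_DOWN stops callbacks from rescheduling themselves;
     * del_timer_sync() then waits out any handler already running. */
    if (!down)
            set_bit(__E1000_DOWN, &adapter->state);
    del_timer_sync(&adapter->watchdog_timer);
    del_timer_sync(&adapter->phy_info_timer);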
@@ -6468,8 +6392,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
6468 | if (pci_dev_run_wake(pdev)) | 6392 | if (pci_dev_run_wake(pdev)) |
6469 | pm_runtime_get_noresume(&pdev->dev); | 6393 | pm_runtime_get_noresume(&pdev->dev); |
6470 | 6394 | ||
6471 | /* | 6395 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
6472 | * Release control of h/w to f/w. If f/w is AMT enabled, this | ||
6473 | * would have already happened in close and is redundant. | 6396 | * would have already happened in close and is redundant. |
6474 | */ | 6397 | */ |
6475 | e1000e_release_hw_control(adapter); | 6398 | e1000e_release_hw_control(adapter); |