Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--  drivers/net/e1000e/netdev.c | 352
1 file changed, 231 insertions, 121 deletions
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 9f13b660b801..36d31a416320 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -52,7 +52,9 @@
 
 #include "e1000.h"
 
-#define DRV_VERSION "1.0.2-k4"
+#define DRV_EXTRAVERSION "-k2"
+
+#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -67,6 +69,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
         [board_ich9lan] = &e1000_ich9_info,
         [board_ich10lan] = &e1000_ich10_info,
         [board_pchlan] = &e1000_pch_info,
+        [board_pch2lan] = &e1000_pch2_info,
 };
 
 struct e1000_reg_info {
@@ -221,10 +224,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
         buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
         printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
                 0, tx_ring->next_to_use, tx_ring->next_to_clean,
-                (u64)buffer_info->dma,
+                (unsigned long long)buffer_info->dma,
                 buffer_info->length,
                 buffer_info->next_to_watch,
-                (u64)buffer_info->time_stamp);
+                (unsigned long long)buffer_info->time_stamp);
 
         /* Print TX Rings */
         if (!netif_msg_tx_done(adapter))
@@ -276,9 +279,11 @@ static void e1000e_dump(struct e1000_adapter *adapter)
276 "%04X %3X %016llX %p", 279 "%04X %3X %016llX %p",
277 (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' : 280 (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
278 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i, 281 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
279 le64_to_cpu(u0->a), le64_to_cpu(u0->b), 282 (unsigned long long)le64_to_cpu(u0->a),
280 (u64)buffer_info->dma, buffer_info->length, 283 (unsigned long long)le64_to_cpu(u0->b),
281 buffer_info->next_to_watch, (u64)buffer_info->time_stamp, 284 (unsigned long long)buffer_info->dma,
285 buffer_info->length, buffer_info->next_to_watch,
286 (unsigned long long)buffer_info->time_stamp,
282 buffer_info->skb); 287 buffer_info->skb);
283 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 288 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
284 printk(KERN_CONT " NTC/U\n"); 289 printk(KERN_CONT " NTC/U\n");
@@ -353,19 +358,19 @@ rx_ring_summary:
                         printk(KERN_INFO "RWB[0x%03X] %016llX "
                                 "%016llX %016llX %016llX "
                                 "---------------- %p", i,
-                                le64_to_cpu(u1->a),
-                                le64_to_cpu(u1->b),
-                                le64_to_cpu(u1->c),
-                                le64_to_cpu(u1->d),
+                                (unsigned long long)le64_to_cpu(u1->a),
+                                (unsigned long long)le64_to_cpu(u1->b),
+                                (unsigned long long)le64_to_cpu(u1->c),
+                                (unsigned long long)le64_to_cpu(u1->d),
                                 buffer_info->skb);
                 } else {
                         printk(KERN_INFO "R [0x%03X] %016llX "
                                 "%016llX %016llX %016llX %016llX %p", i,
-                                le64_to_cpu(u1->a),
-                                le64_to_cpu(u1->b),
-                                le64_to_cpu(u1->c),
-                                le64_to_cpu(u1->d),
-                                (u64)buffer_info->dma,
+                                (unsigned long long)le64_to_cpu(u1->a),
+                                (unsigned long long)le64_to_cpu(u1->b),
+                                (unsigned long long)le64_to_cpu(u1->c),
+                                (unsigned long long)le64_to_cpu(u1->d),
+                                (unsigned long long)buffer_info->dma,
                                 buffer_info->skb);
 
                         if (netif_msg_pktdata(adapter))
@@ -402,9 +407,11 @@ rx_ring_summary:
                 buffer_info = &rx_ring->buffer_info[i];
                 u0 = (struct my_u0 *)rx_desc;
                 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
-                        "%016llX %p",
-                        i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
-                        (u64)buffer_info->dma, buffer_info->skb);
+                        "%016llX %p", i,
+                        (unsigned long long)le64_to_cpu(u0->a),
+                        (unsigned long long)le64_to_cpu(u0->b),
+                        (unsigned long long)buffer_info->dma,
+                        buffer_info->skb);
                 if (i == rx_ring->next_to_use)
                         printk(KERN_CONT " NTU\n");
                 else if (i == rx_ring->next_to_clean)
@@ -1778,25 +1785,25 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
 {
         int err;
-        int numvecs, i;
-
+        int i;
 
         switch (adapter->int_mode) {
         case E1000E_INT_MODE_MSIX:
                 if (adapter->flags & FLAG_HAS_MSIX) {
-                        numvecs = 3; /* RxQ0, TxQ0 and other */
-                        adapter->msix_entries = kcalloc(numvecs,
+                        adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
+                        adapter->msix_entries = kcalloc(adapter->num_vectors,
                                         sizeof(struct msix_entry),
                                         GFP_KERNEL);
                         if (adapter->msix_entries) {
-                                for (i = 0; i < numvecs; i++)
+                                for (i = 0; i < adapter->num_vectors; i++)
                                         adapter->msix_entries[i].entry = i;
 
                                 err = pci_enable_msix(adapter->pdev,
                                                 adapter->msix_entries,
-                                                numvecs);
-                                if (err == 0)
+                                                adapter->num_vectors);
+                                if (err == 0) {
                                         return;
+                                }
                         }
                         /* MSI-X failed, so fall through and try MSI */
                         e_err("Failed to initialize MSI-X interrupts. "
@@ -1818,6 +1825,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                 /* Don't do anything; this is the system default */
                 break;
         }
+
+        /* store the number of vectors being used */
+        adapter->num_vectors = 1;
 }
 
 /**
@@ -1939,7 +1949,14 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
         if (adapter->msix_entries)
                 ew32(EIAC_82574, 0);
         e1e_flush();
-        synchronize_irq(adapter->pdev->irq);
+
+        if (adapter->msix_entries) {
+                int i;
+                for (i = 0; i < adapter->num_vectors; i++)
+                        synchronize_irq(adapter->msix_entries[i].vector);
+        } else {
+                synchronize_irq(adapter->pdev->irq);
+        }
 }
 
 /**
@@ -2723,6 +2740,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
                 e1e_wphy(hw, 22, phy_data);
         }
 
+        /* Workaround Si errata on 82579 - configure jumbo frame flow */
+        if (hw->mac.type == e1000_pch2lan) {
+                s32 ret_val;
+
+                if (rctl & E1000_RCTL_LPE)
+                        ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+                else
+                        ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+        }
+
         /* Setup buffer sizes */
         rctl &= ~E1000_RCTL_SZ_4096;
         rctl |= E1000_RCTL_BSEX;
@@ -2759,7 +2786,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
          * per packet.
          */
         pages = PAGE_USE_COUNT(adapter->netdev->mtu);
-        if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
+        if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
             (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
                 adapter->rx_ps_pages = pages;
         else
@@ -3118,7 +3145,27 @@ void e1000e_reset(struct e1000_adapter *adapter)
          * with ERT support assuming ERT set to E1000_ERT_2048), or
          * - the full Rx FIFO size minus one full frame
          */
-        if (hw->mac.type == e1000_pchlan) {
+        if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
+                fc->pause_time = 0xFFFF;
+        else
+                fc->pause_time = E1000_FC_PAUSE_TIME;
+        fc->send_xon = 1;
+        fc->current_mode = fc->requested_mode;
+
+        switch (hw->mac.type) {
+        default:
+                if ((adapter->flags & FLAG_HAS_ERT) &&
+                    (adapter->netdev->mtu > ETH_DATA_LEN))
+                        hwm = min(((pba << 10) * 9 / 10),
+                                  ((pba << 10) - (E1000_ERT_2048 << 3)));
+                else
+                        hwm = min(((pba << 10) * 9 / 10),
+                                  ((pba << 10) - adapter->max_frame_size));
+
+                fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+                fc->low_water = fc->high_water - 8;
+                break;
+        case e1000_pchlan:
                 /*
                  * Workaround PCH LOM adapter hangs with certain network
                  * loads. If hangs persist, try disabling Tx flow control.
@@ -3131,26 +3178,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
                         fc->low_water = 0x3000;
                 }
                 fc->refresh_time = 0x1000;
-        } else {
-                if ((adapter->flags & FLAG_HAS_ERT) &&
-                    (adapter->netdev->mtu > ETH_DATA_LEN))
-                        hwm = min(((pba << 10) * 9 / 10),
-                                  ((pba << 10) - (E1000_ERT_2048 << 3)));
-                else
-                        hwm = min(((pba << 10) * 9 / 10),
-                                  ((pba << 10) - adapter->max_frame_size));
-
-                fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
-                fc->low_water = fc->high_water - 8;
+                break;
+        case e1000_pch2lan:
+                fc->high_water = 0x05C20;
+                fc->low_water = 0x05048;
+                fc->pause_time = 0x0650;
+                fc->refresh_time = 0x0400;
+                break;
         }
 
-        if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
-                fc->pause_time = 0xFFFF;
-        else
-                fc->pause_time = E1000_FC_PAUSE_TIME;
-        fc->send_xon = 1;
-        fc->current_mode = fc->requested_mode;
-
         /* Allow time for pending master requests to run */
         mac->ops.reset_hw(hw);
 
@@ -3162,8 +3198,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
                 e1000_get_hw_control(adapter);
 
         ew32(WUC, 0);
-        if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
-                e1e_wphy(&adapter->hw, BM_WUC, 0);
 
         if (mac->ops.init_hw(hw))
                 e_err("Hardware Error\n");
@@ -3194,12 +3228,6 @@ int e1000e_up(struct e1000_adapter *adapter)
 {
         struct e1000_hw *hw = &adapter->hw;
 
-        /* DMA latency requirement to workaround early-receive/jumbo issue */
-        if (adapter->flags & FLAG_HAS_ERT)
-                pm_qos_add_request(&adapter->netdev->pm_qos_req,
-                                   PM_QOS_CPU_DMA_LATENCY,
-                                   PM_QOS_DEFAULT_VALUE);
-
         /* hardware has been reset, we need to reload some things */
         e1000_configure(adapter);
 
@@ -3263,9 +3291,6 @@ void e1000e_down(struct e1000_adapter *adapter)
         e1000_clean_tx_ring(adapter);
         e1000_clean_rx_ring(adapter);
 
-        if (adapter->flags & FLAG_HAS_ERT)
-                pm_qos_remove_request(&adapter->netdev->pm_qos_req);
-
         /*
          * TODO: for power management, we could drop the link and
          * pci_disable_device here.
@@ -3416,13 +3441,18 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
 
         /* disable SERR in case the MSI write causes a master abort */
         pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
-        pci_write_config_word(adapter->pdev, PCI_COMMAND,
-                              pci_cmd & ~PCI_COMMAND_SERR);
+        if (pci_cmd & PCI_COMMAND_SERR)
+                pci_write_config_word(adapter->pdev, PCI_COMMAND,
+                                      pci_cmd & ~PCI_COMMAND_SERR);
 
         err = e1000_test_msi_interrupt(adapter);
 
-        /* restore previous setting of command word */
-        pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+        /* re-enable SERR */
+        if (pci_cmd & PCI_COMMAND_SERR) {
+                pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+                pci_cmd |= PCI_COMMAND_SERR;
+                pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+        }
 
         /* success ! */
         if (!err)
@@ -3495,6 +3525,12 @@ static int e1000_open(struct net_device *netdev)
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
                 e1000_update_mng_vlan(adapter);
 
+        /* DMA latency requirement to workaround early-receive/jumbo issue */
+        if (adapter->flags & FLAG_HAS_ERT)
+                pm_qos_add_request(&adapter->netdev->pm_qos_req,
+                                   PM_QOS_CPU_DMA_LATENCY,
+                                   PM_QOS_DEFAULT_VALUE);
+
         /*
          * before we allocate an interrupt, we must be ready to handle it.
          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -3599,6 +3635,9 @@ static int e1000_close(struct net_device *netdev)
         if (adapter->flags & FLAG_HAS_AMT)
                 e1000_release_hw_control(adapter);
 
+        if (adapter->flags & FLAG_HAS_ERT)
+                pm_qos_remove_request(&adapter->netdev->pm_qos_req);
+
         pm_runtime_put_sync(&pdev->dev);
 
         return 0;
@@ -3669,6 +3708,110 @@ static void e1000_update_phy_info(unsigned long data)
 }
 
 /**
+ * e1000e_update_phy_stats - Update the PHY statistics counters
+ * @adapter: board private structure
+ **/
+static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
+{
+        struct e1000_hw *hw = &adapter->hw;
+        s32 ret_val;
+        u16 phy_data;
+
+        ret_val = hw->phy.ops.acquire(hw);
+        if (ret_val)
+                return;
+
+        hw->phy.addr = 1;
+
+#define HV_PHY_STATS_PAGE 778
+        /*
+         * A page set is expensive so check if already on desired page.
+         * If not, set to the page with the PHY status registers.
+         */
+        ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                           &phy_data);
+        if (ret_val)
+                goto release;
+        if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
+                ret_val = e1000e_write_phy_reg_mdic(hw,
+                                                    IGP01E1000_PHY_PAGE_SELECT,
+                                                    (HV_PHY_STATS_PAGE <<
+                                                     IGP_PAGE_SHIFT));
+                if (ret_val)
+                        goto release;
+        }
+
+        /* Read/clear the upper 16-bit registers and read/accumulate lower */
+
+        /* Single Collision Count */
+        e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
+                                 &phy_data);
+        ret_val = e1000e_read_phy_reg_mdic(hw,
+                                           HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
+                                           &phy_data);
+        if (!ret_val)
+                adapter->stats.scc += phy_data;
+
+        /* Excessive Collision Count */
+        e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
+                                 &phy_data);
+        ret_val = e1000e_read_phy_reg_mdic(hw,
+                                           HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
+                                           &phy_data);
+        if (!ret_val)
+                adapter->stats.ecol += phy_data;
+
+        /* Multiple Collision Count */
+        e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
+                                 &phy_data);
+        ret_val = e1000e_read_phy_reg_mdic(hw,
+                                           HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
+                                           &phy_data);
+        if (!ret_val)
+                adapter->stats.mcc += phy_data;
+
+        /* Late Collision Count */
+        e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
+                                 &phy_data);
+        ret_val = e1000e_read_phy_reg_mdic(hw,
+                                           HV_LATECOL_LOWER &
+                                           MAX_PHY_REG_ADDRESS,
+                                           &phy_data);
+        if (!ret_val)
+                adapter->stats.latecol += phy_data;
+
+        /* Collision Count - also used for adaptive IFS */
+        e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
+                                 &phy_data);
+        ret_val = e1000e_read_phy_reg_mdic(hw,
+                                           HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
+                                           &phy_data);
+        if (!ret_val)
+                hw->mac.collision_delta = phy_data;
+
+        /* Defer Count */
+        e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
+                                 &phy_data);
+        ret_val = e1000e_read_phy_reg_mdic(hw,
+                                           HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
+                                           &phy_data);
+        if (!ret_val)
+                adapter->stats.dc += phy_data;
+
+        /* Transmit with no CRS */
+        e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
+                                 &phy_data);
+        ret_val = e1000e_read_phy_reg_mdic(hw,
+                                           HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
+                                           &phy_data);
+        if (!ret_val)
+                adapter->stats.tncrs += phy_data;
+
+release:
+        hw->phy.ops.release(hw);
+}
+
+/**
  * e1000e_update_stats - Update the board statistics counters
  * @adapter: board private structure
  **/
@@ -3677,7 +3820,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
         struct net_device *netdev = adapter->netdev;
         struct e1000_hw *hw = &adapter->hw;
         struct pci_dev *pdev = adapter->pdev;
-        u16 phy_data;
 
         /*
          * Prevent stats update while adapter is being reset, or if the pci
@@ -3697,34 +3839,27 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
         adapter->stats.roc += er32(ROC);
 
         adapter->stats.mpc += er32(MPC);
-        if ((hw->phy.type == e1000_phy_82578) ||
-            (hw->phy.type == e1000_phy_82577)) {
-                e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
-                if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
-                        adapter->stats.scc += phy_data;
-
-                e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
-                if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
-                        adapter->stats.ecol += phy_data;
-
-                e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
-                if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
-                        adapter->stats.mcc += phy_data;
-
-                e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
-                if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
-                        adapter->stats.latecol += phy_data;
-
-                e1e_rphy(hw, HV_DC_UPPER, &phy_data);
-                if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
-                        adapter->stats.dc += phy_data;
-        } else {
-                adapter->stats.scc += er32(SCC);
-                adapter->stats.ecol += er32(ECOL);
-                adapter->stats.mcc += er32(MCC);
-                adapter->stats.latecol += er32(LATECOL);
-                adapter->stats.dc += er32(DC);
+
+        /* Half-duplex statistics */
+        if (adapter->link_duplex == HALF_DUPLEX) {
+                if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
+                        e1000e_update_phy_stats(adapter);
+                } else {
+                        adapter->stats.scc += er32(SCC);
+                        adapter->stats.ecol += er32(ECOL);
+                        adapter->stats.mcc += er32(MCC);
+                        adapter->stats.latecol += er32(LATECOL);
+                        adapter->stats.dc += er32(DC);
+
+                        hw->mac.collision_delta = er32(COLC);
+
+                        if ((hw->mac.type != e1000_82574) &&
+                            (hw->mac.type != e1000_82583))
+                                adapter->stats.tncrs += er32(TNCRS);
+                }
+                adapter->stats.colc += hw->mac.collision_delta;
         }
+
         adapter->stats.xonrxc += er32(XONRXC);
         adapter->stats.xontxc += er32(XONTXC);
         adapter->stats.xoffrxc += er32(XOFFRXC);
@@ -3742,28 +3877,9 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 
         hw->mac.tx_packet_delta = er32(TPT);
         adapter->stats.tpt += hw->mac.tx_packet_delta;
-        if ((hw->phy.type == e1000_phy_82578) ||
-            (hw->phy.type == e1000_phy_82577)) {
-                e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
-                if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
-                        hw->mac.collision_delta = phy_data;
-        } else {
-                hw->mac.collision_delta = er32(COLC);
-        }
-        adapter->stats.colc += hw->mac.collision_delta;
 
         adapter->stats.algnerrc += er32(ALGNERRC);
         adapter->stats.rxerrc += er32(RXERRC);
-        if ((hw->phy.type == e1000_phy_82578) ||
-            (hw->phy.type == e1000_phy_82577)) {
-                e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
-                if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
-                        adapter->stats.tncrs += phy_data;
-        } else {
-                if ((hw->mac.type != e1000_82574) &&
-                    (hw->mac.type != e1000_82583))
-                        adapter->stats.tncrs += er32(TNCRS);
-        }
         adapter->stats.cexterr += er32(CEXTERR);
         adapter->stats.tsctc += er32(TSCTC);
         adapter->stats.tsctfc += er32(TSCTFC);
@@ -3862,7 +3978,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
                ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
 }
 
-bool e1000e_has_link(struct e1000_adapter *adapter)
+static bool e1000e_has_link(struct e1000_adapter *adapter)
 {
         struct e1000_hw *hw = &adapter->hw;
         bool link_active = 0;
@@ -4838,14 +4954,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
         int retval = 0;
 
         /* copy MAC RARs to PHY RARs */
-        for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
-                mac_reg = er32(RAL(i));
-                e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
-                e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
-                mac_reg = er32(RAH(i));
-                e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
-                e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
-        }
+        e1000_copy_rx_addrs_to_phy_ich8lan(hw);
 
         /* copy MAC MTA to PHY MTA */
         for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
@@ -5548,8 +5657,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
         if (err)
                 goto err_sw_init;
 
-        err = -EIO;
-
         memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
         memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
         memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -5896,6 +6003,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
         { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
 
+        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
+        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
+
         { } /* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
@@ -5932,7 +6042,7 @@ static int __init e1000_init_module(void)
         int ret;
         pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
                 e1000e_driver_version);
-        pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
+        pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
         ret = pci_register_driver(&e1000_driver);
 
         return ret;